Contents of /trunk/kernel-alx/patches-4.9/0102-4.9.3-all-fixes.patch
Revision 2956
Mon Jul 24 12:03:46 2017 UTC (7 years, 3 months ago) by niro
File size: 264856 byte(s)
-added patches-4.9
1 | diff --git a/Documentation/conf.py b/Documentation/conf.py |
2 | index bf6f310e5170..d769cd89a9f7 100644 |
3 | --- a/Documentation/conf.py |
4 | +++ b/Documentation/conf.py |
5 | @@ -37,7 +37,7 @@ from load_config import loadConfig |
6 | extensions = ['kernel-doc', 'rstFlatTable', 'kernel_include', 'cdomain'] |
7 | |
8 | # The name of the math extension changed on Sphinx 1.4 |
9 | -if minor > 3: |
10 | +if major == 1 and minor > 3: |
11 | extensions.append("sphinx.ext.imgmath") |
12 | else: |
13 | extensions.append("sphinx.ext.pngmath") |
14 | @@ -332,6 +332,10 @@ latex_elements = { |
15 | ''' |
16 | } |
17 | |
18 | +# Fix reference escape troubles with Sphinx 1.4.x |
19 | +if major == 1 and minor > 3: |
20 | + latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n' |
21 | + |
22 | # Grouping the document tree into LaTeX files. List of tuples |
23 | # (source start file, target name, title, |
24 | # author, documentclass [howto, manual, or own class]). |
25 | diff --git a/Documentation/media/index.rst b/Documentation/media/index.rst |
26 | index e347a3e7bdef..7f8f0af620ce 100644 |
27 | --- a/Documentation/media/index.rst |
28 | +++ b/Documentation/media/index.rst |
29 | @@ -1,11 +1,6 @@ |
30 | Linux Media Subsystem Documentation |
31 | =================================== |
32 | |
33 | -.. Sphinx 1.4.x has a definition for DUrole that doesn't work on alltt blocks |
34 | -.. raw:: latex |
35 | - |
36 | - \renewcommand*{\DUrole}[2]{ #2 } |
37 | - |
38 | Contents: |
39 | |
40 | .. toctree:: |
41 | diff --git a/Makefile b/Makefile |
42 | index c9ce897465c5..ae42a0aaab06 100644 |
43 | --- a/Makefile |
44 | +++ b/Makefile |
45 | @@ -1,6 +1,6 @@ |
46 | VERSION = 4 |
47 | PATCHLEVEL = 9 |
48 | -SUBLEVEL = 2 |
49 | +SUBLEVEL = 3 |
50 | EXTRAVERSION = |
51 | NAME = Roaring Lionus |
52 | |
53 | diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi |
54 | index 9365580a194f..725ecb3c5fb4 100644 |
55 | --- a/arch/arm/boot/dts/r8a7794.dtsi |
56 | +++ b/arch/arm/boot/dts/r8a7794.dtsi |
57 | @@ -1260,7 +1260,7 @@ |
58 | mstp7_clks: mstp7_clks@e615014c { |
59 | compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks"; |
60 | reg = <0 0xe615014c 0 4>, <0 0xe61501c4 0 4>; |
61 | - clocks = <&mp_clk>, <&mp_clk>, |
62 | + clocks = <&mp_clk>, <&hp_clk>, |
63 | <&zs_clk>, <&p_clk>, <&p_clk>, <&zs_clk>, |
64 | <&zs_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, |
65 | <&zx_clk>; |
66 | diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts |
67 | index ba5bca0fe997..44377a98cc89 100644 |
68 | --- a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts |
69 | +++ b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts |
70 | @@ -227,3 +227,8 @@ |
71 | pinctrl-0 = <&uart0_pins_a>; |
72 | status = "okay"; |
73 | }; |
74 | + |
75 | +&usbphy { |
76 | + /* VBUS on usb host ports are tied to DC5V and therefore always on */ |
77 | + status = "okay"; |
78 | +}; |
79 | diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c |
80 | index aef022a87c53..04410d9f5e72 100644 |
81 | --- a/arch/arm/crypto/aes-ce-glue.c |
82 | +++ b/arch/arm/crypto/aes-ce-glue.c |
83 | @@ -88,8 +88,13 @@ static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, |
84 | u32 *rki = ctx->key_enc + (i * kwords); |
85 | u32 *rko = rki + kwords; |
86 | |
87 | +#ifndef CONFIG_CPU_BIG_ENDIAN |
88 | rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8); |
89 | rko[0] = rko[0] ^ rki[0] ^ rcon[i]; |
90 | +#else |
91 | + rko[0] = rol32(ce_aes_sub(rki[kwords - 1]), 8); |
92 | + rko[0] = rko[0] ^ rki[0] ^ (rcon[i] << 24); |
93 | +#endif |
94 | rko[1] = rko[0] ^ rki[1]; |
95 | rko[2] = rko[1] ^ rki[2]; |
96 | rko[3] = rko[2] ^ rki[3]; |
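(Illustrative aside, not part of the patch itself.) The hunk above pairs ror32() with the round constant in the low byte on little-endian builds, and rol32() with rcon shifted into the top byte on big-endian builds, because the 32-bit key-schedule words then carry their bytes in the opposite order. A minimal stand-alone C sketch of the two rotation helpers, assuming the usual ror32()/rol32() semantics from <linux/bitops.h>:

#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel's ror32()/rol32() helpers. */
static inline uint32_t ror32(uint32_t w, unsigned int s)
{
        return (w >> s) | (w << (32 - s));
}

static inline uint32_t rol32(uint32_t w, unsigned int s)
{
        return (w << s) | (w >> (32 - s));
}

int main(void)
{
        uint32_t w = 0x11223344;

        /* The two rotations move the bytes in opposite directions, which
         * is why the big-endian branch above also moves the round
         * constant from the low byte to the top byte. */
        printf("ror32: %08x\n", ror32(w, 8));   /* 44112233 */
        printf("rol32: %08x\n", rol32(w, 8));   /* 22334411 */
        return 0;
}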
97 | diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c |
98 | index ed3d0e9f72ac..d7a43afdfac0 100644 |
99 | --- a/arch/arm/mach-davinci/da850.c |
100 | +++ b/arch/arm/mach-davinci/da850.c |
101 | @@ -319,6 +319,16 @@ static struct clk emac_clk = { |
102 | .gpsc = 1, |
103 | }; |
104 | |
105 | +/* |
106 | + * In order to avoid adding the emac_clk to the clock lookup table twice (and |
107 | + * screwing up the linked list in the process) create a separate clock for |
108 | + * mdio inheriting the rate from emac_clk. |
109 | + */ |
110 | +static struct clk mdio_clk = { |
111 | + .name = "mdio", |
112 | + .parent = &emac_clk, |
113 | +}; |
114 | + |
115 | static struct clk mcasp_clk = { |
116 | .name = "mcasp", |
117 | .parent = &async3_clk, |
118 | @@ -494,7 +504,7 @@ static struct clk_lookup da850_clks[] = { |
119 | CLK(NULL, "arm", &arm_clk), |
120 | CLK(NULL, "rmii", &rmii_clk), |
121 | CLK("davinci_emac.1", NULL, &emac_clk), |
122 | - CLK("davinci_mdio.0", "fck", &emac_clk), |
123 | + CLK("davinci_mdio.0", "fck", &mdio_clk), |
124 | CLK("davinci-mcasp.0", NULL, &mcasp_clk), |
125 | CLK("davinci-mcbsp.0", NULL, &mcbsp0_clk), |
126 | CLK("davinci-mcbsp.1", NULL, &mcbsp1_clk), |
127 | diff --git a/arch/arm64/boot/dts/hisilicon/hip06.dtsi b/arch/arm64/boot/dts/hisilicon/hip06.dtsi |
128 | index b548763366dd..af450413b9dd 100644 |
129 | --- a/arch/arm64/boot/dts/hisilicon/hip06.dtsi |
130 | +++ b/arch/arm64/boot/dts/hisilicon/hip06.dtsi |
131 | @@ -322,7 +322,7 @@ |
132 | compatible = "generic-ohci"; |
133 | reg = <0x0 0xa7030000 0x0 0x10000>; |
134 | interrupt-parent = <&mbigen_usb>; |
135 | - interrupts = <64 4>; |
136 | + interrupts = <640 4>; |
137 | dma-coherent; |
138 | status = "disabled"; |
139 | }; |
140 | @@ -331,7 +331,7 @@ |
141 | compatible = "generic-ehci"; |
142 | reg = <0x0 0xa7020000 0x0 0x10000>; |
143 | interrupt-parent = <&mbigen_usb>; |
144 | - interrupts = <65 4>; |
145 | + interrupts = <641 4>; |
146 | dma-coherent; |
147 | status = "disabled"; |
148 | }; |
149 | diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S |
150 | index a2a7fbcacc14..3363560c79b7 100644 |
151 | --- a/arch/arm64/crypto/aes-ce-ccm-core.S |
152 | +++ b/arch/arm64/crypto/aes-ce-ccm-core.S |
153 | @@ -9,6 +9,7 @@ |
154 | */ |
155 | |
156 | #include <linux/linkage.h> |
157 | +#include <asm/assembler.h> |
158 | |
159 | .text |
160 | .arch armv8-a+crypto |
161 | @@ -19,7 +20,7 @@ |
162 | */ |
163 | ENTRY(ce_aes_ccm_auth_data) |
164 | ldr w8, [x3] /* leftover from prev round? */ |
165 | - ld1 {v0.2d}, [x0] /* load mac */ |
166 | + ld1 {v0.16b}, [x0] /* load mac */ |
167 | cbz w8, 1f |
168 | sub w8, w8, #16 |
169 | eor v1.16b, v1.16b, v1.16b |
170 | @@ -31,7 +32,7 @@ ENTRY(ce_aes_ccm_auth_data) |
171 | beq 8f /* out of input? */ |
172 | cbnz w8, 0b |
173 | eor v0.16b, v0.16b, v1.16b |
174 | -1: ld1 {v3.2d}, [x4] /* load first round key */ |
175 | +1: ld1 {v3.16b}, [x4] /* load first round key */ |
176 | prfm pldl1strm, [x1] |
177 | cmp w5, #12 /* which key size? */ |
178 | add x6, x4, #16 |
179 | @@ -41,17 +42,17 @@ ENTRY(ce_aes_ccm_auth_data) |
180 | mov v5.16b, v3.16b |
181 | b 4f |
182 | 2: mov v4.16b, v3.16b |
183 | - ld1 {v5.2d}, [x6], #16 /* load 2nd round key */ |
184 | + ld1 {v5.16b}, [x6], #16 /* load 2nd round key */ |
185 | 3: aese v0.16b, v4.16b |
186 | aesmc v0.16b, v0.16b |
187 | -4: ld1 {v3.2d}, [x6], #16 /* load next round key */ |
188 | +4: ld1 {v3.16b}, [x6], #16 /* load next round key */ |
189 | aese v0.16b, v5.16b |
190 | aesmc v0.16b, v0.16b |
191 | -5: ld1 {v4.2d}, [x6], #16 /* load next round key */ |
192 | +5: ld1 {v4.16b}, [x6], #16 /* load next round key */ |
193 | subs w7, w7, #3 |
194 | aese v0.16b, v3.16b |
195 | aesmc v0.16b, v0.16b |
196 | - ld1 {v5.2d}, [x6], #16 /* load next round key */ |
197 | + ld1 {v5.16b}, [x6], #16 /* load next round key */ |
198 | bpl 3b |
199 | aese v0.16b, v4.16b |
200 | subs w2, w2, #16 /* last data? */ |
201 | @@ -60,7 +61,7 @@ ENTRY(ce_aes_ccm_auth_data) |
202 | ld1 {v1.16b}, [x1], #16 /* load next input block */ |
203 | eor v0.16b, v0.16b, v1.16b /* xor with mac */ |
204 | bne 1b |
205 | -6: st1 {v0.2d}, [x0] /* store mac */ |
206 | +6: st1 {v0.16b}, [x0] /* store mac */ |
207 | beq 10f |
208 | adds w2, w2, #16 |
209 | beq 10f |
210 | @@ -79,7 +80,7 @@ ENTRY(ce_aes_ccm_auth_data) |
211 | adds w7, w7, #1 |
212 | bne 9b |
213 | eor v0.16b, v0.16b, v1.16b |
214 | - st1 {v0.2d}, [x0] |
215 | + st1 {v0.16b}, [x0] |
216 | 10: str w8, [x3] |
217 | ret |
218 | ENDPROC(ce_aes_ccm_auth_data) |
219 | @@ -89,27 +90,27 @@ ENDPROC(ce_aes_ccm_auth_data) |
220 | * u32 rounds); |
221 | */ |
222 | ENTRY(ce_aes_ccm_final) |
223 | - ld1 {v3.2d}, [x2], #16 /* load first round key */ |
224 | - ld1 {v0.2d}, [x0] /* load mac */ |
225 | + ld1 {v3.16b}, [x2], #16 /* load first round key */ |
226 | + ld1 {v0.16b}, [x0] /* load mac */ |
227 | cmp w3, #12 /* which key size? */ |
228 | sub w3, w3, #2 /* modified # of rounds */ |
229 | - ld1 {v1.2d}, [x1] /* load 1st ctriv */ |
230 | + ld1 {v1.16b}, [x1] /* load 1st ctriv */ |
231 | bmi 0f |
232 | bne 3f |
233 | mov v5.16b, v3.16b |
234 | b 2f |
235 | 0: mov v4.16b, v3.16b |
236 | -1: ld1 {v5.2d}, [x2], #16 /* load next round key */ |
237 | +1: ld1 {v5.16b}, [x2], #16 /* load next round key */ |
238 | aese v0.16b, v4.16b |
239 | aesmc v0.16b, v0.16b |
240 | aese v1.16b, v4.16b |
241 | aesmc v1.16b, v1.16b |
242 | -2: ld1 {v3.2d}, [x2], #16 /* load next round key */ |
243 | +2: ld1 {v3.16b}, [x2], #16 /* load next round key */ |
244 | aese v0.16b, v5.16b |
245 | aesmc v0.16b, v0.16b |
246 | aese v1.16b, v5.16b |
247 | aesmc v1.16b, v1.16b |
248 | -3: ld1 {v4.2d}, [x2], #16 /* load next round key */ |
249 | +3: ld1 {v4.16b}, [x2], #16 /* load next round key */ |
250 | subs w3, w3, #3 |
251 | aese v0.16b, v3.16b |
252 | aesmc v0.16b, v0.16b |
253 | @@ -120,47 +121,47 @@ ENTRY(ce_aes_ccm_final) |
254 | aese v1.16b, v4.16b |
255 | /* final round key cancels out */ |
256 | eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */ |
257 | - st1 {v0.2d}, [x0] /* store result */ |
258 | + st1 {v0.16b}, [x0] /* store result */ |
259 | ret |
260 | ENDPROC(ce_aes_ccm_final) |
261 | |
262 | .macro aes_ccm_do_crypt,enc |
263 | ldr x8, [x6, #8] /* load lower ctr */ |
264 | - ld1 {v0.2d}, [x5] /* load mac */ |
265 | - rev x8, x8 /* keep swabbed ctr in reg */ |
266 | + ld1 {v0.16b}, [x5] /* load mac */ |
267 | +CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ |
268 | 0: /* outer loop */ |
269 | - ld1 {v1.1d}, [x6] /* load upper ctr */ |
270 | + ld1 {v1.8b}, [x6] /* load upper ctr */ |
271 | prfm pldl1strm, [x1] |
272 | add x8, x8, #1 |
273 | rev x9, x8 |
274 | cmp w4, #12 /* which key size? */ |
275 | sub w7, w4, #2 /* get modified # of rounds */ |
276 | ins v1.d[1], x9 /* no carry in lower ctr */ |
277 | - ld1 {v3.2d}, [x3] /* load first round key */ |
278 | + ld1 {v3.16b}, [x3] /* load first round key */ |
279 | add x10, x3, #16 |
280 | bmi 1f |
281 | bne 4f |
282 | mov v5.16b, v3.16b |
283 | b 3f |
284 | 1: mov v4.16b, v3.16b |
285 | - ld1 {v5.2d}, [x10], #16 /* load 2nd round key */ |
286 | + ld1 {v5.16b}, [x10], #16 /* load 2nd round key */ |
287 | 2: /* inner loop: 3 rounds, 2x interleaved */ |
288 | aese v0.16b, v4.16b |
289 | aesmc v0.16b, v0.16b |
290 | aese v1.16b, v4.16b |
291 | aesmc v1.16b, v1.16b |
292 | -3: ld1 {v3.2d}, [x10], #16 /* load next round key */ |
293 | +3: ld1 {v3.16b}, [x10], #16 /* load next round key */ |
294 | aese v0.16b, v5.16b |
295 | aesmc v0.16b, v0.16b |
296 | aese v1.16b, v5.16b |
297 | aesmc v1.16b, v1.16b |
298 | -4: ld1 {v4.2d}, [x10], #16 /* load next round key */ |
299 | +4: ld1 {v4.16b}, [x10], #16 /* load next round key */ |
300 | subs w7, w7, #3 |
301 | aese v0.16b, v3.16b |
302 | aesmc v0.16b, v0.16b |
303 | aese v1.16b, v3.16b |
304 | aesmc v1.16b, v1.16b |
305 | - ld1 {v5.2d}, [x10], #16 /* load next round key */ |
306 | + ld1 {v5.16b}, [x10], #16 /* load next round key */ |
307 | bpl 2b |
308 | aese v0.16b, v4.16b |
309 | aese v1.16b, v4.16b |
310 | @@ -177,14 +178,14 @@ ENDPROC(ce_aes_ccm_final) |
311 | eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */ |
312 | st1 {v1.16b}, [x0], #16 /* write output block */ |
313 | bne 0b |
314 | - rev x8, x8 |
315 | - st1 {v0.2d}, [x5] /* store mac */ |
316 | +CPU_LE( rev x8, x8 ) |
317 | + st1 {v0.16b}, [x5] /* store mac */ |
318 | str x8, [x6, #8] /* store lsb end of ctr (BE) */ |
319 | 5: ret |
320 | |
321 | 6: eor v0.16b, v0.16b, v5.16b /* final round mac */ |
322 | eor v1.16b, v1.16b, v5.16b /* final round enc */ |
323 | - st1 {v0.2d}, [x5] /* store mac */ |
324 | + st1 {v0.16b}, [x5] /* store mac */ |
325 | add w2, w2, #16 /* process partial tail block */ |
326 | 7: ldrb w9, [x1], #1 /* get 1 byte of input */ |
327 | umov w6, v1.b[0] /* get top crypted ctr byte */ |
328 | diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c |
329 | index f7bd9bf0bbb3..50d9fe11d0c8 100644 |
330 | --- a/arch/arm64/crypto/aes-ce-cipher.c |
331 | +++ b/arch/arm64/crypto/aes-ce-cipher.c |
332 | @@ -47,24 +47,24 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) |
333 | kernel_neon_begin_partial(4); |
334 | |
335 | __asm__(" ld1 {v0.16b}, %[in] ;" |
336 | - " ld1 {v1.2d}, [%[key]], #16 ;" |
337 | + " ld1 {v1.16b}, [%[key]], #16 ;" |
338 | " cmp %w[rounds], #10 ;" |
339 | " bmi 0f ;" |
340 | " bne 3f ;" |
341 | " mov v3.16b, v1.16b ;" |
342 | " b 2f ;" |
343 | "0: mov v2.16b, v1.16b ;" |
344 | - " ld1 {v3.2d}, [%[key]], #16 ;" |
345 | + " ld1 {v3.16b}, [%[key]], #16 ;" |
346 | "1: aese v0.16b, v2.16b ;" |
347 | " aesmc v0.16b, v0.16b ;" |
348 | - "2: ld1 {v1.2d}, [%[key]], #16 ;" |
349 | + "2: ld1 {v1.16b}, [%[key]], #16 ;" |
350 | " aese v0.16b, v3.16b ;" |
351 | " aesmc v0.16b, v0.16b ;" |
352 | - "3: ld1 {v2.2d}, [%[key]], #16 ;" |
353 | + "3: ld1 {v2.16b}, [%[key]], #16 ;" |
354 | " subs %w[rounds], %w[rounds], #3 ;" |
355 | " aese v0.16b, v1.16b ;" |
356 | " aesmc v0.16b, v0.16b ;" |
357 | - " ld1 {v3.2d}, [%[key]], #16 ;" |
358 | + " ld1 {v3.16b}, [%[key]], #16 ;" |
359 | " bpl 1b ;" |
360 | " aese v0.16b, v2.16b ;" |
361 | " eor v0.16b, v0.16b, v3.16b ;" |
362 | @@ -92,24 +92,24 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) |
363 | kernel_neon_begin_partial(4); |
364 | |
365 | __asm__(" ld1 {v0.16b}, %[in] ;" |
366 | - " ld1 {v1.2d}, [%[key]], #16 ;" |
367 | + " ld1 {v1.16b}, [%[key]], #16 ;" |
368 | " cmp %w[rounds], #10 ;" |
369 | " bmi 0f ;" |
370 | " bne 3f ;" |
371 | " mov v3.16b, v1.16b ;" |
372 | " b 2f ;" |
373 | "0: mov v2.16b, v1.16b ;" |
374 | - " ld1 {v3.2d}, [%[key]], #16 ;" |
375 | + " ld1 {v3.16b}, [%[key]], #16 ;" |
376 | "1: aesd v0.16b, v2.16b ;" |
377 | " aesimc v0.16b, v0.16b ;" |
378 | - "2: ld1 {v1.2d}, [%[key]], #16 ;" |
379 | + "2: ld1 {v1.16b}, [%[key]], #16 ;" |
380 | " aesd v0.16b, v3.16b ;" |
381 | " aesimc v0.16b, v0.16b ;" |
382 | - "3: ld1 {v2.2d}, [%[key]], #16 ;" |
383 | + "3: ld1 {v2.16b}, [%[key]], #16 ;" |
384 | " subs %w[rounds], %w[rounds], #3 ;" |
385 | " aesd v0.16b, v1.16b ;" |
386 | " aesimc v0.16b, v0.16b ;" |
387 | - " ld1 {v3.2d}, [%[key]], #16 ;" |
388 | + " ld1 {v3.16b}, [%[key]], #16 ;" |
389 | " bpl 1b ;" |
390 | " aesd v0.16b, v2.16b ;" |
391 | " eor v0.16b, v0.16b, v3.16b ;" |
392 | @@ -173,7 +173,12 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, |
393 | u32 *rki = ctx->key_enc + (i * kwords); |
394 | u32 *rko = rki + kwords; |
395 | |
396 | +#ifndef CONFIG_CPU_BIG_ENDIAN |
397 | rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0]; |
398 | +#else |
399 | + rko[0] = rol32(aes_sub(rki[kwords - 1]), 8) ^ (rcon[i] << 24) ^ |
400 | + rki[0]; |
401 | +#endif |
402 | rko[1] = rko[0] ^ rki[1]; |
403 | rko[2] = rko[1] ^ rki[2]; |
404 | rko[3] = rko[2] ^ rki[3]; |
405 | diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S |
406 | index 78f3cfe92c08..b46093d567e5 100644 |
407 | --- a/arch/arm64/crypto/aes-ce.S |
408 | +++ b/arch/arm64/crypto/aes-ce.S |
409 | @@ -10,6 +10,7 @@ |
410 | */ |
411 | |
412 | #include <linux/linkage.h> |
413 | +#include <asm/assembler.h> |
414 | |
415 | #define AES_ENTRY(func) ENTRY(ce_ ## func) |
416 | #define AES_ENDPROC(func) ENDPROC(ce_ ## func) |
417 | diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S |
418 | index f6e372c528eb..c53dbeae79f2 100644 |
419 | --- a/arch/arm64/crypto/aes-modes.S |
420 | +++ b/arch/arm64/crypto/aes-modes.S |
421 | @@ -386,7 +386,8 @@ AES_ENDPROC(aes_ctr_encrypt) |
422 | .endm |
423 | |
424 | .Lxts_mul_x: |
425 | - .word 1, 0, 0x87, 0 |
426 | +CPU_LE( .quad 1, 0x87 ) |
427 | +CPU_BE( .quad 0x87, 1 ) |
428 | |
429 | AES_ENTRY(aes_xts_encrypt) |
430 | FRAME_PUSH |
431 | diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S |
432 | index b93170e1cc93..85f07ead7c5c 100644 |
433 | --- a/arch/arm64/crypto/aes-neon.S |
434 | +++ b/arch/arm64/crypto/aes-neon.S |
435 | @@ -9,6 +9,7 @@ |
436 | */ |
437 | |
438 | #include <linux/linkage.h> |
439 | +#include <asm/assembler.h> |
440 | |
441 | #define AES_ENTRY(func) ENTRY(neon_ ## func) |
442 | #define AES_ENDPROC(func) ENDPROC(neon_ ## func) |
443 | @@ -83,13 +84,13 @@ |
444 | .endm |
445 | |
446 | .macro do_block, enc, in, rounds, rk, rkp, i |
447 | - ld1 {v15.16b}, [\rk] |
448 | + ld1 {v15.4s}, [\rk] |
449 | add \rkp, \rk, #16 |
450 | mov \i, \rounds |
451 | 1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */ |
452 | tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */ |
453 | sub_bytes \in |
454 | - ld1 {v15.16b}, [\rkp], #16 |
455 | + ld1 {v15.4s}, [\rkp], #16 |
456 | subs \i, \i, #1 |
457 | beq 2222f |
458 | .if \enc == 1 |
459 | @@ -229,7 +230,7 @@ |
460 | .endm |
461 | |
462 | .macro do_block_2x, enc, in0, in1 rounds, rk, rkp, i |
463 | - ld1 {v15.16b}, [\rk] |
464 | + ld1 {v15.4s}, [\rk] |
465 | add \rkp, \rk, #16 |
466 | mov \i, \rounds |
467 | 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ |
468 | @@ -237,7 +238,7 @@ |
469 | sub_bytes_2x \in0, \in1 |
470 | tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */ |
471 | tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */ |
472 | - ld1 {v15.16b}, [\rkp], #16 |
473 | + ld1 {v15.4s}, [\rkp], #16 |
474 | subs \i, \i, #1 |
475 | beq 2222f |
476 | .if \enc == 1 |
477 | @@ -254,7 +255,7 @@ |
478 | .endm |
479 | |
480 | .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i |
481 | - ld1 {v15.16b}, [\rk] |
482 | + ld1 {v15.4s}, [\rk] |
483 | add \rkp, \rk, #16 |
484 | mov \i, \rounds |
485 | 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ |
486 | @@ -266,7 +267,7 @@ |
487 | tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */ |
488 | tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */ |
489 | tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */ |
490 | - ld1 {v15.16b}, [\rkp], #16 |
491 | + ld1 {v15.4s}, [\rkp], #16 |
492 | subs \i, \i, #1 |
493 | beq 2222f |
494 | .if \enc == 1 |
495 | @@ -306,12 +307,16 @@ |
496 | .text |
497 | .align 4 |
498 | .LForward_ShiftRows: |
499 | - .byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3 |
500 | - .byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb |
501 | +CPU_LE( .byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3 ) |
502 | +CPU_LE( .byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb ) |
503 | +CPU_BE( .byte 0xb, 0x6, 0x1, 0xc, 0x7, 0x2, 0xd, 0x8 ) |
504 | +CPU_BE( .byte 0x3, 0xe, 0x9, 0x4, 0xf, 0xa, 0x5, 0x0 ) |
505 | |
506 | .LReverse_ShiftRows: |
507 | - .byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb |
508 | - .byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3 |
509 | +CPU_LE( .byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb ) |
510 | +CPU_LE( .byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3 ) |
511 | +CPU_BE( .byte 0x3, 0x6, 0x9, 0xc, 0xf, 0x2, 0x5, 0x8 ) |
512 | +CPU_BE( .byte 0xb, 0xe, 0x1, 0x4, 0x7, 0xa, 0xd, 0x0 ) |
513 | |
514 | .LForward_Sbox: |
515 | .byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5 |
516 | diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S |
517 | index dc457015884e..f0bb9f0b524f 100644 |
518 | --- a/arch/arm64/crypto/ghash-ce-core.S |
519 | +++ b/arch/arm64/crypto/ghash-ce-core.S |
520 | @@ -29,8 +29,8 @@ |
521 | * struct ghash_key const *k, const char *head) |
522 | */ |
523 | ENTRY(pmull_ghash_update) |
524 | - ld1 {SHASH.16b}, [x3] |
525 | - ld1 {XL.16b}, [x1] |
526 | + ld1 {SHASH.2d}, [x3] |
527 | + ld1 {XL.2d}, [x1] |
528 | movi MASK.16b, #0xe1 |
529 | ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 |
530 | shl MASK.2d, MASK.2d, #57 |
531 | @@ -74,6 +74,6 @@ CPU_LE( rev64 T1.16b, T1.16b ) |
532 | |
533 | cbnz w0, 0b |
534 | |
535 | - st1 {XL.16b}, [x1] |
536 | + st1 {XL.2d}, [x1] |
537 | ret |
538 | ENDPROC(pmull_ghash_update) |
539 | diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S |
540 | index 033aae6d732a..c98e7e849f06 100644 |
541 | --- a/arch/arm64/crypto/sha1-ce-core.S |
542 | +++ b/arch/arm64/crypto/sha1-ce-core.S |
543 | @@ -78,7 +78,7 @@ ENTRY(sha1_ce_transform) |
544 | ld1r {k3.4s}, [x6] |
545 | |
546 | /* load state */ |
547 | - ldr dga, [x0] |
548 | + ld1 {dgav.4s}, [x0] |
549 | ldr dgb, [x0, #16] |
550 | |
551 | /* load sha1_ce_state::finalize */ |
552 | @@ -144,7 +144,7 @@ CPU_LE( rev32 v11.16b, v11.16b ) |
553 | b 1b |
554 | |
555 | /* store new state */ |
556 | -3: str dga, [x0] |
557 | +3: st1 {dgav.4s}, [x0] |
558 | str dgb, [x0, #16] |
559 | ret |
560 | ENDPROC(sha1_ce_transform) |
561 | diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S |
562 | index 5df9d9d470ad..01cfee066837 100644 |
563 | --- a/arch/arm64/crypto/sha2-ce-core.S |
564 | +++ b/arch/arm64/crypto/sha2-ce-core.S |
565 | @@ -85,7 +85,7 @@ ENTRY(sha2_ce_transform) |
566 | ld1 {v12.4s-v15.4s}, [x8] |
567 | |
568 | /* load state */ |
569 | - ldp dga, dgb, [x0] |
570 | + ld1 {dgav.4s, dgbv.4s}, [x0] |
571 | |
572 | /* load sha256_ce_state::finalize */ |
573 | ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize] |
574 | @@ -148,6 +148,6 @@ CPU_LE( rev32 v19.16b, v19.16b ) |
575 | b 1b |
576 | |
577 | /* store new state */ |
578 | -3: stp dga, dgb, [x0] |
579 | +3: st1 {dgav.4s, dgbv.4s}, [x0] |
580 | ret |
581 | ENDPROC(sha2_ce_transform) |
582 | diff --git a/arch/cris/boot/rescue/Makefile b/arch/cris/boot/rescue/Makefile |
583 | index 52bd0bd1dd22..d98edbb30a18 100644 |
584 | --- a/arch/cris/boot/rescue/Makefile |
585 | +++ b/arch/cris/boot/rescue/Makefile |
586 | @@ -10,6 +10,9 @@ |
587 | |
588 | asflags-y += $(LINUXINCLUDE) |
589 | ccflags-y += -O2 $(LINUXINCLUDE) |
590 | + |
591 | +ifdef CONFIG_ETRAX_AXISFLASHMAP |
592 | + |
593 | arch-$(CONFIG_ETRAX_ARCH_V10) = v10 |
594 | arch-$(CONFIG_ETRAX_ARCH_V32) = v32 |
595 | |
596 | @@ -28,6 +31,11 @@ $(obj)/rescue.bin: $(obj)/rescue.o FORCE |
597 | $(call if_changed,objcopy) |
598 | cp -p $(obj)/rescue.bin $(objtree) |
599 | |
600 | +else |
601 | +$(obj)/rescue.bin: |
602 | + |
603 | +endif |
604 | + |
605 | $(obj)/testrescue.bin: $(obj)/testrescue.o |
606 | $(OBJCOPY) $(OBJCOPYFLAGS) $(obj)/testrescue.o tr.bin |
607 | # Pad it to 784 bytes |
608 | diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c |
609 | index 6a02b3a3fa65..e92fb190e2d6 100644 |
610 | --- a/arch/mips/kvm/entry.c |
611 | +++ b/arch/mips/kvm/entry.c |
612 | @@ -521,6 +521,9 @@ void *kvm_mips_build_exit(void *addr) |
613 | uasm_i_and(&p, V0, V0, AT); |
614 | uasm_i_lui(&p, AT, ST0_CU0 >> 16); |
615 | uasm_i_or(&p, V0, V0, AT); |
616 | +#ifdef CONFIG_64BIT |
617 | + uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX); |
618 | +#endif |
619 | uasm_i_mtc0(&p, V0, C0_STATUS); |
620 | uasm_i_ehb(&p); |
621 | |
622 | @@ -643,7 +646,7 @@ static void *kvm_mips_build_ret_to_guest(void *addr) |
623 | |
624 | /* Setup status register for running guest in UM */ |
625 | uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE); |
626 | - UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX)); |
627 | + UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX)); |
628 | uasm_i_and(&p, V1, V1, AT); |
629 | uasm_i_mtc0(&p, V1, C0_STATUS); |
630 | uasm_i_ehb(&p); |
631 | diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c |
632 | index 06a60b19acfb..29ec9ab3fd55 100644 |
633 | --- a/arch/mips/kvm/mips.c |
634 | +++ b/arch/mips/kvm/mips.c |
635 | @@ -360,8 +360,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) |
636 | dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run); |
637 | |
638 | /* Invalidate the icache for these ranges */ |
639 | - local_flush_icache_range((unsigned long)gebase, |
640 | - (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); |
641 | + flush_icache_range((unsigned long)gebase, |
642 | + (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); |
643 | |
644 | /* |
645 | * Allocate comm page for guest kernel, a TLB will be reserved for |
646 | diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c |
647 | index 325f30d82b64..47ef8fdcd382 100644 |
648 | --- a/arch/parisc/kernel/time.c |
649 | +++ b/arch/parisc/kernel/time.c |
650 | @@ -289,9 +289,26 @@ void __init time_init(void) |
651 | |
652 | cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */ |
653 | |
654 | - /* register at clocksource framework */ |
655 | - clocksource_register_hz(&clocksource_cr16, cr16_hz); |
656 | - |
657 | /* register as sched_clock source */ |
658 | sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz); |
659 | } |
660 | + |
661 | +static int __init init_cr16_clocksource(void) |
662 | +{ |
663 | + /* |
664 | + * The cr16 interval timers are not syncronized across CPUs, so mark |
665 | + * them unstable and lower rating on SMP systems. |
666 | + */ |
667 | + if (num_online_cpus() > 1) { |
668 | + clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; |
669 | + clocksource_cr16.rating = 0; |
670 | + } |
671 | + |
672 | + /* register at clocksource framework */ |
673 | + clocksource_register_hz(&clocksource_cr16, |
674 | + 100 * PAGE0->mem_10msec); |
675 | + |
676 | + return 0; |
677 | +} |
678 | + |
679 | +device_initcall(init_cr16_clocksource); |
680 | diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c |
681 | index 8ff9253930af..1a0b4f63f0e9 100644 |
682 | --- a/arch/parisc/mm/fault.c |
683 | +++ b/arch/parisc/mm/fault.c |
684 | @@ -234,7 +234,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long code, |
685 | tsk->comm, code, address); |
686 | print_vma_addr(KERN_CONT " in ", regs->iaoq[0]); |
687 | |
688 | - pr_cont(" trap #%lu: %s%c", code, trap_name(code), |
689 | + pr_cont("\ntrap #%lu: %s%c", code, trap_name(code), |
690 | vma ? ',':'\n'); |
691 | |
692 | if (vma) |
693 | diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c |
694 | index 9cc050f9536c..1113389d0a39 100644 |
695 | --- a/arch/s390/crypto/prng.c |
696 | +++ b/arch/s390/crypto/prng.c |
697 | @@ -507,8 +507,10 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf, |
698 | prng_data->prngws.byte_counter += n; |
699 | prng_data->prngws.reseed_counter += n; |
700 | |
701 | - if (copy_to_user(ubuf, prng_data->buf, chunk)) |
702 | - return -EFAULT; |
703 | + if (copy_to_user(ubuf, prng_data->buf, chunk)) { |
704 | + ret = -EFAULT; |
705 | + break; |
706 | + } |
707 | |
708 | nbytes -= chunk; |
709 | ret += chunk; |
710 | diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c |
711 | index e959c02e0cac..8705ee66c087 100644 |
712 | --- a/arch/s390/kernel/topology.c |
713 | +++ b/arch/s390/kernel/topology.c |
714 | @@ -448,6 +448,7 @@ static int __init s390_topology_init(void) |
715 | struct sysinfo_15_1_x *info; |
716 | int i; |
717 | |
718 | + set_sched_topology(s390_topology); |
719 | if (!MACHINE_HAS_TOPOLOGY) |
720 | return 0; |
721 | tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL); |
722 | @@ -460,7 +461,6 @@ static int __init s390_topology_init(void) |
723 | alloc_masks(info, &socket_info, 1); |
724 | alloc_masks(info, &book_info, 2); |
725 | alloc_masks(info, &drawer_info, 3); |
726 | - set_sched_topology(s390_topology); |
727 | return 0; |
728 | } |
729 | early_initcall(s390_topology_init); |
730 | diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c |
731 | index 6b2f72f523b9..049e3860ac54 100644 |
732 | --- a/arch/s390/pci/pci_dma.c |
733 | +++ b/arch/s390/pci/pci_dma.c |
734 | @@ -419,6 +419,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg, |
735 | size_t size, dma_addr_t *handle, |
736 | enum dma_data_direction dir) |
737 | { |
738 | + unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; |
739 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
740 | dma_addr_t dma_addr_base, dma_addr; |
741 | int flags = ZPCI_PTE_VALID; |
742 | @@ -426,8 +427,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg, |
743 | unsigned long pa = 0; |
744 | int ret; |
745 | |
746 | - size = PAGE_ALIGN(size); |
747 | - dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT); |
748 | + dma_addr_base = dma_alloc_address(dev, nr_pages); |
749 | if (dma_addr_base == DMA_ERROR_CODE) |
750 | return -ENOMEM; |
751 | |
752 | @@ -436,26 +436,27 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg, |
753 | flags |= ZPCI_TABLE_PROTECTED; |
754 | |
755 | for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) { |
756 | - pa = page_to_phys(sg_page(s)) + s->offset; |
757 | - ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags); |
758 | + pa = page_to_phys(sg_page(s)); |
759 | + ret = __dma_update_trans(zdev, pa, dma_addr, |
760 | + s->offset + s->length, flags); |
761 | if (ret) |
762 | goto unmap; |
763 | |
764 | - dma_addr += s->length; |
765 | + dma_addr += s->offset + s->length; |
766 | } |
767 | ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags); |
768 | if (ret) |
769 | goto unmap; |
770 | |
771 | *handle = dma_addr_base; |
772 | - atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages); |
773 | + atomic64_add(nr_pages, &zdev->mapped_pages); |
774 | |
775 | return ret; |
776 | |
777 | unmap: |
778 | dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base, |
779 | ZPCI_PTE_INVALID); |
780 | - dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT); |
781 | + dma_free_address(dev, dma_addr_base, nr_pages); |
782 | zpci_err("map error:\n"); |
783 | zpci_err_dma(ret, pa); |
784 | return ret; |
785 | diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h |
786 | index ae135de547f5..835aa51c7f6e 100644 |
787 | --- a/arch/x86/include/uapi/asm/prctl.h |
788 | +++ b/arch/x86/include/uapi/asm/prctl.h |
789 | @@ -6,10 +6,8 @@ |
790 | #define ARCH_GET_FS 0x1003 |
791 | #define ARCH_GET_GS 0x1004 |
792 | |
793 | -#ifdef CONFIG_CHECKPOINT_RESTORE |
794 | -# define ARCH_MAP_VDSO_X32 0x2001 |
795 | -# define ARCH_MAP_VDSO_32 0x2002 |
796 | -# define ARCH_MAP_VDSO_64 0x2003 |
797 | -#endif |
798 | +#define ARCH_MAP_VDSO_X32 0x2001 |
799 | +#define ARCH_MAP_VDSO_32 0x2002 |
800 | +#define ARCH_MAP_VDSO_64 0x2003 |
801 | |
802 | #endif /* _ASM_X86_PRCTL_H */ |
803 | diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
804 | index c2048b44851c..dd62708c6a67 100644 |
805 | --- a/arch/x86/kernel/cpu/common.c |
806 | +++ b/arch/x86/kernel/cpu/common.c |
807 | @@ -667,13 +667,14 @@ void get_cpu_cap(struct cpuinfo_x86 *c) |
808 | c->x86_capability[CPUID_1_EDX] = edx; |
809 | } |
810 | |
811 | + /* Thermal and Power Management Leaf: level 0x00000006 (eax) */ |
812 | + if (c->cpuid_level >= 0x00000006) |
813 | + c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); |
814 | + |
815 | /* Additional Intel-defined flags: level 0x00000007 */ |
816 | if (c->cpuid_level >= 0x00000007) { |
817 | cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); |
818 | - |
819 | c->x86_capability[CPUID_7_0_EBX] = ebx; |
820 | - |
821 | - c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); |
822 | c->x86_capability[CPUID_7_ECX] = ecx; |
823 | } |
824 | |
825 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
826 | index 04c5d96b1d67..f3648c978d2f 100644 |
827 | --- a/arch/x86/kvm/x86.c |
828 | +++ b/arch/x86/kvm/x86.c |
829 | @@ -3036,6 +3036,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, |
830 | memset(&events->reserved, 0, sizeof(events->reserved)); |
831 | } |
832 | |
833 | +static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags); |
834 | + |
835 | static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, |
836 | struct kvm_vcpu_events *events) |
837 | { |
838 | @@ -3072,10 +3074,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, |
839 | vcpu->arch.apic->sipi_vector = events->sipi_vector; |
840 | |
841 | if (events->flags & KVM_VCPUEVENT_VALID_SMM) { |
842 | + u32 hflags = vcpu->arch.hflags; |
843 | if (events->smi.smm) |
844 | - vcpu->arch.hflags |= HF_SMM_MASK; |
845 | + hflags |= HF_SMM_MASK; |
846 | else |
847 | - vcpu->arch.hflags &= ~HF_SMM_MASK; |
848 | + hflags &= ~HF_SMM_MASK; |
849 | + kvm_set_hflags(vcpu, hflags); |
850 | + |
851 | vcpu->arch.smi_pending = events->smi.pending; |
852 | if (events->smi.smm_inside_nmi) |
853 | vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; |
854 | diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h |
855 | index 50e30e7b059d..a84332aefc2d 100644 |
856 | --- a/drivers/base/power/power.h |
857 | +++ b/drivers/base/power/power.h |
858 | @@ -21,14 +21,22 @@ extern void pm_runtime_init(struct device *dev); |
859 | extern void pm_runtime_reinit(struct device *dev); |
860 | extern void pm_runtime_remove(struct device *dev); |
861 | |
862 | +#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0) |
863 | +#define WAKE_IRQ_DEDICATED_MANAGED BIT(1) |
864 | +#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \ |
865 | + WAKE_IRQ_DEDICATED_MANAGED) |
866 | + |
867 | struct wake_irq { |
868 | struct device *dev; |
869 | + unsigned int status; |
870 | int irq; |
871 | - bool dedicated_irq:1; |
872 | }; |
873 | |
874 | extern void dev_pm_arm_wake_irq(struct wake_irq *wirq); |
875 | extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq); |
876 | +extern void dev_pm_enable_wake_irq_check(struct device *dev, |
877 | + bool can_change_status); |
878 | +extern void dev_pm_disable_wake_irq_check(struct device *dev); |
879 | |
880 | #ifdef CONFIG_PM_SLEEP |
881 | |
882 | @@ -104,6 +112,15 @@ static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq) |
883 | { |
884 | } |
885 | |
886 | +static inline void dev_pm_enable_wake_irq_check(struct device *dev, |
887 | + bool can_change_status) |
888 | +{ |
889 | +} |
890 | + |
891 | +static inline void dev_pm_disable_wake_irq_check(struct device *dev) |
892 | +{ |
893 | +} |
894 | + |
895 | #endif |
896 | |
897 | #ifdef CONFIG_PM_SLEEP |
898 | diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c |
899 | index 82a081ea4317..23f3b95a1158 100644 |
900 | --- a/drivers/base/power/runtime.c |
901 | +++ b/drivers/base/power/runtime.c |
902 | @@ -515,7 +515,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) |
903 | |
904 | callback = RPM_GET_CALLBACK(dev, runtime_suspend); |
905 | |
906 | - dev_pm_enable_wake_irq(dev); |
907 | + dev_pm_enable_wake_irq_check(dev, true); |
908 | retval = rpm_callback(callback, dev); |
909 | if (retval) |
910 | goto fail; |
911 | @@ -554,7 +554,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) |
912 | return retval; |
913 | |
914 | fail: |
915 | - dev_pm_disable_wake_irq(dev); |
916 | + dev_pm_disable_wake_irq_check(dev); |
917 | __update_runtime_status(dev, RPM_ACTIVE); |
918 | dev->power.deferred_resume = false; |
919 | wake_up_all(&dev->power.wait_queue); |
920 | @@ -737,12 +737,12 @@ static int rpm_resume(struct device *dev, int rpmflags) |
921 | |
922 | callback = RPM_GET_CALLBACK(dev, runtime_resume); |
923 | |
924 | - dev_pm_disable_wake_irq(dev); |
925 | + dev_pm_disable_wake_irq_check(dev); |
926 | retval = rpm_callback(callback, dev); |
927 | if (retval) { |
928 | __update_runtime_status(dev, RPM_SUSPENDED); |
929 | pm_runtime_cancel_pending(dev); |
930 | - dev_pm_enable_wake_irq(dev); |
931 | + dev_pm_enable_wake_irq_check(dev, false); |
932 | } else { |
933 | no_callback: |
934 | __update_runtime_status(dev, RPM_ACTIVE); |
935 | diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c |
936 | index 0d77cd6fd8d1..404d94c6c8bc 100644 |
937 | --- a/drivers/base/power/wakeirq.c |
938 | +++ b/drivers/base/power/wakeirq.c |
939 | @@ -110,8 +110,10 @@ void dev_pm_clear_wake_irq(struct device *dev) |
940 | dev->power.wakeirq = NULL; |
941 | spin_unlock_irqrestore(&dev->power.lock, flags); |
942 | |
943 | - if (wirq->dedicated_irq) |
944 | + if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) { |
945 | free_irq(wirq->irq, wirq); |
946 | + wirq->status &= ~WAKE_IRQ_DEDICATED_MASK; |
947 | + } |
948 | kfree(wirq); |
949 | } |
950 | EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq); |
951 | @@ -179,7 +181,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) |
952 | |
953 | wirq->dev = dev; |
954 | wirq->irq = irq; |
955 | - wirq->dedicated_irq = true; |
956 | irq_set_status_flags(irq, IRQ_NOAUTOEN); |
957 | |
958 | /* |
959 | @@ -195,6 +196,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) |
960 | if (err) |
961 | goto err_free_irq; |
962 | |
963 | + wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED; |
964 | + |
965 | return err; |
966 | |
967 | err_free_irq: |
968 | @@ -210,9 +213,9 @@ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq); |
969 | * dev_pm_enable_wake_irq - Enable device wake-up interrupt |
970 | * @dev: Device |
971 | * |
972 | - * Called from the bus code or the device driver for |
973 | - * runtime_suspend() to enable the wake-up interrupt while |
974 | - * the device is running. |
975 | + * Optionally called from the bus code or the device driver for |
976 | + * runtime_resume() to override the PM runtime core managed wake-up |
977 | + * interrupt handling to enable the wake-up interrupt. |
978 | * |
979 | * Note that for runtime_suspend()) the wake-up interrupts |
980 | * should be unconditionally enabled unlike for suspend() |
981 | @@ -222,7 +225,7 @@ void dev_pm_enable_wake_irq(struct device *dev) |
982 | { |
983 | struct wake_irq *wirq = dev->power.wakeirq; |
984 | |
985 | - if (wirq && wirq->dedicated_irq) |
986 | + if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)) |
987 | enable_irq(wirq->irq); |
988 | } |
989 | EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq); |
990 | @@ -231,20 +234,73 @@ EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq); |
991 | * dev_pm_disable_wake_irq - Disable device wake-up interrupt |
992 | * @dev: Device |
993 | * |
994 | - * Called from the bus code or the device driver for |
995 | - * runtime_resume() to disable the wake-up interrupt while |
996 | - * the device is running. |
997 | + * Optionally called from the bus code or the device driver for |
998 | + * runtime_suspend() to override the PM runtime core managed wake-up |
999 | + * interrupt handling to disable the wake-up interrupt. |
1000 | */ |
1001 | void dev_pm_disable_wake_irq(struct device *dev) |
1002 | { |
1003 | struct wake_irq *wirq = dev->power.wakeirq; |
1004 | |
1005 | - if (wirq && wirq->dedicated_irq) |
1006 | + if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)) |
1007 | disable_irq_nosync(wirq->irq); |
1008 | } |
1009 | EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq); |
1010 | |
1011 | /** |
1012 | + * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt |
1013 | + * @dev: Device |
1014 | + * @can_change_status: Can change wake-up interrupt status |
1015 | + * |
1016 | + * Enables wakeirq conditionally. We need to enable wake-up interrupt |
1017 | + * lazily on the first rpm_suspend(). This is needed as the consumer device |
1018 | + * starts in RPM_SUSPENDED state, and the the first pm_runtime_get() would |
1019 | + * otherwise try to disable already disabled wakeirq. The wake-up interrupt |
1020 | + * starts disabled with IRQ_NOAUTOEN set. |
1021 | + * |
1022 | + * Should be only called from rpm_suspend() and rpm_resume() path. |
1023 | + * Caller must hold &dev->power.lock to change wirq->status |
1024 | + */ |
1025 | +void dev_pm_enable_wake_irq_check(struct device *dev, |
1026 | + bool can_change_status) |
1027 | +{ |
1028 | + struct wake_irq *wirq = dev->power.wakeirq; |
1029 | + |
1030 | + if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK))) |
1031 | + return; |
1032 | + |
1033 | + if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) { |
1034 | + goto enable; |
1035 | + } else if (can_change_status) { |
1036 | + wirq->status |= WAKE_IRQ_DEDICATED_MANAGED; |
1037 | + goto enable; |
1038 | + } |
1039 | + |
1040 | + return; |
1041 | + |
1042 | +enable: |
1043 | + enable_irq(wirq->irq); |
1044 | +} |
1045 | + |
1046 | +/** |
1047 | + * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt |
1048 | + * @dev: Device |
1049 | + * |
1050 | + * Disables wake-up interrupt conditionally based on status. |
1051 | + * Should be only called from rpm_suspend() and rpm_resume() path. |
1052 | + */ |
1053 | +void dev_pm_disable_wake_irq_check(struct device *dev) |
1054 | +{ |
1055 | + struct wake_irq *wirq = dev->power.wakeirq; |
1056 | + |
1057 | + if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK))) |
1058 | + return; |
1059 | + |
1060 | + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) |
1061 | + disable_irq_nosync(wirq->irq); |
1062 | +} |
1063 | + |
1064 | +/** |
1065 | * dev_pm_arm_wake_irq - Arm device wake-up |
1066 | * @wirq: Device wake-up interrupt |
1067 | * |
1068 | diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c |
1069 | index e3bf31b37138..a1ce0607bf7b 100644 |
1070 | --- a/drivers/char/tpm/tpm_tis_core.c |
1071 | +++ b/drivers/char/tpm/tpm_tis_core.c |
1072 | @@ -185,7 +185,12 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) |
1073 | TPM_STS_DATA_AVAIL | TPM_STS_VALID, |
1074 | chip->timeout_c, |
1075 | &priv->read_queue, true) == 0) { |
1076 | - burstcnt = min_t(int, get_burstcount(chip), count - size); |
1077 | + burstcnt = get_burstcount(chip); |
1078 | + if (burstcnt < 0) { |
1079 | + dev_err(&chip->dev, "Unable to read burstcount\n"); |
1080 | + return burstcnt; |
1081 | + } |
1082 | + burstcnt = min_t(int, burstcnt, count - size); |
1083 | |
1084 | rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality), |
1085 | burstcnt, buf + size); |
1086 | @@ -271,7 +276,13 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len) |
1087 | } |
1088 | |
1089 | while (count < len - 1) { |
1090 | - burstcnt = min_t(int, get_burstcount(chip), len - count - 1); |
1091 | + burstcnt = get_burstcount(chip); |
1092 | + if (burstcnt < 0) { |
1093 | + dev_err(&chip->dev, "Unable to read burstcount\n"); |
1094 | + rc = burstcnt; |
1095 | + goto out_err; |
1096 | + } |
1097 | + burstcnt = min_t(int, burstcnt, len - count - 1); |
1098 | rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality), |
1099 | burstcnt, buf + count); |
1100 | if (rc < 0) |
1101 | diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c |
1102 | index f4fdac55727c..0621fbfb4beb 100644 |
1103 | --- a/drivers/clk/clk-wm831x.c |
1104 | +++ b/drivers/clk/clk-wm831x.c |
1105 | @@ -243,7 +243,7 @@ static int wm831x_clkout_is_prepared(struct clk_hw *hw) |
1106 | if (ret < 0) { |
1107 | dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n", |
1108 | ret); |
1109 | - return true; |
1110 | + return false; |
1111 | } |
1112 | |
1113 | return (ret & WM831X_CLKOUT_ENA) != 0; |
1114 | diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c |
1115 | index 6a964144a5b5..6a49ba2b9671 100644 |
1116 | --- a/drivers/clk/imx/clk-imx31.c |
1117 | +++ b/drivers/clk/imx/clk-imx31.c |
1118 | @@ -157,10 +157,8 @@ static void __init _mx31_clocks_init(unsigned long fref) |
1119 | } |
1120 | } |
1121 | |
1122 | -int __init mx31_clocks_init(void) |
1123 | +int __init mx31_clocks_init(unsigned long fref) |
1124 | { |
1125 | - u32 fref = 26000000; /* default */ |
1126 | - |
1127 | _mx31_clocks_init(fref); |
1128 | |
1129 | clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0"); |
1130 | diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c |
1131 | index 52a7d3959875..28eb200d0f1e 100644 |
1132 | --- a/drivers/clk/qcom/gcc-ipq806x.c |
1133 | +++ b/drivers/clk/qcom/gcc-ipq806x.c |
1134 | @@ -2990,11 +2990,11 @@ static int gcc_ipq806x_probe(struct platform_device *pdev) |
1135 | struct regmap *regmap; |
1136 | int ret; |
1137 | |
1138 | - ret = qcom_cc_register_board_clk(dev, "cxo_board", "cxo", 19200000); |
1139 | + ret = qcom_cc_register_board_clk(dev, "cxo_board", "cxo", 25000000); |
1140 | if (ret) |
1141 | return ret; |
1142 | |
1143 | - ret = qcom_cc_register_board_clk(dev, "pxo_board", "pxo", 27000000); |
1144 | + ret = qcom_cc_register_board_clk(dev, "pxo_board", "pxo", 25000000); |
1145 | if (ret) |
1146 | return ret; |
1147 | |
1148 | diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c |
1149 | index 9375777776d9..b533f99550e1 100644 |
1150 | --- a/drivers/clk/renesas/clk-mstp.c |
1151 | +++ b/drivers/clk/renesas/clk-mstp.c |
1152 | @@ -37,12 +37,14 @@ |
1153 | * @smstpcr: module stop control register |
1154 | * @mstpsr: module stop status register (optional) |
1155 | * @lock: protects writes to SMSTPCR |
1156 | + * @width_8bit: registers are 8-bit, not 32-bit |
1157 | */ |
1158 | struct mstp_clock_group { |
1159 | struct clk_onecell_data data; |
1160 | void __iomem *smstpcr; |
1161 | void __iomem *mstpsr; |
1162 | spinlock_t lock; |
1163 | + bool width_8bit; |
1164 | }; |
1165 | |
1166 | /** |
1167 | @@ -59,6 +61,18 @@ struct mstp_clock { |
1168 | |
1169 | #define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw) |
1170 | |
1171 | +static inline u32 cpg_mstp_read(struct mstp_clock_group *group, |
1172 | + u32 __iomem *reg) |
1173 | +{ |
1174 | + return group->width_8bit ? readb(reg) : clk_readl(reg); |
1175 | +} |
1176 | + |
1177 | +static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val, |
1178 | + u32 __iomem *reg) |
1179 | +{ |
1180 | + group->width_8bit ? writeb(val, reg) : clk_writel(val, reg); |
1181 | +} |
1182 | + |
1183 | static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable) |
1184 | { |
1185 | struct mstp_clock *clock = to_mstp_clock(hw); |
1186 | @@ -70,12 +84,12 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable) |
1187 | |
1188 | spin_lock_irqsave(&group->lock, flags); |
1189 | |
1190 | - value = clk_readl(group->smstpcr); |
1191 | + value = cpg_mstp_read(group, group->smstpcr); |
1192 | if (enable) |
1193 | value &= ~bitmask; |
1194 | else |
1195 | value |= bitmask; |
1196 | - clk_writel(value, group->smstpcr); |
1197 | + cpg_mstp_write(group, value, group->smstpcr); |
1198 | |
1199 | spin_unlock_irqrestore(&group->lock, flags); |
1200 | |
1201 | @@ -83,7 +97,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable) |
1202 | return 0; |
1203 | |
1204 | for (i = 1000; i > 0; --i) { |
1205 | - if (!(clk_readl(group->mstpsr) & bitmask)) |
1206 | + if (!(cpg_mstp_read(group, group->mstpsr) & bitmask)) |
1207 | break; |
1208 | cpu_relax(); |
1209 | } |
1210 | @@ -114,9 +128,9 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw) |
1211 | u32 value; |
1212 | |
1213 | if (group->mstpsr) |
1214 | - value = clk_readl(group->mstpsr); |
1215 | + value = cpg_mstp_read(group, group->mstpsr); |
1216 | else |
1217 | - value = clk_readl(group->smstpcr); |
1218 | + value = cpg_mstp_read(group, group->smstpcr); |
1219 | |
1220 | return !(value & BIT(clock->bit_index)); |
1221 | } |
1222 | @@ -188,6 +202,9 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) |
1223 | return; |
1224 | } |
1225 | |
1226 | + if (of_device_is_compatible(np, "renesas,r7s72100-mstp-clocks")) |
1227 | + group->width_8bit = true; |
1228 | + |
1229 | for (i = 0; i < MSTP_MAX_CLOCKS; ++i) |
1230 | clks[i] = ERR_PTR(-ENOENT); |
1231 | |
1232 | diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c |
1233 | index e1365e7491ae..25c41cd9cdfc 100644 |
1234 | --- a/drivers/clk/renesas/renesas-cpg-mssr.c |
1235 | +++ b/drivers/clk/renesas/renesas-cpg-mssr.c |
1236 | @@ -33,9 +33,9 @@ |
1237 | #include "clk-div6.h" |
1238 | |
1239 | #ifdef DEBUG |
1240 | -#define WARN_DEBUG(x) do { } while (0) |
1241 | -#else |
1242 | #define WARN_DEBUG(x) WARN_ON(x) |
1243 | +#else |
1244 | +#define WARN_DEBUG(x) do { } while (0) |
1245 | #endif |
1246 | |
1247 | |
1248 | diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c |
1249 | index 2646d980087b..5c6d37bdf247 100644 |
1250 | --- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c |
1251 | +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c |
1252 | @@ -344,10 +344,10 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4, |
1253 | static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x", |
1254 | "pll-audio-2x", "pll-audio" }; |
1255 | static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents, |
1256 | - 0x0b0, 16, 2, BIT(31), 0); |
1257 | + 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT); |
1258 | |
1259 | static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents, |
1260 | - 0x0b4, 16, 2, BIT(31), 0); |
1261 | + 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT); |
1262 | |
1263 | /* TODO: the parent for most of the USB clocks is not known */ |
1264 | static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", |
1265 | @@ -415,7 +415,7 @@ static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", |
1266 | 0x13c, 16, 3, BIT(31), CLK_SET_RATE_PARENT); |
1267 | |
1268 | static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio", |
1269 | - 0x140, BIT(31), 0); |
1270 | + 0x140, BIT(31), CLK_SET_RATE_PARENT); |
1271 | static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", |
1272 | 0x144, BIT(31), 0); |
1273 | |
1274 | diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c |
1275 | index 4d70590f05e3..21c427d86f28 100644 |
1276 | --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c |
1277 | +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c |
1278 | @@ -394,16 +394,16 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4, |
1279 | static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x", |
1280 | "pll-audio-2x", "pll-audio" }; |
1281 | static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents, |
1282 | - 0x0b0, 16, 2, BIT(31), 0); |
1283 | + 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT); |
1284 | |
1285 | static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents, |
1286 | - 0x0b4, 16, 2, BIT(31), 0); |
1287 | + 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT); |
1288 | |
1289 | static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", i2s_parents, |
1290 | - 0x0b8, 16, 2, BIT(31), 0); |
1291 | + 0x0b8, 16, 2, BIT(31), CLK_SET_RATE_PARENT); |
1292 | |
1293 | static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio", |
1294 | - 0x0c0, 0, 4, BIT(31), 0); |
1295 | + 0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT); |
1296 | |
1297 | static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", |
1298 | 0x0cc, BIT(8), 0); |
1299 | @@ -466,7 +466,7 @@ static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", |
1300 | 0x13c, 16, 3, BIT(31), 0); |
1301 | |
1302 | static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio", |
1303 | - 0x140, BIT(31), 0); |
1304 | + 0x140, BIT(31), CLK_SET_RATE_PARENT); |
1305 | static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", |
1306 | 0x144, BIT(31), 0); |
1307 | |
1308 | diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c |
1309 | index bfa17d33ef3b..9fd6043314eb 100644 |
1310 | --- a/drivers/clk/ti/clk-7xx.c |
1311 | +++ b/drivers/clk/ti/clk-7xx.c |
1312 | @@ -201,7 +201,6 @@ static struct ti_dt_clk dra7xx_clks[] = { |
1313 | DT_CLK(NULL, "atl_dpll_clk_mux", "atl_dpll_clk_mux"), |
1314 | DT_CLK(NULL, "atl_gfclk_mux", "atl_gfclk_mux"), |
1315 | DT_CLK(NULL, "dcan1_sys_clk_mux", "dcan1_sys_clk_mux"), |
1316 | - DT_CLK(NULL, "gmac_gmii_ref_clk_div", "gmac_gmii_ref_clk_div"), |
1317 | DT_CLK(NULL, "gmac_rft_clk_mux", "gmac_rft_clk_mux"), |
1318 | DT_CLK(NULL, "gpu_core_gclk_mux", "gpu_core_gclk_mux"), |
1319 | DT_CLK(NULL, "gpu_hyd_gclk_mux", "gpu_hyd_gclk_mux"), |
1320 | diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c |
1321 | index 1ac199cd75e7..a4944e22f294 100644 |
1322 | --- a/drivers/firmware/efi/efi.c |
1323 | +++ b/drivers/firmware/efi/efi.c |
1324 | @@ -259,8 +259,10 @@ static __init int efivar_ssdt_load(void) |
1325 | } |
1326 | |
1327 | data = kmalloc(size, GFP_KERNEL); |
1328 | - if (!data) |
1329 | + if (!data) { |
1330 | + ret = -ENOMEM; |
1331 | goto free_entry; |
1332 | + } |
1333 | |
1334 | ret = efivar_entry_get(entry, NULL, &size, data); |
1335 | if (ret) { |
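(Illustrative aside, not part of the patch itself.) The hunk above makes the allocation-failure path store -ENOMEM before jumping to the cleanup label; without it the function can report success even though kmalloc() failed. A minimal sketch of that goto-cleanup pattern (load_blob() and consume() are invented names, not the EFI code):

#include <stdlib.h>
#include <errno.h>

int load_blob(size_t size, void (*consume)(void *, size_t))
{
        void *data;
        int ret = 0;

        data = malloc(size);
        if (!data) {
                ret = -ENOMEM;          /* analogous to the line the patch adds */
                goto out;
        }

        consume(data, size);
        free(data);
out:
        return ret;
}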
1336 | diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h |
1337 | index 974bd7bcc801..59ac90025552 100644 |
1338 | --- a/drivers/gpu/drm/i915/i915_gem_request.h |
1339 | +++ b/drivers/gpu/drm/i915/i915_gem_request.h |
1340 | @@ -344,6 +344,25 @@ i915_gem_active_set(struct i915_gem_active *active, |
1341 | rcu_assign_pointer(active->request, request); |
1342 | } |
1343 | |
1344 | +/** |
1345 | + * i915_gem_active_set_retire_fn - updates the retirement callback |
1346 | + * @active - the active tracker |
1347 | + * @fn - the routine called when the request is retired |
1348 | + * @mutex - struct_mutex used to guard retirements |
1349 | + * |
1350 | + * i915_gem_active_set_retire_fn() updates the function pointer that |
1351 | + * is called when the final request associated with the @active tracker |
1352 | + * is retired. |
1353 | + */ |
1354 | +static inline void |
1355 | +i915_gem_active_set_retire_fn(struct i915_gem_active *active, |
1356 | + i915_gem_retire_fn fn, |
1357 | + struct mutex *mutex) |
1358 | +{ |
1359 | + lockdep_assert_held(mutex); |
1360 | + active->retire = fn ?: i915_gem_retire_noop; |
1361 | +} |
1362 | + |
1363 | static inline struct drm_i915_gem_request * |
1364 | __i915_gem_active_peek(const struct i915_gem_active *active) |
1365 | { |
1366 | diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c |
1367 | index bf344d08356a..055525013d2f 100644 |
1368 | --- a/drivers/gpu/drm/i915/intel_dp.c |
1369 | +++ b/drivers/gpu/drm/i915/intel_dp.c |
1370 | @@ -280,7 +280,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, |
1371 | struct intel_dp *intel_dp); |
1372 | static void |
1373 | intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, |
1374 | - struct intel_dp *intel_dp); |
1375 | + struct intel_dp *intel_dp, |
1376 | + bool force_disable_vdd); |
1377 | static void |
1378 | intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp); |
1379 | |
1380 | @@ -442,7 +443,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) |
1381 | |
1382 | /* init power sequencer on this pipe and port */ |
1383 | intel_dp_init_panel_power_sequencer(dev, intel_dp); |
1384 | - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); |
1385 | + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true); |
1386 | |
1387 | /* |
1388 | * Even vdd force doesn't work until we've made |
1389 | @@ -479,7 +480,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) |
1390 | * Only the HW needs to be reprogrammed, the SW state is fixed and |
1391 | * has been setup during connector init. |
1392 | */ |
1393 | - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); |
1394 | + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false); |
1395 | |
1396 | return 0; |
1397 | } |
1398 | @@ -562,7 +563,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) |
1399 | port_name(port), pipe_name(intel_dp->pps_pipe)); |
1400 | |
1401 | intel_dp_init_panel_power_sequencer(dev, intel_dp); |
1402 | - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); |
1403 | + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false); |
1404 | } |
1405 | |
1406 | void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) |
1407 | @@ -2924,7 +2925,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp) |
1408 | |
1409 | /* init power sequencer on this pipe and port */ |
1410 | intel_dp_init_panel_power_sequencer(dev, intel_dp); |
1411 | - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); |
1412 | + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true); |
1413 | } |
1414 | |
1415 | static void vlv_pre_enable_dp(struct intel_encoder *encoder, |
1416 | @@ -4017,6 +4018,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) |
1417 | if (!to_intel_crtc(intel_encoder->base.crtc)->active) |
1418 | return; |
1419 | |
1420 | + /* FIXME: we need to synchronize this sort of stuff with hardware |
1421 | + * readout. Currently fast link training doesn't work on boot-up. */ |
1422 | + if (!intel_dp->lane_count) |
1423 | + return; |
1424 | + |
1425 | /* if link training is requested we should perform it always */ |
1426 | if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) || |
1427 | (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) { |
1428 | @@ -5054,7 +5060,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, |
1429 | |
1430 | static void |
1431 | intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, |
1432 | - struct intel_dp *intel_dp) |
1433 | + struct intel_dp *intel_dp, |
1434 | + bool force_disable_vdd) |
1435 | { |
1436 | struct drm_i915_private *dev_priv = to_i915(dev); |
1437 | u32 pp_on, pp_off, pp_div, port_sel = 0; |
1438 | @@ -5067,6 +5074,31 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, |
1439 | |
1440 | intel_pps_get_registers(dev_priv, intel_dp, ®s); |
1441 | |
1442 | + /* |
1443 | + * On some VLV machines the BIOS can leave the VDD |
1444 | + * enabled even on power seqeuencers which aren't |
1445 | + * hooked up to any port. This would mess up the |
1446 | + * power domain tracking the first time we pick |
1447 | + * one of these power sequencers for use since |
1448 | + * edp_panel_vdd_on() would notice that the VDD was |
1449 | + * already on and therefore wouldn't grab the power |
1450 | + * domain reference. Disable VDD first to avoid this. |
1451 | + * This also avoids spuriously turning the VDD on as |
1452 | + * soon as the new power seqeuencer gets initialized. |
1453 | + */ |
1454 | + if (force_disable_vdd) { |
1455 | + u32 pp = ironlake_get_pp_control(intel_dp); |
1456 | + |
1457 | + WARN(pp & PANEL_POWER_ON, "Panel power already on\n"); |
1458 | + |
1459 | + if (pp & EDP_FORCE_VDD) |
1460 | + DRM_DEBUG_KMS("VDD already on, disabling first\n"); |
1461 | + |
1462 | + pp &= ~EDP_FORCE_VDD; |
1463 | + |
1464 | + I915_WRITE(regs.pp_ctrl, pp); |
1465 | + } |
1466 | + |
1467 | pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | |
1468 | (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); |
1469 | pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | |
1470 | @@ -5119,7 +5151,7 @@ static void intel_dp_pps_init(struct drm_device *dev, |
1471 | vlv_initial_power_sequencer_setup(intel_dp); |
1472 | } else { |
1473 | intel_dp_init_panel_power_sequencer(dev, intel_dp); |
1474 | - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); |
1475 | + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false); |
1476 | } |
1477 | } |
1478 | |
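
The force_disable_vdd path added above is, at its core, a read-modify-write that clears the force-VDD control bit before the panel power sequencer delays are programmed. A minimal userspace sketch of that pattern follows; the register helpers are mocks and the bit positions are assumptions for the demo, not the real i915 register layout.

    #include <stdint.h>
    #include <stdio.h>

    #define PANEL_POWER_ON  (1u << 0)       /* assumed bit positions, demo only */
    #define EDP_FORCE_VDD   (1u << 3)

    static uint32_t mock_pp_ctrl = EDP_FORCE_VDD;   /* BIOS left VDD forced on */

    static uint32_t read_pp_ctrl(void)        { return mock_pp_ctrl; }
    static void     write_pp_ctrl(uint32_t v) { mock_pp_ctrl = v; }

    static void init_pps_registers(int force_disable_vdd)
    {
            if (force_disable_vdd) {
                    uint32_t pp = read_pp_ctrl();

                    if (pp & PANEL_POWER_ON)
                            fprintf(stderr, "warning: panel power already on\n");
                    if (pp & EDP_FORCE_VDD)
                            printf("VDD already on, disabling first\n");

                    write_pp_ctrl(pp & ~EDP_FORCE_VDD);
            }
            /* ...then program the on/off/divisor delays... */
    }

    int main(void)
    {
            init_pps_registers(1);
            printf("pp_ctrl = 0x%x\n", (unsigned)read_pp_ctrl());
            return 0;
    }
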
1479 | diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c |
1480 | index a24bc8c7889f..a2655cd5a84e 100644 |
1481 | --- a/drivers/gpu/drm/i915/intel_overlay.c |
1482 | +++ b/drivers/gpu/drm/i915/intel_overlay.c |
1483 | @@ -216,7 +216,8 @@ static void intel_overlay_submit_request(struct intel_overlay *overlay, |
1484 | { |
1485 | GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip, |
1486 | &overlay->i915->drm.struct_mutex)); |
1487 | - overlay->last_flip.retire = retire; |
1488 | + i915_gem_active_set_retire_fn(&overlay->last_flip, retire, |
1489 | + &overlay->i915->drm.struct_mutex); |
1490 | i915_gem_active_set(&overlay->last_flip, req); |
1491 | i915_add_request(req); |
1492 | } |
1493 | @@ -839,8 +840,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, |
1494 | if (ret) |
1495 | goto out_unpin; |
1496 | |
1497 | - i915_gem_track_fb(overlay->vma->obj, new_bo, |
1498 | - INTEL_FRONTBUFFER_OVERLAY(pipe)); |
1499 | + i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL, |
1500 | + vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe)); |
1501 | |
1502 | overlay->old_vma = overlay->vma; |
1503 | overlay->vma = vma; |
1504 | @@ -1430,6 +1431,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) |
1505 | overlay->contrast = 75; |
1506 | overlay->saturation = 146; |
1507 | |
1508 | + init_request_active(&overlay->last_flip, NULL); |
1509 | + |
1510 | regs = intel_overlay_map_regs(overlay); |
1511 | if (!regs) |
1512 | goto out_unpin_bo; |
1513 | diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c |
1514 | index db24f898853c..103cefdb9ddd 100644 |
1515 | --- a/drivers/gpu/drm/i915/intel_pm.c |
1516 | +++ b/drivers/gpu/drm/i915/intel_pm.c |
1517 | @@ -2879,6 +2879,21 @@ skl_wm_plane_id(const struct intel_plane *plane) |
1518 | } |
1519 | } |
1520 | |
1521 | +/* |
1522 | + * FIXME: We still don't have the proper code detect if we need to apply the WA, |
1523 | + * so assume we'll always need it in order to avoid underruns. |
1524 | + */ |
1525 | +static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state) |
1526 | +{ |
1527 | + struct drm_i915_private *dev_priv = to_i915(state->base.dev); |
1528 | + |
1529 | + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || |
1530 | + IS_KABYLAKE(dev_priv)) |
1531 | + return true; |
1532 | + |
1533 | + return false; |
1534 | +} |
1535 | + |
1536 | static bool |
1537 | intel_has_sagv(struct drm_i915_private *dev_priv) |
1538 | { |
1539 | @@ -2999,9 +3014,10 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) |
1540 | struct drm_device *dev = state->dev; |
1541 | struct drm_i915_private *dev_priv = to_i915(dev); |
1542 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); |
1543 | - struct drm_crtc *crtc; |
1544 | + struct intel_crtc *crtc; |
1545 | + struct intel_plane *plane; |
1546 | enum pipe pipe; |
1547 | - int level, plane; |
1548 | + int level, id, latency; |
1549 | |
1550 | if (!intel_has_sagv(dev_priv)) |
1551 | return false; |
1552 | @@ -3019,27 +3035,36 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) |
1553 | |
1554 | /* Since we're now guaranteed to only have one active CRTC... */ |
1555 | pipe = ffs(intel_state->active_crtcs) - 1; |
1556 | - crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
1557 | + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
1558 | |
1559 | - if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE) |
1560 | + if (crtc->base.state->mode.flags & DRM_MODE_FLAG_INTERLACE) |
1561 | return false; |
1562 | |
1563 | - for_each_plane(dev_priv, pipe, plane) { |
1564 | + for_each_intel_plane_on_crtc(dev, crtc, plane) { |
1565 | + id = skl_wm_plane_id(plane); |
1566 | + |
1567 | /* Skip this plane if it's not enabled */ |
1568 | - if (intel_state->wm_results.plane[pipe][plane][0] == 0) |
1569 | + if (intel_state->wm_results.plane[pipe][id][0] == 0) |
1570 | continue; |
1571 | |
1572 | /* Find the highest enabled wm level for this plane */ |
1573 | for (level = ilk_wm_max_level(dev); |
1574 | - intel_state->wm_results.plane[pipe][plane][level] == 0; --level) |
1575 | + intel_state->wm_results.plane[pipe][id][level] == 0; --level) |
1576 | { } |
1577 | |
1578 | + latency = dev_priv->wm.skl_latency[level]; |
1579 | + |
1580 | + if (skl_needs_memory_bw_wa(intel_state) && |
1581 | + plane->base.state->fb->modifier[0] == |
1582 | + I915_FORMAT_MOD_X_TILED) |
1583 | + latency += 15; |
1584 | + |
1585 | /* |
1586 | * If any of the planes on this pipe don't enable wm levels |
1587 | * that incur memory latencies higher then 30µs we can't enable |
1588 | * the SAGV |
1589 | */ |
1590 | - if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME) |
1591 | + if (latency < SKL_SAGV_BLOCK_TIME) |
1592 | return false; |
1593 | } |
1594 | |
1595 | @@ -3549,12 +3574,18 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, |
1596 | uint32_t width = 0, height = 0; |
1597 | uint32_t plane_pixel_rate; |
1598 | uint32_t y_tile_minimum, y_min_scanlines; |
1599 | + struct intel_atomic_state *state = |
1600 | + to_intel_atomic_state(cstate->base.state); |
1601 | + bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); |
1602 | |
1603 | if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) { |
1604 | *enabled = false; |
1605 | return 0; |
1606 | } |
1607 | |
1608 | + if (apply_memory_bw_wa && fb->modifier[0] == I915_FORMAT_MOD_X_TILED) |
1609 | + latency += 15; |
1610 | + |
1611 | width = drm_rect_width(&intel_pstate->base.src) >> 16; |
1612 | height = drm_rect_height(&intel_pstate->base.src) >> 16; |
1613 | |
1614 | @@ -3586,6 +3617,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, |
1615 | y_min_scanlines = 4; |
1616 | } |
1617 | |
1618 | + if (apply_memory_bw_wa) |
1619 | + y_min_scanlines *= 2; |
1620 | + |
1621 | plane_bytes_per_line = width * cpp; |
1622 | if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || |
1623 | fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { |
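
The SAGV and watermark hunks above share one rule: when the memory-bandwidth workaround applies, an X-tiled plane is treated as having 15 us of extra latency, and the highest enabled watermark level still has to cover the 30 us SAGV block time. Below is a self-contained sketch of that check; the constant and the 15 us adjustment come from the hunks, the helper name and test values are illustrative.

    #include <stdbool.h>
    #include <stdio.h>

    #define SKL_SAGV_BLOCK_TIME 30  /* us */

    static bool plane_allows_sagv(int wm_level_latency_us, bool x_tiled,
                                  bool needs_memory_bw_wa)
    {
            int latency = wm_level_latency_us;

            /* mirrors the latency adjustment added in the hunk above */
            if (needs_memory_bw_wa && x_tiled)
                    latency += 15;

            return latency >= SKL_SAGV_BLOCK_TIME;
    }

    int main(void)
    {
            printf("20us linear,  WA: %d\n", plane_allows_sagv(20, false, true));
            printf("20us X-tiled, WA: %d\n", plane_allows_sagv(20, true, true));
            printf("31us linear,  WA: %d\n", plane_allows_sagv(31, false, true));
            return 0;
    }

The same skl_needs_memory_bw_wa() predicate also doubles y_min_scanlines in skl_compute_plane_wm(), which is why the workaround appears twice in this diff.
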
1624 | diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c |
1625 | index 108ba1e5d658..9b307cee3008 100644 |
1626 | --- a/drivers/gpu/drm/i915/intel_psr.c |
1627 | +++ b/drivers/gpu/drm/i915/intel_psr.c |
1628 | @@ -825,13 +825,9 @@ void intel_psr_init(struct drm_device *dev) |
1629 | dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? |
1630 | HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; |
1631 | |
1632 | - /* Per platform default */ |
1633 | - if (i915.enable_psr == -1) { |
1634 | - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
1635 | - i915.enable_psr = 1; |
1636 | - else |
1637 | - i915.enable_psr = 0; |
1638 | - } |
1639 | + /* Per platform default: all disabled. */ |
1640 | + if (i915.enable_psr == -1) |
1641 | + i915.enable_psr = 0; |
1642 | |
1643 | /* Set link_standby x link_off defaults */ |
1644 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
1645 | diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c |
1646 | index 87a72476d313..fb16070b266e 100644 |
1647 | --- a/drivers/gpu/drm/radeon/radeon_cursor.c |
1648 | +++ b/drivers/gpu/drm/radeon/radeon_cursor.c |
1649 | @@ -146,6 +146,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y) |
1650 | int xorigin = 0, yorigin = 0; |
1651 | int w = radeon_crtc->cursor_width; |
1652 | |
1653 | + radeon_crtc->cursor_x = x; |
1654 | + radeon_crtc->cursor_y = y; |
1655 | + |
1656 | if (ASIC_IS_AVIVO(rdev)) { |
1657 | /* avivo cursor are offset into the total surface */ |
1658 | x += crtc->x; |
1659 | @@ -240,9 +243,6 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y) |
1660 | yorigin * 256); |
1661 | } |
1662 | |
1663 | - radeon_crtc->cursor_x = x; |
1664 | - radeon_crtc->cursor_y = y; |
1665 | - |
1666 | if (radeon_crtc->cursor_out_of_bounds) { |
1667 | radeon_crtc->cursor_out_of_bounds = false; |
1668 | if (radeon_crtc->cursor_bo) |
1669 | diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c |
1670 | index 60875625cbdf..8f6c35370f66 100644 |
1671 | --- a/drivers/hid/hid-sensor-hub.c |
1672 | +++ b/drivers/hid/hid-sensor-hub.c |
1673 | @@ -212,7 +212,6 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, |
1674 | __s32 value; |
1675 | int ret = 0; |
1676 | |
1677 | - memset(buffer, 0, buffer_size); |
1678 | mutex_lock(&data->mutex); |
1679 | report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); |
1680 | if (!report || (field_index >= report->maxfield)) { |
1681 | @@ -256,6 +255,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, |
1682 | int buffer_index = 0; |
1683 | int i; |
1684 | |
1685 | + memset(buffer, 0, buffer_size); |
1686 | + |
1687 | mutex_lock(&data->mutex); |
1688 | report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); |
1689 | if (!report || (field_index >= report->maxfield) || |
1690 | diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c |
1691 | index 12e851a5af48..46b4e35fd555 100644 |
1692 | --- a/drivers/hwmon/amc6821.c |
1693 | +++ b/drivers/hwmon/amc6821.c |
1694 | @@ -188,8 +188,8 @@ static struct amc6821_data *amc6821_update_device(struct device *dev) |
1695 | !data->valid) { |
1696 | |
1697 | for (i = 0; i < TEMP_IDX_LEN; i++) |
1698 | - data->temp[i] = i2c_smbus_read_byte_data(client, |
1699 | - temp_reg[i]); |
1700 | + data->temp[i] = (int8_t)i2c_smbus_read_byte_data( |
1701 | + client, temp_reg[i]); |
1702 | |
1703 | data->stat1 = i2c_smbus_read_byte_data(client, |
1704 | AMC6821_REG_STAT1); |
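
The amc6821 change is only a cast, but it is what makes negative readings work: i2c_smbus_read_byte_data() returns the register byte as a non-negative int, so a two's-complement temperature such as -2 degC arrives as 254 unless it is pushed through int8_t first. A quick standalone check (plain userspace C, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int raw = 0xfe;                 /* what the SMBus byte read returns */
            int without_cast = raw;         /* stored as-is: 254 degC, wrong    */
            int with_cast = (int8_t)raw;    /* sign-extended: -2 degC, right    */

            printf("raw=0x%02x  without cast=%d  with cast=%d\n",
                   raw, without_cast, with_cast);
            return 0;
    }
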
1705 | diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c |
1706 | index edf550fc4eef..0043a4c02b85 100644 |
1707 | --- a/drivers/hwmon/ds620.c |
1708 | +++ b/drivers/hwmon/ds620.c |
1709 | @@ -166,7 +166,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da, |
1710 | if (res) |
1711 | return res; |
1712 | |
1713 | - val = (val * 10 / 625) * 8; |
1714 | + val = (clamp_val(val, -128000, 128000) * 10 / 625) * 8; |
1715 | |
1716 | mutex_lock(&data->update_lock); |
1717 | data->temp[attr->index] = val; |
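
The ds620 fix clamps the user-supplied millidegree value to the chip's -128..+128 degC range before the (val * 10 / 625) * 8 scaling, so an oversized sysfs write can no longer push the arithmetic or the register encoding out of range. A hedged sketch of the conversion, with a local clamp() standing in for the kernel's clamp_val():

    #include <stdio.h>

    static long clamp(long v, long lo, long hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    /* millidegrees C -> register encoding, following the patched set_temp() */
    static long ds620_encode_mdegc(long mdegc)
    {
            return (clamp(mdegc, -128000, 128000) * 10 / 625) * 8;
    }

    int main(void)
    {
            printf("25000 mC      -> %ld\n", ds620_encode_mdegc(25000));
            printf("1000000000 mC -> %ld\n", ds620_encode_mdegc(1000000000L));
            return 0;
    }
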
1718 | diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c |
1719 | index b96a2a9e4df7..628be9c95ff9 100644 |
1720 | --- a/drivers/hwmon/g762.c |
1721 | +++ b/drivers/hwmon/g762.c |
1722 | @@ -193,14 +193,17 @@ static inline unsigned int rpm_from_cnt(u8 cnt, u32 clk_freq, u16 p, |
1723 | * Convert fan RPM value from sysfs into count value for fan controller |
1724 | * register (FAN_SET_CNT). |
1725 | */ |
1726 | -static inline unsigned char cnt_from_rpm(u32 rpm, u32 clk_freq, u16 p, |
1727 | +static inline unsigned char cnt_from_rpm(unsigned long rpm, u32 clk_freq, u16 p, |
1728 | u8 clk_div, u8 gear_mult) |
1729 | { |
1730 | - if (!rpm) /* to stop the fan, set cnt to 255 */ |
1731 | + unsigned long f1 = clk_freq * 30 * gear_mult; |
1732 | + unsigned long f2 = p * clk_div; |
1733 | + |
1734 | + if (!rpm) /* to stop the fan, set cnt to 255 */ |
1735 | return 0xff; |
1736 | |
1737 | - return clamp_val(((clk_freq * 30 * gear_mult) / (rpm * p * clk_div)), |
1738 | - 0, 255); |
1739 | + rpm = clamp_val(rpm, f1 / (255 * f2), ULONG_MAX / f2); |
1740 | + return DIV_ROUND_CLOSEST(f1, rpm * f2); |
1741 | } |
1742 | |
1743 | /* helper to grab and cache data, at most one time per second */ |
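
The rewritten cnt_from_rpm() above takes the rpm as unsigned long, clamps it so that neither the rpm * p * clk_div product nor the resulting count can run out of range, and rounds to the nearest count instead of truncating. A standalone model of the same arithmetic, with local stand-ins for clamp_val() and DIV_ROUND_CLOSEST() and made-up clock parameters:

    #include <limits.h>
    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    static unsigned long clampul(unsigned long v, unsigned long lo, unsigned long hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    static unsigned char cnt_from_rpm(unsigned long rpm, unsigned long clk_freq,
                                      unsigned int p, unsigned int clk_div,
                                      unsigned int gear_mult)
    {
            unsigned long f1 = clk_freq * 30 * gear_mult;
            unsigned long f2 = (unsigned long)p * clk_div;

            if (!rpm)                       /* 0 rpm means "stop the fan" */
                    return 0xff;

            rpm = clampul(rpm, f1 / (255 * f2), ULONG_MAX / f2);
            return (unsigned char)DIV_ROUND_CLOSEST(f1, rpm * f2);
    }

    int main(void)
    {
            /* 32.768 kHz clock, 2 pulses per revolution, divisor 1, gear_mult 1 */
            printf("3000 rpm -> count %u\n", (unsigned)cnt_from_rpm(3000, 32768, 2, 1, 1));
            printf("1 rpm    -> count %u\n", (unsigned)cnt_from_rpm(1, 32768, 2, 1, 1));
            return 0;
    }
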
1744 | diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c |
1745 | index 322ed9272811..841f2428e84a 100644 |
1746 | --- a/drivers/hwmon/lm90.c |
1747 | +++ b/drivers/hwmon/lm90.c |
1748 | @@ -1036,7 +1036,7 @@ static const u8 lm90_temp_emerg_index[3] = { |
1749 | }; |
1750 | |
1751 | static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 }; |
1752 | -static const u8 lm90_max_alarm_bits[3] = { 0, 4, 12 }; |
1753 | +static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 }; |
1754 | static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 }; |
1755 | static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 }; |
1756 | static const u8 lm90_fault_bits[3] = { 0, 2, 10 }; |
1757 | diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c |
1758 | index 3ce33d244cc0..12b94b094c0d 100644 |
1759 | --- a/drivers/hwmon/nct7802.c |
1760 | +++ b/drivers/hwmon/nct7802.c |
1761 | @@ -259,13 +259,15 @@ static int nct7802_read_fan_min(struct nct7802_data *data, u8 reg_fan_low, |
1762 | ret = 0; |
1763 | else if (ret) |
1764 | ret = DIV_ROUND_CLOSEST(1350000U, ret); |
1765 | + else |
1766 | + ret = 1350000U; |
1767 | abort: |
1768 | mutex_unlock(&data->access_lock); |
1769 | return ret; |
1770 | } |
1771 | |
1772 | static int nct7802_write_fan_min(struct nct7802_data *data, u8 reg_fan_low, |
1773 | - u8 reg_fan_high, unsigned int limit) |
1774 | + u8 reg_fan_high, unsigned long limit) |
1775 | { |
1776 | int err; |
1777 | |
1778 | @@ -326,8 +328,8 @@ static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index, |
1779 | int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr]; |
1780 | int err; |
1781 | |
1782 | + voltage = clamp_val(voltage, 0, 0x3ff * nct7802_vmul[nr]); |
1783 | voltage = DIV_ROUND_CLOSEST(voltage, nct7802_vmul[nr]); |
1784 | - voltage = clamp_val(voltage, 0, 0x3ff); |
1785 | |
1786 | mutex_lock(&data->access_lock); |
1787 | err = regmap_write(data->regmap, |
1788 | @@ -402,7 +404,7 @@ static ssize_t store_temp(struct device *dev, struct device_attribute *attr, |
1789 | if (err < 0) |
1790 | return err; |
1791 | |
1792 | - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); |
1793 | + val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000); |
1794 | |
1795 | err = regmap_write(data->regmap, nr, val & 0xff); |
1796 | return err ? : count; |
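
Both nct7802 hunks move the clamp in front of the division, and the reason hides inside DIV_ROUND_CLOSEST(): it adds half of the divisor to the value before dividing, so a value near the type's maximum overflows inside the macro. Clamping to the sensor's real range first keeps the intermediate arithmetic in bounds. A small userspace illustration for the temperature path; the rounding helper is a simplified signed stand-in, not the kernel macro.

    #include <limits.h>
    #include <stdio.h>

    static long div_round_closest(long x, long d)
    {
            return (x + d / 2) / d;         /* overflows when x > LONG_MAX - d/2 */
    }

    static long clampl(long v, long lo, long hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            long val = LONG_MAX;            /* an absurd sysfs write */

            /* old order (divide, then clamp) would overflow inside the helper:
             *   clampl(div_round_closest(val, 1000), -128, 127);
             * new order clamps to the chip's -128..+127 degC range first:
             */
            printf("%ld degC\n", div_round_closest(clampl(val, -128000, 127000), 1000));
            return 0;
    }
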
1797 | diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c |
1798 | index 559a3dcd64d8..094f948f99ff 100644 |
1799 | --- a/drivers/hwmon/scpi-hwmon.c |
1800 | +++ b/drivers/hwmon/scpi-hwmon.c |
1801 | @@ -251,6 +251,7 @@ static const struct of_device_id scpi_of_match[] = { |
1802 | {.compatible = "arm,scpi-sensors"}, |
1803 | {}, |
1804 | }; |
1805 | +MODULE_DEVICE_TABLE(of, scpi_of_match); |
1806 | |
1807 | static struct platform_driver scpi_hwmon_platdrv = { |
1808 | .driver = { |
1809 | diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c |
1810 | index ce69048c88e9..3a557e3181ea 100644 |
1811 | --- a/drivers/iio/accel/st_accel_core.c |
1812 | +++ b/drivers/iio/accel/st_accel_core.c |
1813 | @@ -154,8 +154,8 @@ |
1814 | #define ST_ACCEL_4_FS_MASK 0x80 |
1815 | #define ST_ACCEL_4_FS_AVL_2_VAL 0X00 |
1816 | #define ST_ACCEL_4_FS_AVL_6_VAL 0X01 |
1817 | -#define ST_ACCEL_4_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1024) |
1818 | -#define ST_ACCEL_4_FS_AVL_6_GAIN IIO_G_TO_M_S_2(340) |
1819 | +#define ST_ACCEL_4_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000) |
1820 | +#define ST_ACCEL_4_FS_AVL_6_GAIN IIO_G_TO_M_S_2(3000) |
1821 | #define ST_ACCEL_4_BDU_ADDR 0x21 |
1822 | #define ST_ACCEL_4_BDU_MASK 0x40 |
1823 | #define ST_ACCEL_4_DRDY_IRQ_ADDR 0x21 |
1824 | @@ -346,6 +346,14 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { |
1825 | .addr = ST_ACCEL_1_BDU_ADDR, |
1826 | .mask = ST_ACCEL_1_BDU_MASK, |
1827 | }, |
1828 | + /* |
1829 | + * Data Alignment Setting - needs to be set to get |
1830 | + * left-justified data like all other sensors. |
1831 | + */ |
1832 | + .das = { |
1833 | + .addr = 0x21, |
1834 | + .mask = 0x01, |
1835 | + }, |
1836 | .drdy_irq = { |
1837 | .addr = ST_ACCEL_1_DRDY_IRQ_ADDR, |
1838 | .mask_int1 = ST_ACCEL_1_DRDY_IRQ_INT1_MASK, |
1839 | diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c |
1840 | index fe7775bb3740..df4045203a07 100644 |
1841 | --- a/drivers/iio/common/st_sensors/st_sensors_buffer.c |
1842 | +++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c |
1843 | @@ -30,7 +30,9 @@ static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf) |
1844 | |
1845 | for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) { |
1846 | const struct iio_chan_spec *channel = &indio_dev->channels[i]; |
1847 | - unsigned int bytes_to_read = channel->scan_type.realbits >> 3; |
1848 | + unsigned int bytes_to_read = |
1849 | + DIV_ROUND_UP(channel->scan_type.realbits + |
1850 | + channel->scan_type.shift, 8); |
1851 | unsigned int storage_bytes = |
1852 | channel->scan_type.storagebits >> 3; |
1853 | |
1854 | diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c |
1855 | index 975a1f19f747..79c8c7cd70d5 100644 |
1856 | --- a/drivers/iio/common/st_sensors/st_sensors_core.c |
1857 | +++ b/drivers/iio/common/st_sensors/st_sensors_core.c |
1858 | @@ -401,6 +401,15 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, |
1859 | return err; |
1860 | } |
1861 | |
1862 | + /* set DAS */ |
1863 | + if (sdata->sensor_settings->das.addr) { |
1864 | + err = st_sensors_write_data_with_mask(indio_dev, |
1865 | + sdata->sensor_settings->das.addr, |
1866 | + sdata->sensor_settings->das.mask, 1); |
1867 | + if (err < 0) |
1868 | + return err; |
1869 | + } |
1870 | + |
1871 | if (sdata->int_pin_open_drain) { |
1872 | dev_info(&indio_dev->dev, |
1873 | "set interrupt line to open drain mode\n"); |
1874 | @@ -483,8 +492,10 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev, |
1875 | int err; |
1876 | u8 *outdata; |
1877 | struct st_sensor_data *sdata = iio_priv(indio_dev); |
1878 | - unsigned int byte_for_channel = ch->scan_type.realbits >> 3; |
1879 | + unsigned int byte_for_channel; |
1880 | |
1881 | + byte_for_channel = DIV_ROUND_UP(ch->scan_type.realbits + |
1882 | + ch->scan_type.shift, 8); |
1883 | outdata = kmalloc(byte_for_channel, GFP_KERNEL); |
1884 | if (!outdata) |
1885 | return -ENOMEM; |
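
Both st_sensors hunks replace realbits >> 3 with DIV_ROUND_UP(realbits + shift, 8): once a channel's significant bits sit shifted inside the register pair, realbits alone under-counts how many bytes actually have to be read. A standalone example with assumed channel parameters (12 significant bits, shift of 4):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned int bytes_for_channel(unsigned int realbits, unsigned int shift)
    {
            return DIV_ROUND_UP(realbits + shift, 8);
    }

    int main(void)
    {
            unsigned int realbits = 12, shift = 4;

            printf("old formula: %u byte(s)\n", realbits >> 3);                  /* 1, too few */
            printf("new formula: %u byte(s)\n", bytes_for_channel(realbits, shift)); /* 2 */
            return 0;
    }
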
1886 | diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c |
1887 | index e0251b8c1a52..5fb571d03153 100644 |
1888 | --- a/drivers/iio/imu/bmi160/bmi160_core.c |
1889 | +++ b/drivers/iio/imu/bmi160/bmi160_core.c |
1890 | @@ -66,10 +66,8 @@ |
1891 | |
1892 | #define BMI160_REG_DUMMY 0x7F |
1893 | |
1894 | -#define BMI160_ACCEL_PMU_MIN_USLEEP 3200 |
1895 | -#define BMI160_ACCEL_PMU_MAX_USLEEP 3800 |
1896 | -#define BMI160_GYRO_PMU_MIN_USLEEP 55000 |
1897 | -#define BMI160_GYRO_PMU_MAX_USLEEP 80000 |
1898 | +#define BMI160_ACCEL_PMU_MIN_USLEEP 3800 |
1899 | +#define BMI160_GYRO_PMU_MIN_USLEEP 80000 |
1900 | #define BMI160_SOFTRESET_USLEEP 1000 |
1901 | |
1902 | #define BMI160_CHANNEL(_type, _axis, _index) { \ |
1903 | @@ -151,20 +149,9 @@ static struct bmi160_regs bmi160_regs[] = { |
1904 | }, |
1905 | }; |
1906 | |
1907 | -struct bmi160_pmu_time { |
1908 | - unsigned long min; |
1909 | - unsigned long max; |
1910 | -}; |
1911 | - |
1912 | -static struct bmi160_pmu_time bmi160_pmu_time[] = { |
1913 | - [BMI160_ACCEL] = { |
1914 | - .min = BMI160_ACCEL_PMU_MIN_USLEEP, |
1915 | - .max = BMI160_ACCEL_PMU_MAX_USLEEP |
1916 | - }, |
1917 | - [BMI160_GYRO] = { |
1918 | - .min = BMI160_GYRO_PMU_MIN_USLEEP, |
1919 | - .max = BMI160_GYRO_PMU_MIN_USLEEP, |
1920 | - }, |
1921 | +static unsigned long bmi160_pmu_time[] = { |
1922 | + [BMI160_ACCEL] = BMI160_ACCEL_PMU_MIN_USLEEP, |
1923 | + [BMI160_GYRO] = BMI160_GYRO_PMU_MIN_USLEEP, |
1924 | }; |
1925 | |
1926 | struct bmi160_scale { |
1927 | @@ -289,7 +276,7 @@ int bmi160_set_mode(struct bmi160_data *data, enum bmi160_sensor_type t, |
1928 | if (ret < 0) |
1929 | return ret; |
1930 | |
1931 | - usleep_range(bmi160_pmu_time[t].min, bmi160_pmu_time[t].max); |
1932 | + usleep_range(bmi160_pmu_time[t], bmi160_pmu_time[t] + 1000); |
1933 | |
1934 | return 0; |
1935 | } |
1936 | diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c |
1937 | index 6511b20a2a29..a8ffa432bf0d 100644 |
1938 | --- a/drivers/iio/light/max44000.c |
1939 | +++ b/drivers/iio/light/max44000.c |
1940 | @@ -113,7 +113,7 @@ static const char max44000_int_time_avail_str[] = |
1941 | "0.100 " |
1942 | "0.025 " |
1943 | "0.00625 " |
1944 | - "0.001625"; |
1945 | + "0.0015625"; |
1946 | |
1947 | /* Available scales (internal to ulux) with pretty manual alignment: */ |
1948 | static const int max44000_scale_avail_ulux_array[] = { |
1949 | diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c |
1950 | index cf805b960866..2e934aef3d2a 100644 |
1951 | --- a/drivers/input/rmi4/rmi_f54.c |
1952 | +++ b/drivers/input/rmi4/rmi_f54.c |
1953 | @@ -200,7 +200,7 @@ static int rmi_f54_request_report(struct rmi_function *fn, u8 report_type) |
1954 | |
1955 | error = rmi_write(rmi_dev, fn->fd.command_base_addr, F54_GET_REPORT); |
1956 | if (error < 0) |
1957 | - return error; |
1958 | + goto unlock; |
1959 | |
1960 | init_completion(&f54->cmd_done); |
1961 | |
1962 | @@ -209,9 +209,10 @@ static int rmi_f54_request_report(struct rmi_function *fn, u8 report_type) |
1963 | |
1964 | queue_delayed_work(f54->workqueue, &f54->work, 0); |
1965 | |
1966 | +unlock: |
1967 | mutex_unlock(&f54->data_mutex); |
1968 | |
1969 | - return 0; |
1970 | + return error; |
1971 | } |
1972 | |
1973 | static size_t rmi_f54_get_report_size(struct f54_data *f54) |
1974 | diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c |
1975 | index 754595ee11b6..11a13b5be73a 100644 |
1976 | --- a/drivers/iommu/amd_iommu.c |
1977 | +++ b/drivers/iommu/amd_iommu.c |
1978 | @@ -1021,7 +1021,7 @@ static int __iommu_queue_command_sync(struct amd_iommu *iommu, |
1979 | next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; |
1980 | left = (head - next_tail) % CMD_BUFFER_SIZE; |
1981 | |
1982 | - if (left <= 2) { |
1983 | + if (left <= 0x20) { |
1984 | struct iommu_cmd sync_cmd; |
1985 | int ret; |
1986 | |
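
The amd_iommu hunk only widens the "almost full" threshold of the command ring from 2 to 0x20 bytes, but the surrounding free-space computation is worth spelling out: it is the usual wrap-around ring-buffer formula. A tiny model with an assumed 16-byte command size:

    #include <stdio.h>

    #define CMD_BUFFER_SIZE 4096
    #define CMD_SIZE        16              /* assumed command size, demo only */

    static int ring_space_low(unsigned int head, unsigned int tail)
    {
            unsigned int next_tail = (tail + CMD_SIZE) % CMD_BUFFER_SIZE;
            unsigned int left = (head - next_tail) % CMD_BUFFER_SIZE;

            return left <= 0x20;            /* keep a margin before catching head */
    }

    int main(void)
    {
            printf("head=0 tail=4064 -> low=%d\n", ring_space_low(0, 4064));
            printf("head=0 tail=2048 -> low=%d\n", ring_space_low(0, 2048));
            return 0;
    }
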
1987 | diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c |
1988 | index 594849a3a9be..f8ed8c95b685 100644 |
1989 | --- a/drivers/iommu/amd_iommu_v2.c |
1990 | +++ b/drivers/iommu/amd_iommu_v2.c |
1991 | @@ -805,8 +805,10 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) |
1992 | goto out_free_domain; |
1993 | |
1994 | group = iommu_group_get(&pdev->dev); |
1995 | - if (!group) |
1996 | + if (!group) { |
1997 | + ret = -EINVAL; |
1998 | goto out_free_domain; |
1999 | + } |
2000 | |
2001 | ret = iommu_attach_group(dev_state->domain, group); |
2002 | if (ret != 0) |
2003 | diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
2004 | index d8376c2d18b3..d82637ab09fd 100644 |
2005 | --- a/drivers/iommu/intel-iommu.c |
2006 | +++ b/drivers/iommu/intel-iommu.c |
2007 | @@ -2037,6 +2037,25 @@ static int domain_context_mapping_one(struct dmar_domain *domain, |
2008 | if (context_present(context)) |
2009 | goto out_unlock; |
2010 | |
2011 | + /* |
2012 | + * For kdump cases, old valid entries may be cached due to the |
2013 | + * in-flight DMA and copied pgtable, but there is no unmapping |
2014 | + * behaviour for them, thus we need an explicit cache flush for |
2015 | + * the newly-mapped device. For kdump, at this point, the device |
2016 | + * is supposed to finish reset at its driver probe stage, so no |
2017 | + * in-flight DMA will exist, and we don't need to worry anymore |
2018 | + * hereafter. |
2019 | + */ |
2020 | + if (context_copied(context)) { |
2021 | + u16 did_old = context_domain_id(context); |
2022 | + |
2023 | + if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) |
2024 | + iommu->flush.flush_context(iommu, did_old, |
2025 | + (((u16)bus) << 8) | devfn, |
2026 | + DMA_CCMD_MASK_NOBIT, |
2027 | + DMA_CCMD_DEVICE_INVL); |
2028 | + } |
2029 | + |
2030 | pgd = domain->pgd; |
2031 | |
2032 | context_clear_entry(context); |
2033 | @@ -5197,6 +5216,25 @@ static void intel_iommu_remove_device(struct device *dev) |
2034 | } |
2035 | |
2036 | #ifdef CONFIG_INTEL_IOMMU_SVM |
2037 | +#define MAX_NR_PASID_BITS (20) |
2038 | +static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu) |
2039 | +{ |
2040 | + /* |
2041 | + * Convert ecap_pss to extend context entry pts encoding, also |
2042 | + * respect the soft pasid_max value set by the iommu. |
2043 | + * - number of PASID bits = ecap_pss + 1 |
2044 | + * - number of PASID table entries = 2^(pts + 5) |
2045 | + * Therefore, pts = ecap_pss - 4 |
2046 | + * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15 |
2047 | + */ |
2048 | + if (ecap_pss(iommu->ecap) < 5) |
2049 | + return 0; |
2050 | + |
2051 | + /* pasid_max is encoded as actual number of entries not the bits */ |
2052 | + return find_first_bit((unsigned long *)&iommu->pasid_max, |
2053 | + MAX_NR_PASID_BITS) - 5; |
2054 | +} |
2055 | + |
2056 | int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev) |
2057 | { |
2058 | struct device_domain_info *info; |
2059 | @@ -5229,7 +5267,9 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd |
2060 | |
2061 | if (!(ctx_lo & CONTEXT_PASIDE)) { |
2062 | context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table); |
2063 | - context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap); |
2064 | + context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | |
2065 | + intel_iommu_get_pts(iommu); |
2066 | + |
2067 | wmb(); |
2068 | /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both |
2069 | * extended to permit requests-with-PASID if the PASIDE bit |
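
intel_iommu_get_pts() encodes the PASID table size for the extended context entry, where the hardware reads the field as 2^(pts + 5) table entries. For a power-of-two pasid_max the encoding is therefore log2(pasid_max) - 5, which is what find_first_bit() computes on a single word. A sketch using the GCC/Clang builtin __builtin_ctz() as the stand-in, with pasid_max assumed to be a power of two:

    #include <stdio.h>

    static unsigned int pts_from_pasid_max(unsigned int pasid_max)
    {
            /* pasid_max is a number of table entries, assumed a power of two */
            return (unsigned int)__builtin_ctz(pasid_max) - 5;
    }

    int main(void)
    {
            unsigned int pasid_max = 1u << 20;      /* 20-bit PASIDs */
            unsigned int pts = pts_from_pasid_max(pasid_max);

            printf("pasid_max=%u -> pts=%u -> table entries=%u\n",
                   pasid_max, pts, 1u << (pts + 5));
            return 0;
    }

This reproduces the example in the hunk's comment: 20 PASID bits give pts = 15.
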
2070 | diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c |
2071 | index 353c54986211..c2662a1bfdd3 100644 |
2072 | --- a/drivers/irqchip/irq-bcm7038-l1.c |
2073 | +++ b/drivers/irqchip/irq-bcm7038-l1.c |
2074 | @@ -215,6 +215,31 @@ static int bcm7038_l1_set_affinity(struct irq_data *d, |
2075 | return 0; |
2076 | } |
2077 | |
2078 | +static void bcm7038_l1_cpu_offline(struct irq_data *d) |
2079 | +{ |
2080 | + struct cpumask *mask = irq_data_get_affinity_mask(d); |
2081 | + int cpu = smp_processor_id(); |
2082 | + cpumask_t new_affinity; |
2083 | + |
2084 | + /* This CPU was not on the affinity mask */ |
2085 | + if (!cpumask_test_cpu(cpu, mask)) |
2086 | + return; |
2087 | + |
2088 | + if (cpumask_weight(mask) > 1) { |
2089 | + /* |
2090 | + * Multiple CPU affinity, remove this CPU from the affinity |
2091 | + * mask |
2092 | + */ |
2093 | + cpumask_copy(&new_affinity, mask); |
2094 | + cpumask_clear_cpu(cpu, &new_affinity); |
2095 | + } else { |
2096 | + /* Only CPU, put on the lowest online CPU */ |
2097 | + cpumask_clear(&new_affinity); |
2098 | + cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); |
2099 | + } |
2100 | + irq_set_affinity_locked(d, &new_affinity, false); |
2101 | +} |
2102 | + |
2103 | static int __init bcm7038_l1_init_one(struct device_node *dn, |
2104 | unsigned int idx, |
2105 | struct bcm7038_l1_chip *intc) |
2106 | @@ -266,6 +291,7 @@ static struct irq_chip bcm7038_l1_irq_chip = { |
2107 | .irq_mask = bcm7038_l1_mask, |
2108 | .irq_unmask = bcm7038_l1_unmask, |
2109 | .irq_set_affinity = bcm7038_l1_set_affinity, |
2110 | + .irq_cpu_offline = bcm7038_l1_cpu_offline, |
2111 | }; |
2112 | |
2113 | static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq, |
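
The new bcm7038_l1_cpu_offline() callback migrates interrupts away from a CPU that is going down: if the CPU is in the affinity mask and others remain, it is simply dropped; if it was the only target, the interrupt is retargeted to the lowest online CPU. A simplified model using a plain bitmask in place of cpumask_t; the CPU numbering and masks are made up for the example.

    #include <stdio.h>

    static unsigned int new_affinity(unsigned int affinity, unsigned int online,
                                     int offlining_cpu)
    {
            unsigned int cpu_bit = 1u << offlining_cpu;

            if (!(affinity & cpu_bit))      /* CPU was not targeted: no change */
                    return affinity;

            if (affinity & ~cpu_bit)        /* other CPUs remain in the mask */
                    return affinity & ~cpu_bit;

            /* it was the only target: fall back to the lowest online CPU */
            return online & -online;
    }

    int main(void)
    {
            /* online CPUs {0,2,3}; CPU 1 goes offline */
            printf("0x%x\n", new_affinity(0x6, 0xd, 1));   /* {1,2} -> {2}      */
            printf("0x%x\n", new_affinity(0x2, 0xd, 1));   /* {1}   -> lowest 0 */
            return 0;
    }
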
2114 | diff --git a/drivers/md/md.c b/drivers/md/md.c |
2115 | index 2089d46b0eb8..24925f2aa235 100644 |
2116 | --- a/drivers/md/md.c |
2117 | +++ b/drivers/md/md.c |
2118 | @@ -6829,7 +6829,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, |
2119 | /* need to ensure recovery thread has run */ |
2120 | wait_event_interruptible_timeout(mddev->sb_wait, |
2121 | !test_bit(MD_RECOVERY_NEEDED, |
2122 | - &mddev->flags), |
2123 | + &mddev->recovery), |
2124 | msecs_to_jiffies(5000)); |
2125 | if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { |
2126 | /* Need to flush page cache, and ensure no-one else opens |
2127 | @@ -7092,7 +7092,8 @@ static int md_open(struct block_device *bdev, fmode_t mode) |
2128 | |
2129 | if (test_bit(MD_CLOSING, &mddev->flags)) { |
2130 | mutex_unlock(&mddev->open_mutex); |
2131 | - return -ENODEV; |
2132 | + err = -ENODEV; |
2133 | + goto out; |
2134 | } |
2135 | |
2136 | err = 0; |
2137 | @@ -7101,6 +7102,8 @@ static int md_open(struct block_device *bdev, fmode_t mode) |
2138 | |
2139 | check_disk_change(bdev); |
2140 | out: |
2141 | + if (err) |
2142 | + mddev_put(mddev); |
2143 | return err; |
2144 | } |
2145 | |
2146 | diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c |
2147 | index de3ee2547479..8207e6900656 100644 |
2148 | --- a/drivers/media/usb/dvb-usb/dibusb-common.c |
2149 | +++ b/drivers/media/usb/dvb-usb/dibusb-common.c |
2150 | @@ -382,9 +382,9 @@ int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state) |
2151 | if (buf[0] != 0) |
2152 | deb_info("key: %*ph\n", 5, buf); |
2153 | |
2154 | +ret: |
2155 | kfree(buf); |
2156 | |
2157 | -ret: |
2158 | return ret; |
2159 | } |
2160 | EXPORT_SYMBOL(dibusb_rc_query); |
2161 | diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c |
2162 | index 9a4d8684dd32..df2e7756927f 100644 |
2163 | --- a/drivers/mfd/tps65217.c |
2164 | +++ b/drivers/mfd/tps65217.c |
2165 | @@ -424,6 +424,24 @@ static int tps65217_probe(struct i2c_client *client, |
2166 | return 0; |
2167 | } |
2168 | |
2169 | +static int tps65217_remove(struct i2c_client *client) |
2170 | +{ |
2171 | + struct tps65217 *tps = i2c_get_clientdata(client); |
2172 | + unsigned int virq; |
2173 | + int i; |
2174 | + |
2175 | + for (i = 0; i < ARRAY_SIZE(tps65217_irqs); i++) { |
2176 | + virq = irq_find_mapping(tps->irq_domain, i); |
2177 | + if (virq) |
2178 | + irq_dispose_mapping(virq); |
2179 | + } |
2180 | + |
2181 | + irq_domain_remove(tps->irq_domain); |
2182 | + tps->irq_domain = NULL; |
2183 | + |
2184 | + return 0; |
2185 | +} |
2186 | + |
2187 | static const struct i2c_device_id tps65217_id_table[] = { |
2188 | {"tps65217", TPS65217}, |
2189 | { /* sentinel */ } |
2190 | @@ -437,6 +455,7 @@ static struct i2c_driver tps65217_driver = { |
2191 | }, |
2192 | .id_table = tps65217_id_table, |
2193 | .probe = tps65217_probe, |
2194 | + .remove = tps65217_remove, |
2195 | }; |
2196 | |
2197 | static int __init tps65217_init(void) |
2198 | diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c |
2199 | index 8cac7ef9ad0d..dbe676de7a19 100644 |
2200 | --- a/drivers/misc/mei/bus.c |
2201 | +++ b/drivers/misc/mei/bus.c |
2202 | @@ -408,7 +408,7 @@ bool mei_cldev_enabled(struct mei_cl_device *cldev) |
2203 | EXPORT_SYMBOL_GPL(mei_cldev_enabled); |
2204 | |
2205 | /** |
2206 | - * mei_cldev_enable_device - enable me client device |
2207 | + * mei_cldev_enable - enable me client device |
2208 | * create connection with me client |
2209 | * |
2210 | * @cldev: me client device |
2211 | diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c |
2212 | index f999a8d3c9c4..e2af61f7e3b6 100644 |
2213 | --- a/drivers/misc/mei/client.c |
2214 | +++ b/drivers/misc/mei/client.c |
2215 | @@ -425,7 +425,7 @@ static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl) |
2216 | * |
2217 | * @cl: host client |
2218 | * @length: size of the buffer |
2219 | - * @type: operation type |
2220 | + * @fop_type: operation type |
2221 | * @fp: associated file pointer (might be NULL) |
2222 | * |
2223 | * Return: cb on success and NULL on failure |
2224 | @@ -459,7 +459,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, |
2225 | * |
2226 | * @cl: host client |
2227 | * @length: size of the buffer |
2228 | - * @type: operation type |
2229 | + * @fop_type: operation type |
2230 | * @fp: associated file pointer (might be NULL) |
2231 | * |
2232 | * Return: cb on success and NULL on failure |
2233 | @@ -1536,7 +1536,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, |
2234 | |
2235 | rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1; |
2236 | if (rets < 0) |
2237 | - return rets; |
2238 | + goto err; |
2239 | |
2240 | if (rets == 0) { |
2241 | cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); |
2242 | @@ -1570,11 +1570,8 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, |
2243 | cb->buf.size, cb->buf_idx); |
2244 | |
2245 | rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); |
2246 | - if (rets) { |
2247 | - cl->status = rets; |
2248 | - list_move_tail(&cb->list, &cmpl_list->list); |
2249 | - return rets; |
2250 | - } |
2251 | + if (rets) |
2252 | + goto err; |
2253 | |
2254 | cl->status = 0; |
2255 | cl->writing_state = MEI_WRITING; |
2256 | @@ -1582,14 +1579,21 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, |
2257 | cb->completed = mei_hdr.msg_complete == 1; |
2258 | |
2259 | if (first_chunk) { |
2260 | - if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) |
2261 | - return -EIO; |
2262 | + if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) { |
2263 | + rets = -EIO; |
2264 | + goto err; |
2265 | + } |
2266 | } |
2267 | |
2268 | if (mei_hdr.msg_complete) |
2269 | list_move_tail(&cb->list, &dev->write_waiting_list.list); |
2270 | |
2271 | return 0; |
2272 | + |
2273 | +err: |
2274 | + cl->status = rets; |
2275 | + list_move_tail(&cb->list, &cmpl_list->list); |
2276 | + return rets; |
2277 | } |
2278 | |
2279 | /** |
2280 | diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c |
2281 | index 3678220964fe..df382be62634 100644 |
2282 | --- a/drivers/mmc/card/mmc_test.c |
2283 | +++ b/drivers/mmc/card/mmc_test.c |
2284 | @@ -818,7 +818,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, |
2285 | struct mmc_async_req *cur_areq = &test_areq[0].areq; |
2286 | struct mmc_async_req *other_areq = &test_areq[1].areq; |
2287 | int i; |
2288 | - int ret; |
2289 | + int ret = RESULT_OK; |
2290 | |
2291 | test_areq[0].test = test; |
2292 | test_areq[1].test = test; |
2293 | diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h |
2294 | index 521f1c55c19e..be5b527472f9 100644 |
2295 | --- a/drivers/net/wireless/ath/ath10k/core.h |
2296 | +++ b/drivers/net/wireless/ath/ath10k/core.h |
2297 | @@ -557,10 +557,8 @@ enum ath10k_fw_features { |
2298 | */ |
2299 | ATH10K_FW_FEATURE_BTCOEX_PARAM = 14, |
2300 | |
2301 | - /* Older firmware with HTT delivers incorrect tx status for null func |
2302 | - * frames to driver, but this fixed in 10.2 and 10.4 firmware versions. |
2303 | - * Also this workaround results in reporting of incorrect null func |
2304 | - * status for 10.4. This flag is used to skip the workaround. |
2305 | + /* Unused flag and proven to be not working, enable this if you want |
2306 | + * to experiment sending NULL func data frames in HTT TX |
2307 | */ |
2308 | ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15, |
2309 | |
2310 | diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c |
2311 | index 90eeb1c82e8b..f2e85eb22afe 100644 |
2312 | --- a/drivers/net/wireless/ath/ath10k/mac.c |
2313 | +++ b/drivers/net/wireless/ath/ath10k/mac.c |
2314 | @@ -3255,8 +3255,6 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar, |
2315 | if (ar->htt.target_version_major < 3 && |
2316 | (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && |
2317 | !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, |
2318 | - ar->running_fw->fw_file.fw_features) && |
2319 | - !test_bit(ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR, |
2320 | ar->running_fw->fw_file.fw_features)) |
2321 | return ATH10K_HW_TXRX_MGMT; |
2322 | |
2323 | diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c |
2324 | index 7d9b0da1b010..2ffc1fe4923b 100644 |
2325 | --- a/drivers/net/wireless/ath/ath10k/spectral.c |
2326 | +++ b/drivers/net/wireless/ath/ath10k/spectral.c |
2327 | @@ -338,7 +338,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file, |
2328 | } else { |
2329 | res = -EINVAL; |
2330 | } |
2331 | - } else if (strncmp("background", buf, 9) == 0) { |
2332 | + } else if (strncmp("background", buf, 10) == 0) { |
2333 | res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND); |
2334 | } else if (strncmp("manual", buf, 6) == 0) { |
2335 | res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL); |
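
The one-character ath10k change matters because strncmp() compares only the given number of bytes: with a length of 9, any input beginning with "backgroun" was accepted as the "background" command; comparing all 10 characters requires the full keyword. A quick check:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *buf = "backgrounX";

            printf("n=9 : %s\n", strncmp("background", buf, 9)  == 0 ? "match" : "no match");
            printf("n=10: %s\n", strncmp("background", buf, 10) == 0 ? "match" : "no match");
            return 0;
    }
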
2336 | diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c |
2337 | index 4ac928bf1f8e..264466f59c57 100644 |
2338 | --- a/drivers/net/wireless/realtek/rtlwifi/base.c |
2339 | +++ b/drivers/net/wireless/realtek/rtlwifi/base.c |
2340 | @@ -1303,13 +1303,12 @@ EXPORT_SYMBOL_GPL(rtl_action_proc); |
2341 | |
2342 | static void setup_arp_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc) |
2343 | { |
2344 | - struct ieee80211_hw *hw = rtlpriv->hw; |
2345 | - |
2346 | rtlpriv->ra.is_special_data = true; |
2347 | if (rtlpriv->cfg->ops->get_btc_status()) |
2348 | rtlpriv->btcoexist.btc_ops->btc_special_packet_notify( |
2349 | rtlpriv, 1); |
2350 | - rtl_lps_leave(hw); |
2351 | + rtlpriv->enter_ps = false; |
2352 | + schedule_work(&rtlpriv->works.lps_change_work); |
2353 | ppsc->last_delaylps_stamp_jiffies = jiffies; |
2354 | } |
2355 | |
2356 | @@ -1382,7 +1381,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx, |
2357 | |
2358 | if (is_tx) { |
2359 | rtlpriv->ra.is_special_data = true; |
2360 | - rtl_lps_leave(hw); |
2361 | + rtlpriv->enter_ps = false; |
2362 | + schedule_work(&rtlpriv->works.lps_change_work); |
2363 | ppsc->last_delaylps_stamp_jiffies = jiffies; |
2364 | } |
2365 | |
2366 | diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c |
2367 | index 4da4e458142c..8e7f23c11680 100644 |
2368 | --- a/drivers/net/wireless/realtek/rtlwifi/core.c |
2369 | +++ b/drivers/net/wireless/realtek/rtlwifi/core.c |
2370 | @@ -1150,8 +1150,10 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, |
2371 | } else { |
2372 | mstatus = RT_MEDIA_DISCONNECT; |
2373 | |
2374 | - if (mac->link_state == MAC80211_LINKED) |
2375 | - rtl_lps_leave(hw); |
2376 | + if (mac->link_state == MAC80211_LINKED) { |
2377 | + rtlpriv->enter_ps = false; |
2378 | + schedule_work(&rtlpriv->works.lps_change_work); |
2379 | + } |
2380 | if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE) |
2381 | rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); |
2382 | mac->link_state = MAC80211_NOLINK; |
2383 | @@ -1429,7 +1431,8 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw, |
2384 | } |
2385 | |
2386 | if (mac->link_state == MAC80211_LINKED) { |
2387 | - rtl_lps_leave(hw); |
2388 | + rtlpriv->enter_ps = false; |
2389 | + schedule_work(&rtlpriv->works.lps_change_work); |
2390 | mac->link_state = MAC80211_LINKED_SCANNING; |
2391 | } else { |
2392 | rtl_ips_nic_on(hw); |
2393 | diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c |
2394 | index 5be4fc96002d..0dfa9eac3926 100644 |
2395 | --- a/drivers/net/wireless/realtek/rtlwifi/pci.c |
2396 | +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c |
2397 | @@ -663,9 +663,11 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) |
2398 | } |
2399 | |
2400 | if (((rtlpriv->link_info.num_rx_inperiod + |
2401 | - rtlpriv->link_info.num_tx_inperiod) > 8) || |
2402 | - (rtlpriv->link_info.num_rx_inperiod > 2)) |
2403 | - rtl_lps_leave(hw); |
2404 | + rtlpriv->link_info.num_tx_inperiod) > 8) || |
2405 | + (rtlpriv->link_info.num_rx_inperiod > 2)) { |
2406 | + rtlpriv->enter_ps = false; |
2407 | + schedule_work(&rtlpriv->works.lps_change_work); |
2408 | + } |
2409 | } |
2410 | |
2411 | static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, |
2412 | @@ -916,8 +918,10 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) |
2413 | } |
2414 | if (((rtlpriv->link_info.num_rx_inperiod + |
2415 | rtlpriv->link_info.num_tx_inperiod) > 8) || |
2416 | - (rtlpriv->link_info.num_rx_inperiod > 2)) |
2417 | - rtl_lps_leave(hw); |
2418 | + (rtlpriv->link_info.num_rx_inperiod > 2)) { |
2419 | + rtlpriv->enter_ps = false; |
2420 | + schedule_work(&rtlpriv->works.lps_change_work); |
2421 | + } |
2422 | skb = new_skb; |
2423 | no_new: |
2424 | if (rtlpriv->use_new_trx_flow) { |
2425 | diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c |
2426 | index d0ffc4d508cf..18d979affc18 100644 |
2427 | --- a/drivers/net/wireless/realtek/rtlwifi/ps.c |
2428 | +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c |
2429 | @@ -407,8 +407,8 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode) |
2430 | } |
2431 | } |
2432 | |
2433 | -/* Interrupt safe routine to enter the leisure power save mode.*/ |
2434 | -static void rtl_lps_enter_core(struct ieee80211_hw *hw) |
2435 | +/*Enter the leisure power save mode.*/ |
2436 | +void rtl_lps_enter(struct ieee80211_hw *hw) |
2437 | { |
2438 | struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); |
2439 | struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); |
2440 | @@ -444,9 +444,10 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw) |
2441 | |
2442 | spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); |
2443 | } |
2444 | +EXPORT_SYMBOL(rtl_lps_enter); |
2445 | |
2446 | -/* Interrupt safe routine to leave the leisure power save mode.*/ |
2447 | -static void rtl_lps_leave_core(struct ieee80211_hw *hw) |
2448 | +/*Leave the leisure power save mode.*/ |
2449 | +void rtl_lps_leave(struct ieee80211_hw *hw) |
2450 | { |
2451 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
2452 | struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); |
2453 | @@ -476,6 +477,7 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw) |
2454 | } |
2455 | spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); |
2456 | } |
2457 | +EXPORT_SYMBOL(rtl_lps_leave); |
2458 | |
2459 | /* For sw LPS*/ |
2460 | void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len) |
2461 | @@ -668,34 +670,12 @@ void rtl_lps_change_work_callback(struct work_struct *work) |
2462 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
2463 | |
2464 | if (rtlpriv->enter_ps) |
2465 | - rtl_lps_enter_core(hw); |
2466 | + rtl_lps_enter(hw); |
2467 | else |
2468 | - rtl_lps_leave_core(hw); |
2469 | + rtl_lps_leave(hw); |
2470 | } |
2471 | EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback); |
2472 | |
2473 | -void rtl_lps_enter(struct ieee80211_hw *hw) |
2474 | -{ |
2475 | - struct rtl_priv *rtlpriv = rtl_priv(hw); |
2476 | - |
2477 | - if (!in_interrupt()) |
2478 | - return rtl_lps_enter_core(hw); |
2479 | - rtlpriv->enter_ps = true; |
2480 | - schedule_work(&rtlpriv->works.lps_change_work); |
2481 | -} |
2482 | -EXPORT_SYMBOL_GPL(rtl_lps_enter); |
2483 | - |
2484 | -void rtl_lps_leave(struct ieee80211_hw *hw) |
2485 | -{ |
2486 | - struct rtl_priv *rtlpriv = rtl_priv(hw); |
2487 | - |
2488 | - if (!in_interrupt()) |
2489 | - return rtl_lps_leave_core(hw); |
2490 | - rtlpriv->enter_ps = false; |
2491 | - schedule_work(&rtlpriv->works.lps_change_work); |
2492 | -} |
2493 | -EXPORT_SYMBOL_GPL(rtl_lps_leave); |
2494 | - |
2495 | void rtl_swlps_wq_callback(void *data) |
2496 | { |
2497 | struct rtl_works *rtlworks = container_of_dwork_rtl(data, |
2498 | diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c |
2499 | index e04f69beb42d..3452983d3569 100644 |
2500 | --- a/drivers/pci/host/pcie-rockchip.c |
2501 | +++ b/drivers/pci/host/pcie-rockchip.c |
2502 | @@ -533,7 +533,7 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) |
2503 | |
2504 | /* Fix the transmitted FTS count desired to exit from L0s. */ |
2505 | status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1); |
2506 | - status = (status & PCIE_CORE_CTRL_PLC1_FTS_MASK) | |
2507 | + status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) | |
2508 | (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT); |
2509 | rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1); |
2510 | |
2511 | @@ -590,8 +590,8 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) |
2512 | |
2513 | /* Check the final link width from negotiated lane counter from MGMT */ |
2514 | status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); |
2515 | - status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >> |
2516 | - PCIE_CORE_PL_CONF_LANE_MASK); |
2517 | + status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >> |
2518 | + PCIE_CORE_PL_CONF_LANE_SHIFT); |
2519 | dev_dbg(dev, "current link width is x%d\n", status); |
2520 | |
2521 | rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, |
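
Both Rockchip PCIe fixes are register-field slips: updating a field must clear it with the inverted mask before OR-ing in the new value, and extracting a field must shift by the field's offset, not by its mask. A generic read-modify-write sketch with made-up FIELD_SHIFT/FIELD_MASK constants:

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_SHIFT 8
    #define FIELD_MASK  (0xffu << FIELD_SHIFT)

    static uint32_t field_set(uint32_t reg, uint32_t val)
    {
            return (reg & ~FIELD_MASK) | (val << FIELD_SHIFT);
    }

    static uint32_t field_get(uint32_t reg)
    {
            return (reg & FIELD_MASK) >> FIELD_SHIFT;
    }

    int main(void)
    {
            uint32_t reg = 0xdeadbeef;

            reg = field_set(reg, 0x3c);
            printf("reg=0x%08x field=0x%x\n", (unsigned)reg, (unsigned)field_get(reg));
            return 0;
    }
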
2522 | diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c |
2523 | index dc67f39779ec..c614ff7c3bc3 100644 |
2524 | --- a/drivers/pci/hotplug/rpadlpar_core.c |
2525 | +++ b/drivers/pci/hotplug/rpadlpar_core.c |
2526 | @@ -257,8 +257,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn) |
2527 | |
2528 | static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn) |
2529 | { |
2530 | - if (vio_find_node(dn)) |
2531 | + struct vio_dev *vio_dev; |
2532 | + |
2533 | + vio_dev = vio_find_node(dn); |
2534 | + if (vio_dev) { |
2535 | + put_device(&vio_dev->dev); |
2536 | return -EINVAL; |
2537 | + } |
2538 | |
2539 | if (!vio_register_device_node(dn)) { |
2540 | printk(KERN_ERR |
2541 | @@ -334,6 +339,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn) |
2542 | return -EINVAL; |
2543 | |
2544 | vio_unregister_device(vio_dev); |
2545 | + |
2546 | + put_device(&vio_dev->dev); |
2547 | + |
2548 | return 0; |
2549 | } |
2550 | |
2551 | diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c |
2552 | index ad70507cfb56..3455f752d5e4 100644 |
2553 | --- a/drivers/pci/msi.c |
2554 | +++ b/drivers/pci/msi.c |
2555 | @@ -1294,7 +1294,8 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) |
2556 | } else if (dev->msi_enabled) { |
2557 | struct msi_desc *entry = first_pci_msi_entry(dev); |
2558 | |
2559 | - if (WARN_ON_ONCE(!entry || nr >= entry->nvec_used)) |
2560 | + if (WARN_ON_ONCE(!entry || !entry->affinity || |
2561 | + nr >= entry->nvec_used)) |
2562 | return NULL; |
2563 | |
2564 | return &entry->affinity[nr]; |
2565 | diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c |
2566 | index c232729f5b1b..3a035e073889 100644 |
2567 | --- a/drivers/pci/quirks.c |
2568 | +++ b/drivers/pci/quirks.c |
2569 | @@ -3137,8 +3137,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay); |
2570 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay); |
2571 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay); |
2572 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay); |
2573 | + |
2574 | /* |
2575 | - * Some devices may pass our check in pci_intx_mask_supported if |
2576 | + * Some devices may pass our check in pci_intx_mask_supported() if |
2577 | * PCI_COMMAND_INTX_DISABLE works though they actually do not properly |
2578 | * support this feature. |
2579 | */ |
2580 | @@ -3146,53 +3147,139 @@ static void quirk_broken_intx_masking(struct pci_dev *dev) |
2581 | { |
2582 | dev->broken_intx_masking = 1; |
2583 | } |
2584 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030, |
2585 | - quirk_broken_intx_masking); |
2586 | -DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */ |
2587 | - quirk_broken_intx_masking); |
2588 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030, |
2589 | + quirk_broken_intx_masking); |
2590 | +DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */ |
2591 | + quirk_broken_intx_masking); |
2592 | + |
2593 | /* |
2594 | * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10) |
2595 | * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC |
2596 | * |
2597 | * RTL8110SC - Fails under PCI device assignment using DisINTx masking. |
2598 | */ |
2599 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169, |
2600 | - quirk_broken_intx_masking); |
2601 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, |
2602 | - quirk_broken_intx_masking); |
2603 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169, |
2604 | + quirk_broken_intx_masking); |
2605 | |
2606 | /* |
2607 | * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking, |
2608 | * DisINTx can be set but the interrupt status bit is non-functional. |
2609 | */ |
2610 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1572, |
2611 | - quirk_broken_intx_masking); |
2612 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1574, |
2613 | - quirk_broken_intx_masking); |
2614 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1580, |
2615 | - quirk_broken_intx_masking); |
2616 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1581, |
2617 | - quirk_broken_intx_masking); |
2618 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1583, |
2619 | - quirk_broken_intx_masking); |
2620 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1584, |
2621 | - quirk_broken_intx_masking); |
2622 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1585, |
2623 | - quirk_broken_intx_masking); |
2624 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1586, |
2625 | - quirk_broken_intx_masking); |
2626 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1587, |
2627 | - quirk_broken_intx_masking); |
2628 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1588, |
2629 | - quirk_broken_intx_masking); |
2630 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1589, |
2631 | - quirk_broken_intx_masking); |
2632 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d0, |
2633 | - quirk_broken_intx_masking); |
2634 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d1, |
2635 | - quirk_broken_intx_masking); |
2636 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d2, |
2637 | - quirk_broken_intx_masking); |
2638 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, |
2639 | + quirk_broken_intx_masking); |
2640 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, |
2641 | + quirk_broken_intx_masking); |
2642 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, |
2643 | + quirk_broken_intx_masking); |
2644 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, |
2645 | + quirk_broken_intx_masking); |
2646 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, |
2647 | + quirk_broken_intx_masking); |
2648 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, |
2649 | + quirk_broken_intx_masking); |
2650 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, |
2651 | + quirk_broken_intx_masking); |
2652 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, |
2653 | + quirk_broken_intx_masking); |
2654 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, |
2655 | + quirk_broken_intx_masking); |
2656 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, |
2657 | + quirk_broken_intx_masking); |
2658 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, |
2659 | + quirk_broken_intx_masking); |
2660 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, |
2661 | + quirk_broken_intx_masking); |
2662 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, |
2663 | + quirk_broken_intx_masking); |
2664 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, |
2665 | + quirk_broken_intx_masking); |
2666 | + |
2667 | +static u16 mellanox_broken_intx_devs[] = { |
2668 | + PCI_DEVICE_ID_MELLANOX_HERMON_SDR, |
2669 | + PCI_DEVICE_ID_MELLANOX_HERMON_DDR, |
2670 | + PCI_DEVICE_ID_MELLANOX_HERMON_QDR, |
2671 | + PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2, |
2672 | + PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2, |
2673 | + PCI_DEVICE_ID_MELLANOX_HERMON_EN, |
2674 | + PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2, |
2675 | + PCI_DEVICE_ID_MELLANOX_CONNECTX_EN, |
2676 | + PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2, |
2677 | + PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2, |
2678 | + PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2, |
2679 | + PCI_DEVICE_ID_MELLANOX_CONNECTX2, |
2680 | + PCI_DEVICE_ID_MELLANOX_CONNECTX3, |
2681 | + PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO, |
2682 | +}; |
2683 | + |
2684 | +#define CONNECTX_4_CURR_MAX_MINOR 99 |
2685 | +#define CONNECTX_4_INTX_SUPPORT_MINOR 14 |
2686 | + |
2687 | +/* |
2688 | + * Check ConnectX-4/LX FW version to see if it supports legacy interrupts. |
2689 | + * If so, don't mark it as broken. |
2690 | + * FW minor > 99 means older FW version format and no INTx masking support. |
2691 | + * FW minor < 14 means new FW version format and no INTx masking support. |
2692 | + */ |
2693 | +static void mellanox_check_broken_intx_masking(struct pci_dev *pdev) |
2694 | +{ |
2695 | + __be32 __iomem *fw_ver; |
2696 | + u16 fw_major; |
2697 | + u16 fw_minor; |
2698 | + u16 fw_subminor; |
2699 | + u32 fw_maj_min; |
2700 | + u32 fw_sub_min; |
2701 | + int i; |
2702 | + |
2703 | + for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) { |
2704 | + if (pdev->device == mellanox_broken_intx_devs[i]) { |
2705 | + pdev->broken_intx_masking = 1; |
2706 | + return; |
2707 | + } |
2708 | + } |
2709 | + |
2710 | + /* Getting here means Connect-IB cards and up. Connect-IB has no INTx |
2711 | + * support so shouldn't be checked further |
2712 | + */ |
2713 | + if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB) |
2714 | + return; |
2715 | + |
2716 | + if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 && |
2717 | + pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) |
2718 | + return; |
2719 | + |
2720 | + /* For ConnectX-4 and ConnectX-4LX, need to check FW support */ |
2721 | + if (pci_enable_device_mem(pdev)) { |
2722 | + dev_warn(&pdev->dev, "Can't enable device memory\n"); |
2723 | + return; |
2724 | + } |
2725 | + |
2726 | + fw_ver = ioremap(pci_resource_start(pdev, 0), 4); |
2727 | + if (!fw_ver) { |
2728 | + dev_warn(&pdev->dev, "Can't map ConnectX-4 initialization segment\n"); |
2729 | + goto out; |
2730 | + } |
2731 | + |
2732 | + /* Reading from resource space should be 32b aligned */ |
2733 | + fw_maj_min = ioread32be(fw_ver); |
2734 | + fw_sub_min = ioread32be(fw_ver + 1); |
2735 | + fw_major = fw_maj_min & 0xffff; |
2736 | + fw_minor = fw_maj_min >> 16; |
2737 | + fw_subminor = fw_sub_min & 0xffff; |
2738 | + if (fw_minor > CONNECTX_4_CURR_MAX_MINOR || |
2739 | + fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) { |
2740 | + dev_warn(&pdev->dev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n", |
2741 | + fw_major, fw_minor, fw_subminor, pdev->device == |
2742 | + PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14); |
2743 | + pdev->broken_intx_masking = 1; |
2744 | + } |
2745 | + |
2746 | + iounmap(fw_ver); |
2747 | + |
2748 | +out: |
2749 | + pci_disable_device(pdev); |
2750 | +} |
2751 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, |
2752 | + mellanox_check_broken_intx_masking); |
2753 | |
2754 | static void quirk_no_bus_reset(struct pci_dev *dev) |
2755 | { |
2756 | @@ -3255,6 +3342,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C |
2757 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE, |
2758 | quirk_thunderbolt_hotplug_msi); |
2759 | |
2760 | +static void quirk_chelsio_extend_vpd(struct pci_dev *dev) |
2761 | +{ |
2762 | + pci_set_vpd_size(dev, 8192); |
2763 | +} |
2764 | + |
2765 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd); |
2766 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd); |
2767 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd); |
2768 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd); |
2769 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd); |
2770 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd); |
2771 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd); |
2772 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd); |
2773 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd); |
2774 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd); |
2775 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd); |
2776 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd); |
2777 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd); |
2778 | + |
2779 | #ifdef CONFIG_ACPI |
2780 | /* |
2781 | * Apple: Shutdown Cactus Ridge Thunderbolt controller. |
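Note on the Mellanox quirk added above: it decides whether to set broken_intx_masking by reading two big-endian 32-bit words from the beginning of BAR0 (the ConnectX-4 initialization segment) with ioread32be(), unpacking them into major/minor/subminor firmware fields, and then checking that the minor number falls in the 14..99 window. A small stand-alone sketch of that unpacking and comparison; the word values in main() are invented examples, not data read from real hardware:

#include <stdint.h>
#include <stdio.h>

#define CONNECTX_4_CURR_MAX_MINOR       99
#define CONNECTX_4_INTX_SUPPORT_MINOR   14

/* Unpack the two words the quirk reads from the device and decide
 * whether the firmware is new enough to support INTx masking. */
static int fw_supports_intx(uint32_t fw_maj_min, uint32_t fw_sub_min)
{
        uint16_t fw_major    = fw_maj_min & 0xffff;
        uint16_t fw_minor    = fw_maj_min >> 16;
        uint16_t fw_subminor = fw_sub_min & 0xffff;

        printf("FW %u.%u.%u\n", fw_major, fw_minor, fw_subminor);

        /* minor > 99: old numbering scheme; minor < 14: too old either way */
        return !(fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
                 fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR);
}

int main(void)
{
        uint32_t fw_maj_min = (14u << 16) | 12u;        /* major 12, minor 14 */
        uint32_t fw_sub_min = 1100u;                    /* subminor 1100      */

        printf("INTx masking supported: %s\n",
               fw_supports_intx(fw_maj_min, fw_sub_min) ? "yes" : "no");
        return 0;
}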
2782 | diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c |
2783 | index aea310a91821..c9a146948192 100644 |
2784 | --- a/drivers/pinctrl/pinctrl-amd.c |
2785 | +++ b/drivers/pinctrl/pinctrl-amd.c |
2786 | @@ -382,26 +382,21 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) |
2787 | { |
2788 | int ret = 0; |
2789 | u32 pin_reg; |
2790 | - unsigned long flags; |
2791 | - bool level_trig; |
2792 | - u32 active_level; |
2793 | + unsigned long flags, irq_flags; |
2794 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); |
2795 | struct amd_gpio *gpio_dev = gpiochip_get_data(gc); |
2796 | |
2797 | spin_lock_irqsave(&gpio_dev->lock, flags); |
2798 | pin_reg = readl(gpio_dev->base + (d->hwirq)*4); |
2799 | |
2800 | - /* |
2801 | - * When level_trig is set EDGE and active_level is set HIGH in BIOS |
2802 | - * default settings, ignore incoming settings from client and use |
2803 | - * BIOS settings to configure GPIO register. |
2804 | + /* Ignore the settings coming from the client and |
2805 | + * read the values from the ACPI tables |
2806 | + * while setting the trigger type |
2807 | */ |
2808 | - level_trig = !(pin_reg & (LEVEL_TRIGGER << LEVEL_TRIG_OFF)); |
2809 | - active_level = pin_reg & (ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); |
2810 | |
2811 | - if(level_trig && |
2812 | - ((active_level >> ACTIVE_LEVEL_OFF) == ACTIVE_HIGH)) |
2813 | - type = IRQ_TYPE_EDGE_FALLING; |
2814 | + irq_flags = irq_get_trigger_type(d->irq); |
2815 | + if (irq_flags != IRQ_TYPE_NONE) |
2816 | + type = irq_flags; |
2817 | |
2818 | switch (type & IRQ_TYPE_SENSE_MASK) { |
2819 | case IRQ_TYPE_EDGE_RISING: |
2820 | diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c |
2821 | index 61f39abf5dc8..82d67715ce76 100644 |
2822 | --- a/drivers/platform/x86/fujitsu-laptop.c |
2823 | +++ b/drivers/platform/x86/fujitsu-laptop.c |
2824 | @@ -177,43 +177,43 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event); |
2825 | |
2826 | #if IS_ENABLED(CONFIG_LEDS_CLASS) |
2827 | static enum led_brightness logolamp_get(struct led_classdev *cdev); |
2828 | -static void logolamp_set(struct led_classdev *cdev, |
2829 | +static int logolamp_set(struct led_classdev *cdev, |
2830 | enum led_brightness brightness); |
2831 | |
2832 | static struct led_classdev logolamp_led = { |
2833 | .name = "fujitsu::logolamp", |
2834 | .brightness_get = logolamp_get, |
2835 | - .brightness_set = logolamp_set |
2836 | + .brightness_set_blocking = logolamp_set |
2837 | }; |
2838 | |
2839 | static enum led_brightness kblamps_get(struct led_classdev *cdev); |
2840 | -static void kblamps_set(struct led_classdev *cdev, |
2841 | +static int kblamps_set(struct led_classdev *cdev, |
2842 | enum led_brightness brightness); |
2843 | |
2844 | static struct led_classdev kblamps_led = { |
2845 | .name = "fujitsu::kblamps", |
2846 | .brightness_get = kblamps_get, |
2847 | - .brightness_set = kblamps_set |
2848 | + .brightness_set_blocking = kblamps_set |
2849 | }; |
2850 | |
2851 | static enum led_brightness radio_led_get(struct led_classdev *cdev); |
2852 | -static void radio_led_set(struct led_classdev *cdev, |
2853 | +static int radio_led_set(struct led_classdev *cdev, |
2854 | enum led_brightness brightness); |
2855 | |
2856 | static struct led_classdev radio_led = { |
2857 | .name = "fujitsu::radio_led", |
2858 | .brightness_get = radio_led_get, |
2859 | - .brightness_set = radio_led_set |
2860 | + .brightness_set_blocking = radio_led_set |
2861 | }; |
2862 | |
2863 | static enum led_brightness eco_led_get(struct led_classdev *cdev); |
2864 | -static void eco_led_set(struct led_classdev *cdev, |
2865 | +static int eco_led_set(struct led_classdev *cdev, |
2866 | enum led_brightness brightness); |
2867 | |
2868 | static struct led_classdev eco_led = { |
2869 | .name = "fujitsu::eco_led", |
2870 | .brightness_get = eco_led_get, |
2871 | - .brightness_set = eco_led_set |
2872 | + .brightness_set_blocking = eco_led_set |
2873 | }; |
2874 | #endif |
2875 | |
2876 | @@ -267,48 +267,48 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2) |
2877 | #if IS_ENABLED(CONFIG_LEDS_CLASS) |
2878 | /* LED class callbacks */ |
2879 | |
2880 | -static void logolamp_set(struct led_classdev *cdev, |
2881 | +static int logolamp_set(struct led_classdev *cdev, |
2882 | enum led_brightness brightness) |
2883 | { |
2884 | if (brightness >= LED_FULL) { |
2885 | call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON); |
2886 | - call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON); |
2887 | + return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON); |
2888 | } else if (brightness >= LED_HALF) { |
2889 | call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON); |
2890 | - call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF); |
2891 | + return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF); |
2892 | } else { |
2893 | - call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF); |
2894 | + return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF); |
2895 | } |
2896 | } |
2897 | |
2898 | -static void kblamps_set(struct led_classdev *cdev, |
2899 | +static int kblamps_set(struct led_classdev *cdev, |
2900 | enum led_brightness brightness) |
2901 | { |
2902 | if (brightness >= LED_FULL) |
2903 | - call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON); |
2904 | + return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON); |
2905 | else |
2906 | - call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF); |
2907 | + return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF); |
2908 | } |
2909 | |
2910 | -static void radio_led_set(struct led_classdev *cdev, |
2911 | +static int radio_led_set(struct led_classdev *cdev, |
2912 | enum led_brightness brightness) |
2913 | { |
2914 | if (brightness >= LED_FULL) |
2915 | - call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON); |
2916 | + return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON); |
2917 | else |
2918 | - call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0); |
2919 | + return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0); |
2920 | } |
2921 | |
2922 | -static void eco_led_set(struct led_classdev *cdev, |
2923 | +static int eco_led_set(struct led_classdev *cdev, |
2924 | enum led_brightness brightness) |
2925 | { |
2926 | int curr; |
2927 | |
2928 | curr = call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0); |
2929 | if (brightness >= LED_FULL) |
2930 | - call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON); |
2931 | + return call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON); |
2932 | else |
2933 | - call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON); |
2934 | + return call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON); |
2935 | } |
2936 | |
2937 | static enum led_brightness logolamp_get(struct led_classdev *cdev) |
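The fujitsu-laptop hunks above switch the LED callbacks from .brightness_set, which returns void and must not sleep, to .brightness_set_blocking, which may sleep and whose int return value the LED core can propagate. The logolamp mapping itself is a simple two-threshold ladder. Below is a user-space sketch of that ladder under the assumption that set_lamp() stands in for call_fext_func(); the stand-in function and its output are invented for illustration:

#include <stdio.h>

/* Mirrors the kernel's enum led_brightness values. */
enum { LED_OFF = 0, LED_HALF = 127, LED_FULL = 255 };

/* Invented stand-in for call_fext_func(); returns 0 on success. */
static int set_lamp(const char *which, int on)
{
        printf("%s lamp %s\n", which, on ? "on" : "off");
        return 0;
}

/* Same ladder as logolamp_set(): FULL lights the lamp permanently,
 * HALF only while powered on, anything lower turns it off.  The return
 * value is what brightness_set_blocking now reports to the LED core. */
static int logolamp_set_sketch(int brightness)
{
        if (brightness >= LED_FULL) {
                set_lamp("poweron", 1);
                return set_lamp("always", 1);
        }
        if (brightness >= LED_HALF) {
                set_lamp("poweron", 1);
                return set_lamp("always", 0);
        }
        return set_lamp("poweron", 0);
}

int main(void)
{
        return logolamp_set_sketch(LED_HALF);
}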
2938 | diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c |
2939 | index 06fef2b4c814..1d4770c02e57 100644 |
2940 | --- a/drivers/rpmsg/qcom_smd.c |
2941 | +++ b/drivers/rpmsg/qcom_smd.c |
2942 | @@ -739,7 +739,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data, |
2943 | |
2944 | while (qcom_smd_get_tx_avail(channel) < tlen) { |
2945 | if (!wait) { |
2946 | - ret = -ENOMEM; |
2947 | + ret = -EAGAIN; |
2948 | goto out; |
2949 | } |
2950 | |
2951 | diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c |
2952 | index cbf010324c18..596a75924d90 100644 |
2953 | --- a/drivers/scsi/g_NCR5380.c |
2954 | +++ b/drivers/scsi/g_NCR5380.c |
2955 | @@ -170,12 +170,12 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt, |
2956 | if (ports[i]) { |
2957 | /* At this point we have our region reserved */ |
2958 | magic_configure(i, 0, magic); /* no IRQ yet */ |
2959 | - outb(0xc0, ports[i] + 9); |
2960 | - if (inb(ports[i] + 9) != 0x80) { |
2961 | + base = ports[i]; |
2962 | + outb(0xc0, base + 9); |
2963 | + if (inb(base + 9) != 0x80) { |
2964 | ret = -ENODEV; |
2965 | goto out_release; |
2966 | } |
2967 | - base = ports[i]; |
2968 | port_idx = i; |
2969 | } else |
2970 | return -EINVAL; |
2971 | diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c |
2972 | index 4c57d9abce7b..7de5d8d75480 100644 |
2973 | --- a/drivers/scsi/mvsas/mv_94xx.c |
2974 | +++ b/drivers/scsi/mvsas/mv_94xx.c |
2975 | @@ -668,7 +668,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) |
2976 | { |
2977 | u32 tmp; |
2978 | tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3)); |
2979 | - if (tmp && 1 << (slot_idx % 32)) { |
2980 | + if (tmp & 1 << (slot_idx % 32)) { |
2981 | mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx); |
2982 | mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3), |
2983 | 1 << (slot_idx % 32)); |
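The mvsas change above is a one-character fix: tmp && 1 << (slot_idx % 32) is true whenever tmp is non-zero at all, because && is a logical operator, while the intent is to test one particular bit, which requires the bitwise &. A minimal demonstration of the difference (the sample register value is invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t tmp = 0x4;     /* only bit 2 is set */
        int slot = 0;           /* but we want to test bit 0 */

        /* logical &&: any non-zero tmp makes the whole test true */
        printf("tmp && 1 << slot -> %d\n", tmp && 1 << slot);

        /* bitwise &: actually isolates the requested bit */
        printf("tmp &  1 << slot -> %d\n", (tmp & 1 << slot) != 0);

        return 0;
}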
2984 | diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c |
2985 | index f79ee61851f6..cbd6bc52050f 100644 |
2986 | --- a/drivers/staging/iio/adc/ad7606_core.c |
2987 | +++ b/drivers/staging/iio/adc/ad7606_core.c |
2988 | @@ -189,7 +189,7 @@ static ssize_t ad7606_store_oversampling_ratio(struct device *dev, |
2989 | mutex_lock(&indio_dev->mlock); |
2990 | gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1); |
2991 | gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1); |
2992 | - gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1); |
2993 | + gpio_set_value(st->pdata->gpio_os2, (ret >> 2) & 1); |
2994 | st->oversampling = lval; |
2995 | mutex_unlock(&indio_dev->mlock); |
2996 | |
2997 | diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c |
2998 | index 8be9f854510f..89dd6b989254 100644 |
2999 | --- a/drivers/staging/media/davinci_vpfe/vpfe_video.c |
3000 | +++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c |
3001 | @@ -1362,7 +1362,7 @@ static int vpfe_reqbufs(struct file *file, void *priv, |
3002 | ret = vb2_queue_init(q); |
3003 | if (ret) { |
3004 | v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n"); |
3005 | - return ret; |
3006 | + goto unlock_out; |
3007 | } |
3008 | |
3009 | fh->io_allowed = 1; |
3010 | diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c |
3011 | index d02e3e31ed29..12354440a334 100644 |
3012 | --- a/drivers/staging/octeon/ethernet.c |
3013 | +++ b/drivers/staging/octeon/ethernet.c |
3014 | @@ -776,6 +776,7 @@ static int cvm_oct_probe(struct platform_device *pdev) |
3015 | /* Initialize the device private structure. */ |
3016 | struct octeon_ethernet *priv = netdev_priv(dev); |
3017 | |
3018 | + SET_NETDEV_DEV(dev, &pdev->dev); |
3019 | dev->netdev_ops = &cvm_oct_pow_netdev_ops; |
3020 | priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; |
3021 | priv->port = CVMX_PIP_NUM_INPUT_PORTS; |
3022 | @@ -820,6 +821,7 @@ static int cvm_oct_probe(struct platform_device *pdev) |
3023 | } |
3024 | |
3025 | /* Initialize the device private structure. */ |
3026 | + SET_NETDEV_DEV(dev, &pdev->dev); |
3027 | priv = netdev_priv(dev); |
3028 | priv->netdev = dev; |
3029 | priv->of_node = cvm_oct_node_for_port(pip, interface, |
3030 | diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c |
3031 | index 0814e5894a96..205a509b0dfb 100644 |
3032 | --- a/drivers/target/iscsi/iscsi_target_tpg.c |
3033 | +++ b/drivers/target/iscsi/iscsi_target_tpg.c |
3034 | @@ -260,7 +260,6 @@ int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro |
3035 | iscsi_release_param_list(tpg->param_list); |
3036 | tpg->param_list = NULL; |
3037 | } |
3038 | - kfree(tpg); |
3039 | return -ENOMEM; |
3040 | } |
3041 | |
3042 | diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c |
3043 | index 58bb6ed18185..6ca388eca33b 100644 |
3044 | --- a/drivers/target/sbp/sbp_target.c |
3045 | +++ b/drivers/target/sbp/sbp_target.c |
3046 | @@ -928,7 +928,7 @@ static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess, |
3047 | struct sbp_target_request *req; |
3048 | int tag; |
3049 | |
3050 | - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); |
3051 | + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); |
3052 | if (tag < 0) |
3053 | return ERR_PTR(-ENOMEM); |
3054 | |
3055 | diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c |
3056 | index a2d90aca779f..1f7036c8f57b 100644 |
3057 | --- a/drivers/usb/core/config.c |
3058 | +++ b/drivers/usb/core/config.c |
3059 | @@ -234,6 +234,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, |
3060 | if (ifp->desc.bNumEndpoints >= num_ep) |
3061 | goto skip_to_next_endpoint_or_interface_descriptor; |
3062 | |
3063 | + /* Check for duplicate endpoint addresses */ |
3064 | + for (i = 0; i < ifp->desc.bNumEndpoints; ++i) { |
3065 | + if (ifp->endpoint[i].desc.bEndpointAddress == |
3066 | + d->bEndpointAddress) { |
3067 | + dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", |
3068 | + cfgno, inum, asnum, d->bEndpointAddress); |
3069 | + goto skip_to_next_endpoint_or_interface_descriptor; |
3070 | + } |
3071 | + } |
3072 | + |
3073 | endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; |
3074 | ++ifp->desc.bNumEndpoints; |
3075 | |
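The loop added to usb_parse_endpoint() above rejects an endpoint descriptor whose bEndpointAddress repeats one already recorded for the same altsetting instead of blindly appending it. It is a plain linear scan over the endpoints parsed so far; a user-space sketch of the same idea, with invented sample addresses:

#include <stdio.h>
#include <stdint.h>

static int seen_before(const uint8_t *seen, int nseen, uint8_t addr)
{
        for (int i = 0; i < nseen; i++)
                if (seen[i] == addr)
                        return 1;
        return 0;
}

int main(void)
{
        /* endpoint addresses as a (possibly broken) device reports them */
        uint8_t incoming[] = { 0x81, 0x02, 0x81 };      /* 0x81 twice */
        uint8_t seen[8];
        int nseen = 0;

        for (size_t i = 0; i < sizeof(incoming); i++) {
                if (seen_before(seen, nseen, incoming[i])) {
                        printf("skipping duplicate endpoint 0x%02X\n",
                               incoming[i]);
                        continue;
                }
                seen[nseen++] = incoming[i];
        }
        printf("accepted %d endpoint(s)\n", nseen);
        return 0;
}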
3076 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
3077 | index 0d81436c94bd..aef81a16e2c8 100644 |
3078 | --- a/drivers/usb/core/hub.c |
3079 | +++ b/drivers/usb/core/hub.c |
3080 | @@ -101,8 +101,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); |
3081 | |
3082 | static void hub_release(struct kref *kref); |
3083 | static int usb_reset_and_verify_device(struct usb_device *udev); |
3084 | -static void hub_usb3_port_prepare_disable(struct usb_hub *hub, |
3085 | - struct usb_port *port_dev); |
3086 | +static int hub_port_disable(struct usb_hub *hub, int port1, int set_state); |
3087 | |
3088 | static inline char *portspeed(struct usb_hub *hub, int portstatus) |
3089 | { |
3090 | @@ -901,34 +900,6 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1, |
3091 | } |
3092 | |
3093 | /* |
3094 | - * USB-3 does not have a similar link state as USB-2 that will avoid negotiating |
3095 | - * a connection with a plugged-in cable but will signal the host when the cable |
3096 | - * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices |
3097 | - */ |
3098 | -static int hub_port_disable(struct usb_hub *hub, int port1, int set_state) |
3099 | -{ |
3100 | - struct usb_port *port_dev = hub->ports[port1 - 1]; |
3101 | - struct usb_device *hdev = hub->hdev; |
3102 | - int ret = 0; |
3103 | - |
3104 | - if (!hub->error) { |
3105 | - if (hub_is_superspeed(hub->hdev)) { |
3106 | - hub_usb3_port_prepare_disable(hub, port_dev); |
3107 | - ret = hub_set_port_link_state(hub, port_dev->portnum, |
3108 | - USB_SS_PORT_LS_U3); |
3109 | - } else { |
3110 | - ret = usb_clear_port_feature(hdev, port1, |
3111 | - USB_PORT_FEAT_ENABLE); |
3112 | - } |
3113 | - } |
3114 | - if (port_dev->child && set_state) |
3115 | - usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED); |
3116 | - if (ret && ret != -ENODEV) |
3117 | - dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret); |
3118 | - return ret; |
3119 | -} |
3120 | - |
3121 | -/* |
3122 | * Disable a port and mark a logical connect-change event, so that some |
3123 | * time later hub_wq will disconnect() any existing usb_device on the port |
3124 | * and will re-enumerate if there actually is a device attached. |
3125 | @@ -4153,6 +4124,34 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port, |
3126 | |
3127 | #endif /* CONFIG_PM */ |
3128 | |
3129 | +/* |
3130 | + * USB-3 does not have a similar link state as USB-2 that will avoid negotiating |
3131 | + * a connection with a plugged-in cable but will signal the host when the cable |
3132 | + * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices |
3133 | + */ |
3134 | +static int hub_port_disable(struct usb_hub *hub, int port1, int set_state) |
3135 | +{ |
3136 | + struct usb_port *port_dev = hub->ports[port1 - 1]; |
3137 | + struct usb_device *hdev = hub->hdev; |
3138 | + int ret = 0; |
3139 | + |
3140 | + if (!hub->error) { |
3141 | + if (hub_is_superspeed(hub->hdev)) { |
3142 | + hub_usb3_port_prepare_disable(hub, port_dev); |
3143 | + ret = hub_set_port_link_state(hub, port_dev->portnum, |
3144 | + USB_SS_PORT_LS_U3); |
3145 | + } else { |
3146 | + ret = usb_clear_port_feature(hdev, port1, |
3147 | + USB_PORT_FEAT_ENABLE); |
3148 | + } |
3149 | + } |
3150 | + if (port_dev->child && set_state) |
3151 | + usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED); |
3152 | + if (ret && ret != -ENODEV) |
3153 | + dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret); |
3154 | + return ret; |
3155 | +} |
3156 | + |
3157 | |
3158 | /* USB 2.0 spec, 7.1.7.3 / fig 7-29: |
3159 | * |
3160 | diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h |
3161 | index 6b60e42626a2..884c43714456 100644 |
3162 | --- a/drivers/usb/dwc3/core.h |
3163 | +++ b/drivers/usb/dwc3/core.h |
3164 | @@ -43,9 +43,7 @@ |
3165 | #define DWC3_XHCI_RESOURCES_NUM 2 |
3166 | |
3167 | #define DWC3_SCRATCHBUF_SIZE 4096 /* each buffer is assumed to be 4KiB */ |
3168 | -#define DWC3_EVENT_SIZE 4 /* bytes */ |
3169 | -#define DWC3_EVENT_MAX_NUM 64 /* 2 events/endpoint */ |
3170 | -#define DWC3_EVENT_BUFFERS_SIZE (DWC3_EVENT_SIZE * DWC3_EVENT_MAX_NUM) |
3171 | +#define DWC3_EVENT_BUFFERS_SIZE 4096 |
3172 | #define DWC3_EVENT_TYPE_MASK 0xfe |
3173 | |
3174 | #define DWC3_EVENT_TYPE_DEV 0 |
3175 | @@ -303,9 +301,8 @@ |
3176 | #define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */ |
3177 | #define DWC3_DCFG_SUPERSPEED (4 << 0) |
3178 | #define DWC3_DCFG_HIGHSPEED (0 << 0) |
3179 | -#define DWC3_DCFG_FULLSPEED2 (1 << 0) |
3180 | +#define DWC3_DCFG_FULLSPEED (1 << 0) |
3181 | #define DWC3_DCFG_LOWSPEED (2 << 0) |
3182 | -#define DWC3_DCFG_FULLSPEED1 (3 << 0) |
3183 | |
3184 | #define DWC3_DCFG_NUMP_SHIFT 17 |
3185 | #define DWC3_DCFG_NUMP(n) (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f) |
3186 | @@ -397,9 +394,8 @@ |
3187 | #define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */ |
3188 | #define DWC3_DSTS_SUPERSPEED (4 << 0) |
3189 | #define DWC3_DSTS_HIGHSPEED (0 << 0) |
3190 | -#define DWC3_DSTS_FULLSPEED2 (1 << 0) |
3191 | +#define DWC3_DSTS_FULLSPEED (1 << 0) |
3192 | #define DWC3_DSTS_LOWSPEED (2 << 0) |
3193 | -#define DWC3_DSTS_FULLSPEED1 (3 << 0) |
3194 | |
3195 | /* Device Generic Command Register */ |
3196 | #define DWC3_DGCMD_SET_LMP 0x01 |
3197 | diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c |
3198 | index 6df0f5dad9a4..427291a19e6d 100644 |
3199 | --- a/drivers/usb/dwc3/dwc3-pci.c |
3200 | +++ b/drivers/usb/dwc3/dwc3-pci.c |
3201 | @@ -38,6 +38,7 @@ |
3202 | #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa |
3203 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa |
3204 | #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 |
3205 | +#define PCI_DEVICE_ID_INTEL_GLK 0x31aa |
3206 | |
3207 | static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; |
3208 | static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; |
3209 | @@ -81,7 +82,7 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3) |
3210 | int ret; |
3211 | |
3212 | struct property_entry properties[] = { |
3213 | - PROPERTY_ENTRY_STRING("dr-mode", "peripheral"), |
3214 | + PROPERTY_ENTRY_STRING("dr_mode", "peripheral"), |
3215 | { } |
3216 | }; |
3217 | |
3218 | @@ -229,6 +230,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { |
3219 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, |
3220 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, |
3221 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, |
3222 | + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), }, |
3223 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, |
3224 | { } /* Terminating Entry */ |
3225 | }; |
3226 | diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c |
3227 | index fe79d771dee4..2331469f943d 100644 |
3228 | --- a/drivers/usb/dwc3/ep0.c |
3229 | +++ b/drivers/usb/dwc3/ep0.c |
3230 | @@ -55,20 +55,13 @@ static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state) |
3231 | } |
3232 | } |
3233 | |
3234 | -static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma, |
3235 | - u32 len, u32 type, bool chain) |
3236 | +static void dwc3_ep0_prepare_one_trb(struct dwc3 *dwc, u8 epnum, |
3237 | + dma_addr_t buf_dma, u32 len, u32 type, bool chain) |
3238 | { |
3239 | - struct dwc3_gadget_ep_cmd_params params; |
3240 | struct dwc3_trb *trb; |
3241 | struct dwc3_ep *dep; |
3242 | |
3243 | - int ret; |
3244 | - |
3245 | dep = dwc->eps[epnum]; |
3246 | - if (dep->flags & DWC3_EP_BUSY) { |
3247 | - dwc3_trace(trace_dwc3_ep0, "%s still busy", dep->name); |
3248 | - return 0; |
3249 | - } |
3250 | |
3251 | trb = &dwc->ep0_trb[dep->trb_enqueue]; |
3252 | |
3253 | @@ -89,15 +82,25 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma, |
3254 | trb->ctrl |= (DWC3_TRB_CTRL_IOC |
3255 | | DWC3_TRB_CTRL_LST); |
3256 | |
3257 | - if (chain) |
3258 | + trace_dwc3_prepare_trb(dep, trb); |
3259 | +} |
3260 | + |
3261 | +static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum) |
3262 | +{ |
3263 | + struct dwc3_gadget_ep_cmd_params params; |
3264 | + struct dwc3_ep *dep; |
3265 | + int ret; |
3266 | + |
3267 | + dep = dwc->eps[epnum]; |
3268 | + if (dep->flags & DWC3_EP_BUSY) { |
3269 | + dwc3_trace(trace_dwc3_ep0, "%s still busy", dep->name); |
3270 | return 0; |
3271 | + } |
3272 | |
3273 | memset(¶ms, 0, sizeof(params)); |
3274 | params.param0 = upper_32_bits(dwc->ep0_trb_addr); |
3275 | params.param1 = lower_32_bits(dwc->ep0_trb_addr); |
3276 | |
3277 | - trace_dwc3_prepare_trb(dep, trb); |
3278 | - |
3279 | ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, ¶ms); |
3280 | if (ret < 0) { |
3281 | dwc3_trace(trace_dwc3_ep0, "%s STARTTRANSFER failed", |
3282 | @@ -308,8 +311,9 @@ void dwc3_ep0_out_start(struct dwc3 *dwc) |
3283 | { |
3284 | int ret; |
3285 | |
3286 | - ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8, |
3287 | + dwc3_ep0_prepare_one_trb(dwc, 0, dwc->ctrl_req_addr, 8, |
3288 | DWC3_TRBCTL_CONTROL_SETUP, false); |
3289 | + ret = dwc3_ep0_start_trans(dwc, 0); |
3290 | WARN_ON(ret < 0); |
3291 | } |
3292 | |
3293 | @@ -880,9 +884,9 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, |
3294 | |
3295 | dwc->ep0_next_event = DWC3_EP0_COMPLETE; |
3296 | |
3297 | - ret = dwc3_ep0_start_trans(dwc, epnum, |
3298 | - dwc->ctrl_req_addr, 0, |
3299 | - DWC3_TRBCTL_CONTROL_DATA, false); |
3300 | + dwc3_ep0_prepare_one_trb(dwc, epnum, dwc->ctrl_req_addr, |
3301 | + 0, DWC3_TRBCTL_CONTROL_DATA, false); |
3302 | + ret = dwc3_ep0_start_trans(dwc, epnum); |
3303 | WARN_ON(ret < 0); |
3304 | } |
3305 | } |
3306 | @@ -966,9 +970,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, |
3307 | req->direction = !!dep->number; |
3308 | |
3309 | if (req->request.length == 0) { |
3310 | - ret = dwc3_ep0_start_trans(dwc, dep->number, |
3311 | + dwc3_ep0_prepare_one_trb(dwc, dep->number, |
3312 | dwc->ctrl_req_addr, 0, |
3313 | DWC3_TRBCTL_CONTROL_DATA, false); |
3314 | + ret = dwc3_ep0_start_trans(dwc, dep->number); |
3315 | } else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) |
3316 | && (dep->number == 0)) { |
3317 | u32 transfer_size = 0; |
3318 | @@ -986,7 +991,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, |
3319 | if (req->request.length > DWC3_EP0_BOUNCE_SIZE) { |
3320 | transfer_size = ALIGN(req->request.length - maxpacket, |
3321 | maxpacket); |
3322 | - ret = dwc3_ep0_start_trans(dwc, dep->number, |
3323 | + dwc3_ep0_prepare_one_trb(dwc, dep->number, |
3324 | req->request.dma, |
3325 | transfer_size, |
3326 | DWC3_TRBCTL_CONTROL_DATA, |
3327 | @@ -998,9 +1003,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, |
3328 | |
3329 | dwc->ep0_bounced = true; |
3330 | |
3331 | - ret = dwc3_ep0_start_trans(dwc, dep->number, |
3332 | + dwc3_ep0_prepare_one_trb(dwc, dep->number, |
3333 | dwc->ep0_bounce_addr, transfer_size, |
3334 | DWC3_TRBCTL_CONTROL_DATA, false); |
3335 | + ret = dwc3_ep0_start_trans(dwc, dep->number); |
3336 | } else { |
3337 | ret = usb_gadget_map_request(&dwc->gadget, &req->request, |
3338 | dep->number); |
3339 | @@ -1009,9 +1015,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, |
3340 | return; |
3341 | } |
3342 | |
3343 | - ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma, |
3344 | + dwc3_ep0_prepare_one_trb(dwc, dep->number, req->request.dma, |
3345 | req->request.length, DWC3_TRBCTL_CONTROL_DATA, |
3346 | false); |
3347 | + ret = dwc3_ep0_start_trans(dwc, dep->number); |
3348 | } |
3349 | |
3350 | WARN_ON(ret < 0); |
3351 | @@ -1025,8 +1032,9 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep) |
3352 | type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3 |
3353 | : DWC3_TRBCTL_CONTROL_STATUS2; |
3354 | |
3355 | - return dwc3_ep0_start_trans(dwc, dep->number, |
3356 | + dwc3_ep0_prepare_one_trb(dwc, dep->number, |
3357 | dwc->ctrl_req_addr, 0, type, false); |
3358 | + return dwc3_ep0_start_trans(dwc, dep->number); |
3359 | } |
3360 | |
3361 | static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep) |
3362 | diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c |
3363 | index b3687e223e00..d2b860ebfe13 100644 |
3364 | --- a/drivers/usb/dwc3/gadget.c |
3365 | +++ b/drivers/usb/dwc3/gadget.c |
3366 | @@ -182,11 +182,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, |
3367 | if (req->request.status == -EINPROGRESS) |
3368 | req->request.status = status; |
3369 | |
3370 | - if (dwc->ep0_bounced && dep->number == 0) |
3371 | + if (dwc->ep0_bounced && dep->number <= 1) |
3372 | dwc->ep0_bounced = false; |
3373 | - else |
3374 | - usb_gadget_unmap_request(&dwc->gadget, &req->request, |
3375 | - req->direction); |
3376 | + |
3377 | + usb_gadget_unmap_request(&dwc->gadget, &req->request, |
3378 | + req->direction); |
3379 | |
3380 | trace_dwc3_gadget_giveback(req); |
3381 | |
3382 | @@ -1606,7 +1606,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc) |
3383 | reg |= DWC3_DCFG_LOWSPEED; |
3384 | break; |
3385 | case USB_SPEED_FULL: |
3386 | - reg |= DWC3_DCFG_FULLSPEED1; |
3387 | + reg |= DWC3_DCFG_FULLSPEED; |
3388 | break; |
3389 | case USB_SPEED_HIGH: |
3390 | reg |= DWC3_DCFG_HIGHSPEED; |
3391 | @@ -2465,8 +2465,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) |
3392 | dwc->gadget.ep0->maxpacket = 64; |
3393 | dwc->gadget.speed = USB_SPEED_HIGH; |
3394 | break; |
3395 | - case DWC3_DSTS_FULLSPEED2: |
3396 | - case DWC3_DSTS_FULLSPEED1: |
3397 | + case DWC3_DSTS_FULLSPEED: |
3398 | dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); |
3399 | dwc->gadget.ep0->maxpacket = 64; |
3400 | dwc->gadget.speed = USB_SPEED_FULL; |
3401 | diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c |
3402 | index e38b21087d26..c3c5b87b35b3 100644 |
3403 | --- a/drivers/usb/gadget/composite.c |
3404 | +++ b/drivers/usb/gadget/composite.c |
3405 | @@ -205,7 +205,7 @@ int config_ep_by_speed(struct usb_gadget *g, |
3406 | |
3407 | if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) || |
3408 | usb_endpoint_xfer_int(_ep->desc))) |
3409 | - _ep->mult = usb_endpoint_maxp(_ep->desc) & 0x7ff; |
3410 | + _ep->mult = ((usb_endpoint_maxp(_ep->desc) & 0x1800) >> 11) + 1; |
3411 | |
3412 | if (!want_comp_desc) |
3413 | return 0; |
3414 | @@ -1694,9 +1694,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) |
3415 | value = min(w_length, (u16) 1); |
3416 | break; |
3417 | |
3418 | - /* function drivers must handle get/set altsetting; if there's |
3419 | - * no get() method, we know only altsetting zero works. |
3420 | - */ |
3421 | + /* function drivers must handle get/set altsetting */ |
3422 | case USB_REQ_SET_INTERFACE: |
3423 | if (ctrl->bRequestType != USB_RECIP_INTERFACE) |
3424 | goto unknown; |
3425 | @@ -1705,7 +1703,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) |
3426 | f = cdev->config->interface[intf]; |
3427 | if (!f) |
3428 | break; |
3429 | - if (w_value && !f->set_alt) |
3430 | + |
3431 | + /* |
3432 | + * If there's no get_alt() method, we know only altsetting zero |
3433 | + * works. There is no need to check if set_alt() is not NULL |
3434 | + * as we check this in usb_add_function(). |
3435 | + */ |
3436 | + if (w_value && !f->get_alt) |
3437 | break; |
3438 | value = f->set_alt(f, w_index, w_value); |
3439 | if (value == USB_GADGET_DELAYED_STATUS) { |
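In the composite.c diff above, the config_ep_by_speed() hunk changes how _ep->mult is computed for high-speed isochronous and interrupt endpoints: bits 12:11 of wMaxPacketSize carry the number of additional transactions per microframe, so the multiplier is that field plus one, whereas the old code stored the masked packet size itself. A worked example of the extraction (the sample wMaxPacketSize value is invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* 1024-byte packets, 2 additional transactions per microframe */
        uint16_t w_maxp = (2u << 11) | 1024u;

        unsigned int maxp = w_maxp & 0x7ff;                /* payload size    */
        unsigned int mult = ((w_maxp & 0x1800) >> 11) + 1; /* 1..3 per uframe */

        printf("maxpacket=%u mult=%u\n", maxp, mult);      /* 1024 and 3      */
        return 0;
}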
3440 | diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c |
3441 | index 197f73386fac..d2351139342f 100644 |
3442 | --- a/drivers/usb/gadget/function/f_tcm.c |
3443 | +++ b/drivers/usb/gadget/function/f_tcm.c |
3444 | @@ -1073,7 +1073,7 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu, |
3445 | struct usbg_cmd *cmd; |
3446 | int tag; |
3447 | |
3448 | - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); |
3449 | + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); |
3450 | if (tag < 0) |
3451 | return ERR_PTR(-ENOMEM); |
3452 | |
3453 | diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c |
3454 | index bd82dd12deff..1468d8f085a3 100644 |
3455 | --- a/drivers/usb/gadget/legacy/inode.c |
3456 | +++ b/drivers/usb/gadget/legacy/inode.c |
3457 | @@ -1126,7 +1126,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) |
3458 | /* data and/or status stage for control request */ |
3459 | } else if (dev->state == STATE_DEV_SETUP) { |
3460 | |
3461 | - /* IN DATA+STATUS caller makes len <= wLength */ |
3462 | + len = min_t(size_t, len, dev->setup_wLength); |
3463 | if (dev->setup_in) { |
3464 | retval = setup_req (dev->gadget->ep0, dev->req, len); |
3465 | if (retval == 0) { |
3466 | @@ -1734,10 +1734,12 @@ static struct usb_gadget_driver gadgetfs_driver = { |
3467 | * such as configuration notifications. |
3468 | */ |
3469 | |
3470 | -static int is_valid_config (struct usb_config_descriptor *config) |
3471 | +static int is_valid_config(struct usb_config_descriptor *config, |
3472 | + unsigned int total) |
3473 | { |
3474 | return config->bDescriptorType == USB_DT_CONFIG |
3475 | && config->bLength == USB_DT_CONFIG_SIZE |
3476 | + && total >= USB_DT_CONFIG_SIZE |
3477 | && config->bConfigurationValue != 0 |
3478 | && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0 |
3479 | && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0; |
3480 | @@ -1762,7 +1764,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) |
3481 | } |
3482 | spin_unlock_irq(&dev->lock); |
3483 | |
3484 | - if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) |
3485 | + if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) || |
3486 | + (len > PAGE_SIZE * 4)) |
3487 | return -EINVAL; |
3488 | |
3489 | /* we might need to change message format someday */ |
3490 | @@ -1786,7 +1789,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) |
3491 | /* full or low speed config */ |
3492 | dev->config = (void *) kbuf; |
3493 | total = le16_to_cpu(dev->config->wTotalLength); |
3494 | - if (!is_valid_config (dev->config) || total >= length) |
3495 | + if (!is_valid_config(dev->config, total) || |
3496 | + total > length - USB_DT_DEVICE_SIZE) |
3497 | goto fail; |
3498 | kbuf += total; |
3499 | length -= total; |
3500 | @@ -1795,10 +1799,13 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) |
3501 | if (kbuf [1] == USB_DT_CONFIG) { |
3502 | dev->hs_config = (void *) kbuf; |
3503 | total = le16_to_cpu(dev->hs_config->wTotalLength); |
3504 | - if (!is_valid_config (dev->hs_config) || total >= length) |
3505 | + if (!is_valid_config(dev->hs_config, total) || |
3506 | + total > length - USB_DT_DEVICE_SIZE) |
3507 | goto fail; |
3508 | kbuf += total; |
3509 | length -= total; |
3510 | + } else { |
3511 | + dev->hs_config = NULL; |
3512 | } |
3513 | |
3514 | /* could support multiple configs, using another encoding! */ |
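The gadgetfs changes above tighten dev_config(): the write is capped at four pages, is_valid_config() now also checks that the claimed wTotalLength is at least USB_DT_CONFIG_SIZE, and the old total >= length test becomes total > length - USB_DT_DEVICE_SIZE, so a config block can never run past the buffer and always leaves room for the device descriptor that follows it. A small sketch of that bound check, assuming the caller has already verified there are at least USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE bytes available (as dev_config() does); the sample lengths are invented:

#include <stdio.h>

#define USB_DT_CONFIG_SIZE      9
#define USB_DT_DEVICE_SIZE      18

/* Accept a config block of wTotalLength 'total' only if it fits in the
 * 'remaining' bytes while keeping USB_DT_DEVICE_SIZE bytes for the device
 * descriptor that must follow. */
static int config_fits(unsigned int total, unsigned int remaining)
{
        if (total < USB_DT_CONFIG_SIZE)
                return 0;
        if (total > remaining - USB_DT_DEVICE_SIZE)
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", config_fits(32, 64));    /* 1: fits                  */
        printf("%d\n", config_fits(60, 64));    /* 0: no room for device    */
        printf("%d\n", config_fits(4, 64));     /* 0: shorter than a config */
        return 0;
}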
3515 | diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c |
3516 | index 9483489080f6..0402177f93cd 100644 |
3517 | --- a/drivers/usb/gadget/udc/core.c |
3518 | +++ b/drivers/usb/gadget/udc/core.c |
3519 | @@ -1317,7 +1317,11 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver) |
3520 | if (!ret) |
3521 | break; |
3522 | } |
3523 | - if (!ret && !udc->driver) |
3524 | + if (ret) |
3525 | + ret = -ENODEV; |
3526 | + else if (udc->driver) |
3527 | + ret = -EBUSY; |
3528 | + else |
3529 | goto found; |
3530 | } else { |
3531 | list_for_each_entry(udc, &udc_list, list) { |
3532 | diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c |
3533 | index 77d07904f932..a81d9ab861dc 100644 |
3534 | --- a/drivers/usb/gadget/udc/dummy_hcd.c |
3535 | +++ b/drivers/usb/gadget/udc/dummy_hcd.c |
3536 | @@ -330,7 +330,7 @@ static void nuke(struct dummy *dum, struct dummy_ep *ep) |
3537 | /* caller must hold lock */ |
3538 | static void stop_activity(struct dummy *dum) |
3539 | { |
3540 | - struct dummy_ep *ep; |
3541 | + int i; |
3542 | |
3543 | /* prevent any more requests */ |
3544 | dum->address = 0; |
3545 | @@ -338,8 +338,8 @@ static void stop_activity(struct dummy *dum) |
3546 | /* The timer is left running so that outstanding URBs can fail */ |
3547 | |
3548 | /* nuke any pending requests first, so driver i/o is quiesced */ |
3549 | - list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list) |
3550 | - nuke(dum, ep); |
3551 | + for (i = 0; i < DUMMY_ENDPOINTS; ++i) |
3552 | + nuke(dum, &dum->ep[i]); |
3553 | |
3554 | /* driver now does any non-usb quiescing necessary */ |
3555 | } |
3556 | diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c |
3557 | index 6afe32381209..7064892ff4a6 100644 |
3558 | --- a/drivers/usb/host/xhci-mem.c |
3559 | +++ b/drivers/usb/host/xhci-mem.c |
3560 | @@ -979,6 +979,40 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) |
3561 | xhci->devs[slot_id] = NULL; |
3562 | } |
3563 | |
3564 | +/* |
3565 | + * Free a virt_device structure. |
3566 | + * If the virt_device added a tt_info (a hub) and has children pointing to |
3567 | + * that tt_info, then free the child first. Recursive. |
3568 | + * We can't rely on udev at this point to find child-parent relationships. |
3569 | + */ |
3570 | +void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) |
3571 | +{ |
3572 | + struct xhci_virt_device *vdev; |
3573 | + struct list_head *tt_list_head; |
3574 | + struct xhci_tt_bw_info *tt_info, *next; |
3575 | + int i; |
3576 | + |
3577 | + vdev = xhci->devs[slot_id]; |
3578 | + if (!vdev) |
3579 | + return; |
3580 | + |
3581 | + tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts); |
3582 | + list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { |
3583 | + /* is this a hub device that added a tt_info to the tts list */ |
3584 | + if (tt_info->slot_id == slot_id) { |
3585 | + /* are any devices using this tt_info? */ |
3586 | + for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) { |
3587 | + vdev = xhci->devs[i]; |
3588 | + if (vdev && (vdev->tt_info == tt_info)) |
3589 | + xhci_free_virt_devices_depth_first( |
3590 | + xhci, i); |
3591 | + } |
3592 | + } |
3593 | + } |
3594 | + /* we are now at a leaf device */ |
3595 | + xhci_free_virt_device(xhci, slot_id); |
3596 | +} |
3597 | + |
3598 | int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, |
3599 | struct usb_device *udev, gfp_t flags) |
3600 | { |
3601 | @@ -1796,7 +1830,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) |
3602 | int size; |
3603 | int i, j, num_ports; |
3604 | |
3605 | - del_timer_sync(&xhci->cmd_timer); |
3606 | + cancel_delayed_work_sync(&xhci->cmd_timer); |
3607 | |
3608 | /* Free the Event Ring Segment Table and the actual Event Ring */ |
3609 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); |
3610 | @@ -1829,8 +1863,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) |
3611 | } |
3612 | } |
3613 | |
3614 | - for (i = 1; i < MAX_HC_SLOTS; ++i) |
3615 | - xhci_free_virt_device(xhci, i); |
3616 | + for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--) |
3617 | + xhci_free_virt_devices_depth_first(xhci, i); |
3618 | |
3619 | dma_pool_destroy(xhci->segment_pool); |
3620 | xhci->segment_pool = NULL; |
3621 | @@ -2343,9 +2377,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) |
3622 | |
3623 | INIT_LIST_HEAD(&xhci->cmd_list); |
3624 | |
3625 | - /* init command timeout timer */ |
3626 | - setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout, |
3627 | - (unsigned long)xhci); |
3628 | + /* init command timeout work */ |
3629 | + INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); |
3630 | + init_completion(&xhci->cmd_ring_stop_completion); |
3631 | |
3632 | page_size = readl(&xhci->op_regs->page_size); |
3633 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
3634 | @@ -2384,7 +2418,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) |
3635 | * "physically contiguous and 64-byte (cache line) aligned". |
3636 | */ |
3637 | xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, |
3638 | - GFP_KERNEL); |
3639 | + flags); |
3640 | if (!xhci->dcbaa) |
3641 | goto fail; |
3642 | memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); |
3643 | @@ -2480,7 +2514,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) |
3644 | |
3645 | xhci->erst.entries = dma_alloc_coherent(dev, |
3646 | sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma, |
3647 | - GFP_KERNEL); |
3648 | + flags); |
3649 | if (!xhci->erst.entries) |
3650 | goto fail; |
3651 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
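The new xhci_free_virt_devices_depth_first() above makes memory cleanup free any device that still points at a hub's tt_info before freeing the hub itself, because udev can no longer be relied on for parent/child lookups at that point. A toy user-space model of the same children-first recursion, where an invented parent field stands in for the tt_info relationship:

#include <stdio.h>

#define MAX_SLOTS 8

/* Toy model: parent is the slot id of the hub a device sits behind,
 * or 0 if it hangs directly off the root hub. */
struct toy_dev {
        int in_use;
        int parent;
};

static struct toy_dev devs[MAX_SLOTS];

static void free_depth_first(int slot)
{
        if (!devs[slot].in_use)
                return;
        /* free every child that still points at this slot first */
        for (int i = 1; i < MAX_SLOTS; i++)
                if (devs[i].in_use && devs[i].parent == slot)
                        free_depth_first(i);
        printf("freeing slot %d\n", slot);
        devs[slot].in_use = 0;
}

int main(void)
{
        devs[1] = (struct toy_dev){ 1, 0 };     /* a hub            */
        devs[2] = (struct toy_dev){ 1, 1 };     /* device behind it */
        devs[3] = (struct toy_dev){ 1, 0 };     /* stand-alone      */

        for (int i = 1; i < MAX_SLOTS; i++)
                free_depth_first(i);            /* prints 2, 1, 3   */
        return 0;
}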
3652 | diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c |
3653 | index 79959f17c38c..f2365a47fa4a 100644 |
3654 | --- a/drivers/usb/host/xhci-mtk.c |
3655 | +++ b/drivers/usb/host/xhci-mtk.c |
3656 | @@ -560,8 +560,10 @@ static int xhci_mtk_probe(struct platform_device *pdev) |
3657 | goto disable_ldos; |
3658 | |
3659 | irq = platform_get_irq(pdev, 0); |
3660 | - if (irq < 0) |
3661 | + if (irq < 0) { |
3662 | + ret = irq; |
3663 | goto disable_clk; |
3664 | + } |
3665 | |
3666 | /* Initialize dma_mask and coherent_dma_mask to 32-bits */ |
3667 | ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); |
3668 | diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c |
3669 | index e96ae80d107e..954abfd5014d 100644 |
3670 | --- a/drivers/usb/host/xhci-pci.c |
3671 | +++ b/drivers/usb/host/xhci-pci.c |
3672 | @@ -165,7 +165,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) |
3673 | pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || |
3674 | pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || |
3675 | pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || |
3676 | - pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) { |
3677 | + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || |
3678 | + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) { |
3679 | xhci->quirks |= XHCI_PME_STUCK_QUIRK; |
3680 | } |
3681 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
3682 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
3683 | index 797137e26549..771a6da9caea 100644 |
3684 | --- a/drivers/usb/host/xhci-ring.c |
3685 | +++ b/drivers/usb/host/xhci-ring.c |
3686 | @@ -260,23 +260,76 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci) |
3687 | readl(&xhci->dba->doorbell[0]); |
3688 | } |
3689 | |
3690 | -static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) |
3691 | +static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay) |
3692 | +{ |
3693 | + return mod_delayed_work(system_wq, &xhci->cmd_timer, delay); |
3694 | +} |
3695 | + |
3696 | +static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci) |
3697 | +{ |
3698 | + return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command, |
3699 | + cmd_list); |
3700 | +} |
3701 | + |
3702 | +/* |
3703 | + * Turn all commands on command ring with status set to "aborted" to no-op trbs. |
3704 | + * If there are other commands waiting then restart the ring and kick the timer. |
3705 | + * This must be called with command ring stopped and xhci->lock held. |
3706 | + */ |
3707 | +static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, |
3708 | + struct xhci_command *cur_cmd) |
3709 | +{ |
3710 | + struct xhci_command *i_cmd; |
3711 | + u32 cycle_state; |
3712 | + |
3713 | + /* Turn all aborted commands in list to no-ops, then restart */ |
3714 | + list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) { |
3715 | + |
3716 | + if (i_cmd->status != COMP_CMD_ABORT) |
3717 | + continue; |
3718 | + |
3719 | + i_cmd->status = COMP_CMD_STOP; |
3720 | + |
3721 | + xhci_dbg(xhci, "Turn aborted command %p to no-op\n", |
3722 | + i_cmd->command_trb); |
3723 | + /* get cycle state from the original cmd trb */ |
3724 | + cycle_state = le32_to_cpu( |
3725 | + i_cmd->command_trb->generic.field[3]) & TRB_CYCLE; |
3726 | + /* modify the command trb to no-op command */ |
3727 | + i_cmd->command_trb->generic.field[0] = 0; |
3728 | + i_cmd->command_trb->generic.field[1] = 0; |
3729 | + i_cmd->command_trb->generic.field[2] = 0; |
3730 | + i_cmd->command_trb->generic.field[3] = cpu_to_le32( |
3731 | + TRB_TYPE(TRB_CMD_NOOP) | cycle_state); |
3732 | + |
3733 | + /* |
3734 | + * caller waiting for completion is called when command |
3735 | + * completion event is received for these no-op commands |
3736 | + */ |
3737 | + } |
3738 | + |
3739 | + xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; |
3740 | + |
3741 | + /* ring command ring doorbell to restart the command ring */ |
3742 | + if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) && |
3743 | + !(xhci->xhc_state & XHCI_STATE_DYING)) { |
3744 | + xhci->current_cmd = cur_cmd; |
3745 | + xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); |
3746 | + xhci_ring_cmd_db(xhci); |
3747 | + } |
3748 | +} |
3749 | + |
3750 | +/* Must be called with xhci->lock held, releases and re-acquires the lock */ |
3751 | +static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) |
3752 | { |
3753 | u64 temp_64; |
3754 | int ret; |
3755 | |
3756 | xhci_dbg(xhci, "Abort command ring\n"); |
3757 | |
3758 | - temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
3759 | - xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; |
3760 | + reinit_completion(&xhci->cmd_ring_stop_completion); |
3761 | |
3762 | - /* |
3763 | - * Writing the CMD_RING_ABORT bit should cause a cmd completion event, |
3764 | - * however on some host hw the CMD_RING_RUNNING bit is correctly cleared |
3765 | - * but the completion event in never sent. Use the cmd timeout timer to |
3766 | - * handle those cases. Use twice the time to cover the bit polling retry |
3767 | - */ |
3768 | - mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT)); |
3769 | + temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
3770 | xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, |
3771 | &xhci->op_regs->cmd_ring); |
3772 | |
3773 | @@ -296,16 +349,30 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) |
3774 | udelay(1000); |
3775 | ret = xhci_handshake(&xhci->op_regs->cmd_ring, |
3776 | CMD_RING_RUNNING, 0, 3 * 1000 * 1000); |
3777 | - if (ret == 0) |
3778 | - return 0; |
3779 | - |
3780 | - xhci_err(xhci, "Stopped the command ring failed, " |
3781 | - "maybe the host is dead\n"); |
3782 | - del_timer(&xhci->cmd_timer); |
3783 | - xhci->xhc_state |= XHCI_STATE_DYING; |
3784 | - xhci_quiesce(xhci); |
3785 | - xhci_halt(xhci); |
3786 | - return -ESHUTDOWN; |
3787 | + if (ret < 0) { |
3788 | + xhci_err(xhci, "Stopped the command ring failed, " |
3789 | + "maybe the host is dead\n"); |
3790 | + xhci->xhc_state |= XHCI_STATE_DYING; |
3791 | + xhci_quiesce(xhci); |
3792 | + xhci_halt(xhci); |
3793 | + return -ESHUTDOWN; |
3794 | + } |
3795 | + } |
3796 | + /* |
3797 | + * Writing the CMD_RING_ABORT bit should cause a cmd completion event, |
3798 | + * however on some host hw the CMD_RING_RUNNING bit is correctly cleared |
3799 | + * but the completion event is never sent. Wait 2 secs (arbitrary |
3800 | + * number) to handle those cases after negation of CMD_RING_RUNNING. |
3801 | + */ |
3802 | + spin_unlock_irqrestore(&xhci->lock, flags); |
3803 | + ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion, |
3804 | + msecs_to_jiffies(2000)); |
3805 | + spin_lock_irqsave(&xhci->lock, flags); |
3806 | + if (!ret) { |
3807 | + xhci_dbg(xhci, "No stop event for abort, ring start fail?\n"); |
3808 | + xhci_cleanup_command_queue(xhci); |
3809 | + } else { |
3810 | + xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci)); |
3811 | } |
3812 | |
3813 | return 0; |
3814 | @@ -1211,101 +1278,62 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci) |
3815 | xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT); |
3816 | } |
3817 | |
3818 | -/* |
3819 | - * Turn all commands on command ring with status set to "aborted" to no-op trbs. |
3820 | - * If there are other commands waiting then restart the ring and kick the timer. |
3821 | - * This must be called with command ring stopped and xhci->lock held. |
3822 | - */ |
3823 | -static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, |
3824 | - struct xhci_command *cur_cmd) |
3825 | -{ |
3826 | - struct xhci_command *i_cmd, *tmp_cmd; |
3827 | - u32 cycle_state; |
3828 | - |
3829 | - /* Turn all aborted commands in list to no-ops, then restart */ |
3830 | - list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list, |
3831 | - cmd_list) { |
3832 | - |
3833 | - if (i_cmd->status != COMP_CMD_ABORT) |
3834 | - continue; |
3835 | - |
3836 | - i_cmd->status = COMP_CMD_STOP; |
3837 | - |
3838 | - xhci_dbg(xhci, "Turn aborted command %p to no-op\n", |
3839 | - i_cmd->command_trb); |
3840 | - /* get cycle state from the original cmd trb */ |
3841 | - cycle_state = le32_to_cpu( |
3842 | - i_cmd->command_trb->generic.field[3]) & TRB_CYCLE; |
3843 | - /* modify the command trb to no-op command */ |
3844 | - i_cmd->command_trb->generic.field[0] = 0; |
3845 | - i_cmd->command_trb->generic.field[1] = 0; |
3846 | - i_cmd->command_trb->generic.field[2] = 0; |
3847 | - i_cmd->command_trb->generic.field[3] = cpu_to_le32( |
3848 | - TRB_TYPE(TRB_CMD_NOOP) | cycle_state); |
3849 | - |
3850 | - /* |
3851 | - * caller waiting for completion is called when command |
3852 | - * completion event is received for these no-op commands |
3853 | - */ |
3854 | - } |
3855 | - |
3856 | - xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; |
3857 | - |
3858 | - /* ring command ring doorbell to restart the command ring */ |
3859 | - if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) && |
3860 | - !(xhci->xhc_state & XHCI_STATE_DYING)) { |
3861 | - xhci->current_cmd = cur_cmd; |
3862 | - mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT); |
3863 | - xhci_ring_cmd_db(xhci); |
3864 | - } |
3865 | - return; |
3866 | -} |
3867 | - |
3868 | - |
3869 | -void xhci_handle_command_timeout(unsigned long data) |
3870 | +void xhci_handle_command_timeout(struct work_struct *work) |
3871 | { |
3872 | struct xhci_hcd *xhci; |
3873 | int ret; |
3874 | unsigned long flags; |
3875 | u64 hw_ring_state; |
3876 | - bool second_timeout = false; |
3877 | - xhci = (struct xhci_hcd *) data; |
3878 | |
3879 | - /* mark this command to be cancelled */ |
3880 | + xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer); |
3881 | + |
3882 | spin_lock_irqsave(&xhci->lock, flags); |
3883 | - if (xhci->current_cmd) { |
3884 | - if (xhci->current_cmd->status == COMP_CMD_ABORT) |
3885 | - second_timeout = true; |
3886 | - xhci->current_cmd->status = COMP_CMD_ABORT; |
3887 | + |
3888 | + /* |
3889 | + * If timeout work is pending, or current_cmd is NULL, it means we |
3890 | + * raced with command completion. Command is handled so just return. |
3891 | + */ |
3892 | + if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) { |
3893 | + spin_unlock_irqrestore(&xhci->lock, flags); |
3894 | + return; |
3895 | } |
3896 | + /* mark this command to be cancelled */ |
3897 | + xhci->current_cmd->status = COMP_CMD_ABORT; |
3898 | |
3899 | /* Make sure command ring is running before aborting it */ |
3900 | hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
3901 | if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && |
3902 | (hw_ring_state & CMD_RING_RUNNING)) { |
3903 | - spin_unlock_irqrestore(&xhci->lock, flags); |
3904 | + /* Prevent new doorbell, and start command abort */ |
3905 | + xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; |
3906 | xhci_dbg(xhci, "Command timeout\n"); |
3907 | - ret = xhci_abort_cmd_ring(xhci); |
3908 | + ret = xhci_abort_cmd_ring(xhci, flags); |
3909 | if (unlikely(ret == -ESHUTDOWN)) { |
3910 | xhci_err(xhci, "Abort command ring failed\n"); |
3911 | xhci_cleanup_command_queue(xhci); |
3912 | + spin_unlock_irqrestore(&xhci->lock, flags); |
3913 | usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); |
3914 | xhci_dbg(xhci, "xHCI host controller is dead.\n"); |
3915 | + |
3916 | + return; |
3917 | } |
3918 | - return; |
3919 | + |
3920 | + goto time_out_completed; |
3921 | } |
3922 | |
3923 | - /* command ring failed to restart, or host removed. Bail out */ |
3924 | - if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) { |
3925 | - spin_unlock_irqrestore(&xhci->lock, flags); |
3926 | - xhci_dbg(xhci, "command timed out twice, ring start fail?\n"); |
3927 | + /* host removed. Bail out */ |
3928 | + if (xhci->xhc_state & XHCI_STATE_REMOVING) { |
3929 | + xhci_dbg(xhci, "host removed, ring start fail?\n"); |
3930 | xhci_cleanup_command_queue(xhci); |
3931 | - return; |
3932 | + |
3933 | + goto time_out_completed; |
3934 | } |
3935 | |
3936 | /* command timeout on stopped ring, ring can't be aborted */ |
3937 | xhci_dbg(xhci, "Command timeout on stopped ring\n"); |
3938 | xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); |
3939 | + |
3940 | +time_out_completed: |
3941 | spin_unlock_irqrestore(&xhci->lock, flags); |
3942 | return; |
3943 | } |
3944 | @@ -1338,7 +1366,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, |
3945 | |
3946 | cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); |
3947 | |
3948 | - del_timer(&xhci->cmd_timer); |
3949 | + cancel_delayed_work(&xhci->cmd_timer); |
3950 | |
3951 | trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); |
3952 | |
3953 | @@ -1346,7 +1374,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, |
3954 | |
3955 | /* If CMD ring stopped we own the trbs between enqueue and dequeue */ |
3956 | if (cmd_comp_code == COMP_CMD_STOP) { |
3957 | - xhci_handle_stopped_cmd_ring(xhci, cmd); |
3958 | + complete_all(&xhci->cmd_ring_stop_completion); |
3959 | return; |
3960 | } |
3961 | |
3962 | @@ -1364,8 +1392,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, |
3963 | */ |
3964 | if (cmd_comp_code == COMP_CMD_ABORT) { |
3965 | xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; |
3966 | - if (cmd->status == COMP_CMD_ABORT) |
3967 | + if (cmd->status == COMP_CMD_ABORT) { |
3968 | + if (xhci->current_cmd == cmd) |
3969 | + xhci->current_cmd = NULL; |
3970 | goto event_handled; |
3971 | + } |
3972 | } |
3973 | |
3974 | cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); |
3975 | @@ -1426,7 +1457,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, |
3976 | if (cmd->cmd_list.next != &xhci->cmd_list) { |
3977 | xhci->current_cmd = list_entry(cmd->cmd_list.next, |
3978 | struct xhci_command, cmd_list); |
3979 | - mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT); |
3980 | + xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); |
3981 | + } else if (xhci->current_cmd == cmd) { |
3982 | + xhci->current_cmd = NULL; |
3983 | } |
3984 | |
3985 | event_handled: |
3986 | @@ -3920,9 +3953,9 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, |
3987 | |
3988 | /* if there are no other commands queued we start the timeout timer */ |
3989 | if (xhci->cmd_list.next == &cmd->cmd_list && |
3990 | - !timer_pending(&xhci->cmd_timer)) { |
3991 | + !delayed_work_pending(&xhci->cmd_timer)) { |
3992 | xhci->current_cmd = cmd; |
3993 | - mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT); |
3994 | + xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); |
3995 | } |
3996 | |
3997 | queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, |
3998 | diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c |
3999 | index 1a4ca02729c2..ad0624386950 100644 |
4000 | --- a/drivers/usb/host/xhci.c |
4001 | +++ b/drivers/usb/host/xhci.c |
4002 | @@ -3783,8 +3783,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, |
4003 | |
4004 | mutex_lock(&xhci->mutex); |
4005 | |
4006 | - if (xhci->xhc_state) /* dying, removing or halted */ |
4007 | + if (xhci->xhc_state) { /* dying, removing or halted */ |
4008 | + ret = -ESHUTDOWN; |
4009 | goto out; |
4010 | + } |
4011 | |
4012 | if (!udev->slot_id) { |
4013 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4014 | diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h |
4015 | index f945380035d0..c525722aa934 100644 |
4016 | --- a/drivers/usb/host/xhci.h |
4017 | +++ b/drivers/usb/host/xhci.h |
4018 | @@ -1571,7 +1571,8 @@ struct xhci_hcd { |
4019 | #define CMD_RING_STATE_STOPPED (1 << 2) |
4020 | struct list_head cmd_list; |
4021 | unsigned int cmd_ring_reserved_trbs; |
4022 | - struct timer_list cmd_timer; |
4023 | + struct delayed_work cmd_timer; |
4024 | + struct completion cmd_ring_stop_completion; |
4025 | struct xhci_command *current_cmd; |
4026 | struct xhci_ring *event_ring; |
4027 | struct xhci_erst erst; |
4028 | @@ -1941,7 +1942,7 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci, |
4029 | unsigned int slot_id, unsigned int ep_index, |
4030 | struct xhci_dequeue_state *deq_state); |
4031 | void xhci_stop_endpoint_command_watchdog(unsigned long arg); |
4032 | -void xhci_handle_command_timeout(unsigned long data); |
4033 | +void xhci_handle_command_timeout(struct work_struct *work); |
4034 | |
4035 | void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, |
4036 | unsigned int ep_index, unsigned int stream_id); |
4037 | diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c |
4038 | index 310238c6b5cd..896798071817 100644 |
4039 | --- a/drivers/usb/musb/blackfin.c |
4040 | +++ b/drivers/usb/musb/blackfin.c |
4041 | @@ -469,6 +469,7 @@ static const struct musb_platform_ops bfin_ops = { |
4042 | .init = bfin_musb_init, |
4043 | .exit = bfin_musb_exit, |
4044 | |
4045 | + .fifo_offset = bfin_fifo_offset, |
4046 | .readb = bfin_readb, |
4047 | .writeb = bfin_writeb, |
4048 | .readw = bfin_readw, |
4049 | diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h |
4050 | index 91817d77d59c..47331dbdde29 100644 |
4051 | --- a/drivers/usb/musb/musb_core.h |
4052 | +++ b/drivers/usb/musb/musb_core.h |
4053 | @@ -216,6 +216,7 @@ struct musb_platform_ops { |
4054 | void (*pre_root_reset_end)(struct musb *musb); |
4055 | void (*post_root_reset_end)(struct musb *musb); |
4056 | int (*phy_callback)(enum musb_vbus_id_status status); |
4057 | + void (*clear_ep_rxintr)(struct musb *musb, int epnum); |
4058 | }; |
4059 | |
4060 | /* |
4061 | @@ -626,4 +627,10 @@ static inline void musb_platform_post_root_reset_end(struct musb *musb) |
4062 | musb->ops->post_root_reset_end(musb); |
4063 | } |
4064 | |
4065 | +static inline void musb_platform_clear_ep_rxintr(struct musb *musb, int epnum) |
4066 | +{ |
4067 | + if (musb->ops->clear_ep_rxintr) |
4068 | + musb->ops->clear_ep_rxintr(musb, epnum); |
4069 | +} |
4070 | + |
4071 | #endif /* __MUSB_CORE_H__ */ |
4072 | diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c |
4073 | index feae1561b9ab..9f125e179acd 100644 |
4074 | --- a/drivers/usb/musb/musb_dsps.c |
4075 | +++ b/drivers/usb/musb/musb_dsps.c |
4076 | @@ -267,6 +267,17 @@ static void otg_timer(unsigned long _musb) |
4077 | pm_runtime_put_autosuspend(dev); |
4078 | } |
4079 | |
4080 | +void dsps_musb_clear_ep_rxintr(struct musb *musb, int epnum) |
4081 | +{ |
4082 | + u32 epintr; |
4083 | + struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent); |
4084 | + const struct dsps_musb_wrapper *wrp = glue->wrp; |
4085 | + |
4086 | +	/* musb->lock might already be held */ |
4087 | + epintr = (1 << epnum) << wrp->rxep_shift; |
4088 | + musb_writel(musb->ctrl_base, wrp->epintr_status, epintr); |
4089 | +} |
4090 | + |
4091 | static irqreturn_t dsps_interrupt(int irq, void *hci) |
4092 | { |
4093 | struct musb *musb = hci; |
4094 | @@ -622,6 +633,7 @@ static struct musb_platform_ops dsps_ops = { |
4095 | |
4096 | .set_mode = dsps_musb_set_mode, |
4097 | .recover = dsps_musb_recover, |
4098 | + .clear_ep_rxintr = dsps_musb_clear_ep_rxintr, |
4099 | }; |
4100 | |
4101 | static u64 musb_dmamask = DMA_BIT_MASK(32); |
4102 | diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c |
4103 | index 53bc4ceefe89..806451418cfe 100644 |
4104 | --- a/drivers/usb/musb/musb_host.c |
4105 | +++ b/drivers/usb/musb/musb_host.c |
4106 | @@ -2374,12 +2374,11 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) |
4107 | int is_in = usb_pipein(urb->pipe); |
4108 | int status = 0; |
4109 | u16 csr; |
4110 | + struct dma_channel *dma = NULL; |
4111 | |
4112 | musb_ep_select(regs, hw_end); |
4113 | |
4114 | if (is_dma_capable()) { |
4115 | - struct dma_channel *dma; |
4116 | - |
4117 | dma = is_in ? ep->rx_channel : ep->tx_channel; |
4118 | if (dma) { |
4119 | status = ep->musb->dma_controller->channel_abort(dma); |
4120 | @@ -2395,10 +2394,9 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) |
4121 | /* giveback saves bulk toggle */ |
4122 | csr = musb_h_flush_rxfifo(ep, 0); |
4123 | |
4124 | - /* REVISIT we still get an irq; should likely clear the |
4125 | - * endpoint's irq status here to avoid bogus irqs. |
4126 | - * clearing that status is platform-specific... |
4127 | - */ |
4128 | + /* clear the endpoint's irq status here to avoid bogus irqs */ |
4129 | + if (is_dma_capable() && dma) |
4130 | + musb_platform_clear_ep_rxintr(musb, ep->epnum); |
4131 | } else if (ep->epnum) { |
4132 | musb_h_tx_flush_fifo(ep); |
4133 | csr = musb_readw(epio, MUSB_TXCSR); |
4134 | diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h |
4135 | index f7b13fd25257..a3dcbd55e436 100644 |
4136 | --- a/drivers/usb/musb/musbhsdma.h |
4137 | +++ b/drivers/usb/musb/musbhsdma.h |
4138 | @@ -157,5 +157,5 @@ struct musb_dma_controller { |
4139 | void __iomem *base; |
4140 | u8 channel_count; |
4141 | u8 used_channels; |
4142 | - u8 irq; |
4143 | + int irq; |
4144 | }; |
4145 | diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c |
4146 | index 42a1afe36a90..5f5f19813fde 100644 |
4147 | --- a/drivers/usb/phy/phy-am335x-control.c |
4148 | +++ b/drivers/usb/phy/phy-am335x-control.c |
4149 | @@ -134,10 +134,12 @@ struct phy_control *am335x_get_phy_control(struct device *dev) |
4150 | return NULL; |
4151 | |
4152 | dev = bus_find_device(&platform_bus_type, NULL, node, match); |
4153 | + of_node_put(node); |
4154 | if (!dev) |
4155 | return NULL; |
4156 | |
4157 | ctrl_usb = dev_get_drvdata(dev); |
4158 | + put_device(dev); |
4159 | if (!ctrl_usb) |
4160 | return NULL; |
4161 | return &ctrl_usb->phy_ctrl; |
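The phy-am335x-control.c hunk plugs two reference leaks: the device_node looked up earlier and the struct device returned by bus_find_device() each carry a reference that must be dropped once the driver data has been fetched. A generic sketch of that pattern, using a hypothetical helper and match callback:

	static void *example_get_ctrl_data(struct device_node *node)
	{
		struct device *dev;
		void *drvdata;

		dev = bus_find_device(&platform_bus_type, NULL, node, match);
		of_node_put(node);	/* done with the DT node reference */
		if (!dev)
			return NULL;

		drvdata = dev_get_drvdata(dev);
		put_device(dev);	/* drop the reference bus_find_device() took */
		return drvdata;
	}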
4162 | diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c |
4163 | index 5f17a3b9916d..80260b08398b 100644 |
4164 | --- a/drivers/usb/serial/cyberjack.c |
4165 | +++ b/drivers/usb/serial/cyberjack.c |
4166 | @@ -50,6 +50,7 @@ |
4167 | #define CYBERJACK_PRODUCT_ID 0x0100 |
4168 | |
4169 | /* Function prototypes */ |
4170 | +static int cyberjack_attach(struct usb_serial *serial); |
4171 | static int cyberjack_port_probe(struct usb_serial_port *port); |
4172 | static int cyberjack_port_remove(struct usb_serial_port *port); |
4173 | static int cyberjack_open(struct tty_struct *tty, |
4174 | @@ -77,6 +78,7 @@ static struct usb_serial_driver cyberjack_device = { |
4175 | .description = "Reiner SCT Cyberjack USB card reader", |
4176 | .id_table = id_table, |
4177 | .num_ports = 1, |
4178 | + .attach = cyberjack_attach, |
4179 | .port_probe = cyberjack_port_probe, |
4180 | .port_remove = cyberjack_port_remove, |
4181 | .open = cyberjack_open, |
4182 | @@ -100,6 +102,14 @@ struct cyberjack_private { |
4183 | short wrsent; /* Data already sent */ |
4184 | }; |
4185 | |
4186 | +static int cyberjack_attach(struct usb_serial *serial) |
4187 | +{ |
4188 | + if (serial->num_bulk_out < serial->num_ports) |
4189 | + return -ENODEV; |
4190 | + |
4191 | + return 0; |
4192 | +} |
4193 | + |
4194 | static int cyberjack_port_probe(struct usb_serial_port *port) |
4195 | { |
4196 | struct cyberjack_private *priv; |
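This attach() check is the first of a series this patch adds across many usb-serial drivers (io_edgeport, io_ti, iuu_phoenix, keyspan_pda, kobil_sct, mos7720, mos7840, omninet, oti6858, pl2303, spcp8x5, ti_usb_3410_5052): the driver verifies at bind time that the interface really exposes the endpoints it will later dereference instead of trusting the descriptors of a possibly malicious device. The common shape, shown here as a hypothetical driver rather than any one of the real ones:

	static int example_attach(struct usb_serial *serial)
	{
		/* refuse to bind rather than oops later on a missing endpoint */
		if (serial->num_bulk_in < serial->num_ports ||
		    serial->num_bulk_out < serial->num_ports) {
			dev_err(&serial->interface->dev, "missing endpoints\n");
			return -ENODEV;
		}

		return 0;
	}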
4197 | diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c |
4198 | index 97cabf803c2f..b2f2e87aed94 100644 |
4199 | --- a/drivers/usb/serial/garmin_gps.c |
4200 | +++ b/drivers/usb/serial/garmin_gps.c |
4201 | @@ -1043,6 +1043,7 @@ static int garmin_write_bulk(struct usb_serial_port *port, |
4202 | "%s - usb_submit_urb(write bulk) failed with status = %d\n", |
4203 | __func__, status); |
4204 | count = status; |
4205 | + kfree(buffer); |
4206 | } |
4207 | |
4208 | /* we are done with this urb, so let the host driver |
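The garmin_gps hunk fixes a memory leak: when usb_submit_urb() fails, the URB's completion handler never runs, so the transfer buffer it would normally have freed must be released on the error path. A minimal sketch of that rule, with hypothetical names:

	static int example_submit(struct urb *urb, unsigned char *buffer)
	{
		int status;

		status = usb_submit_urb(urb, GFP_ATOMIC);
		if (status)
			kfree(buffer);	/* the completion callback will never run */

		return status;
	}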
4209 | diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c |
4210 | index 11c05ce2f35f..36dfe9972b17 100644 |
4211 | --- a/drivers/usb/serial/io_edgeport.c |
4212 | +++ b/drivers/usb/serial/io_edgeport.c |
4213 | @@ -2754,6 +2754,11 @@ static int edge_startup(struct usb_serial *serial) |
4214 | EDGE_COMPATIBILITY_MASK1, |
4215 | EDGE_COMPATIBILITY_MASK2 }; |
4216 | |
4217 | + if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) { |
4218 | + dev_err(&serial->interface->dev, "missing endpoints\n"); |
4219 | + return -ENODEV; |
4220 | + } |
4221 | + |
4222 | dev = serial->dev; |
4223 | |
4224 | /* create our private serial structure */ |
4225 | diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c |
4226 | index fce82fd79f77..c02808a30436 100644 |
4227 | --- a/drivers/usb/serial/io_ti.c |
4228 | +++ b/drivers/usb/serial/io_ti.c |
4229 | @@ -1499,8 +1499,7 @@ static int do_boot_mode(struct edgeport_serial *serial, |
4230 | |
4231 | dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__); |
4232 | |
4233 | - /* return an error on purpose */ |
4234 | - return -ENODEV; |
4235 | + return 1; |
4236 | } |
4237 | |
4238 | stayinbootmode: |
4239 | @@ -1508,7 +1507,7 @@ static int do_boot_mode(struct edgeport_serial *serial, |
4240 | dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__); |
4241 | serial->product_info.TiMode = TI_MODE_BOOT; |
4242 | |
4243 | - return 0; |
4244 | + return 1; |
4245 | } |
4246 | |
4247 | static int ti_do_config(struct edgeport_port *port, int feature, int on) |
4248 | @@ -2549,6 +2548,13 @@ static int edge_startup(struct usb_serial *serial) |
4249 | int status; |
4250 | u16 product_id; |
4251 | |
4252 | + /* Make sure we have the required endpoints when in download mode. */ |
4253 | + if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) { |
4254 | + if (serial->num_bulk_in < serial->num_ports || |
4255 | + serial->num_bulk_out < serial->num_ports) |
4256 | + return -ENODEV; |
4257 | + } |
4258 | + |
4259 | /* create our private serial structure */ |
4260 | edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL); |
4261 | if (!edge_serial) |
4262 | @@ -2556,14 +2562,18 @@ static int edge_startup(struct usb_serial *serial) |
4263 | |
4264 | mutex_init(&edge_serial->es_lock); |
4265 | edge_serial->serial = serial; |
4266 | + INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work); |
4267 | usb_set_serial_data(serial, edge_serial); |
4268 | |
4269 | status = download_fw(edge_serial); |
4270 | - if (status) { |
4271 | + if (status < 0) { |
4272 | kfree(edge_serial); |
4273 | return status; |
4274 | } |
4275 | |
4276 | + if (status > 0) |
4277 | + return 1; /* bind but do not register any ports */ |
4278 | + |
4279 | product_id = le16_to_cpu( |
4280 | edge_serial->serial->dev->descriptor.idProduct); |
4281 | |
4282 | @@ -2575,7 +2585,6 @@ static int edge_startup(struct usb_serial *serial) |
4283 | } |
4284 | } |
4285 | |
4286 | - INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work); |
4287 | edge_heartbeat_schedule(edge_serial); |
4288 | |
4289 | return 0; |
4290 | @@ -2583,6 +2592,9 @@ static int edge_startup(struct usb_serial *serial) |
4291 | |
4292 | static void edge_disconnect(struct usb_serial *serial) |
4293 | { |
4294 | + struct edgeport_serial *edge_serial = usb_get_serial_data(serial); |
4295 | + |
4296 | + cancel_delayed_work_sync(&edge_serial->heartbeat_work); |
4297 | } |
4298 | |
4299 | static void edge_release(struct usb_serial *serial) |
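The io_ti changes lean on a convention of the usb-serial core of this era: a negative return from attach() aborts the bind, zero registers the ports, and a positive value binds the interface without registering any ports, which suits the boot-mode paths above where the device is about to reboot into download mode. Paraphrased (not verbatim) from the core's probe path:

	retval = type->attach(serial);
	if (retval < 0)
		goto probe_error;	/* hard failure: do not bind */
	serial->attached = 1;
	if (retval > 0)
		goto exit;		/* bound, but no ports: device will re-enumerate */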
4300 | diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c |
4301 | index 344b4eea4bd5..d57fb5199218 100644 |
4302 | --- a/drivers/usb/serial/iuu_phoenix.c |
4303 | +++ b/drivers/usb/serial/iuu_phoenix.c |
4304 | @@ -68,6 +68,16 @@ struct iuu_private { |
4305 | u32 clk; |
4306 | }; |
4307 | |
4308 | +static int iuu_attach(struct usb_serial *serial) |
4309 | +{ |
4310 | + unsigned char num_ports = serial->num_ports; |
4311 | + |
4312 | + if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports) |
4313 | + return -ENODEV; |
4314 | + |
4315 | + return 0; |
4316 | +} |
4317 | + |
4318 | static int iuu_port_probe(struct usb_serial_port *port) |
4319 | { |
4320 | struct iuu_private *priv; |
4321 | @@ -1196,6 +1206,7 @@ static struct usb_serial_driver iuu_device = { |
4322 | .tiocmset = iuu_tiocmset, |
4323 | .set_termios = iuu_set_termios, |
4324 | .init_termios = iuu_init_termios, |
4325 | + .attach = iuu_attach, |
4326 | .port_probe = iuu_port_probe, |
4327 | .port_remove = iuu_port_remove, |
4328 | }; |
4329 | diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c |
4330 | index e49ad0c63ad8..83523fcf6fb9 100644 |
4331 | --- a/drivers/usb/serial/keyspan_pda.c |
4332 | +++ b/drivers/usb/serial/keyspan_pda.c |
4333 | @@ -699,6 +699,19 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw"); |
4334 | MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw"); |
4335 | #endif |
4336 | |
4337 | +static int keyspan_pda_attach(struct usb_serial *serial) |
4338 | +{ |
4339 | + unsigned char num_ports = serial->num_ports; |
4340 | + |
4341 | + if (serial->num_bulk_out < num_ports || |
4342 | + serial->num_interrupt_in < num_ports) { |
4343 | + dev_err(&serial->interface->dev, "missing endpoints\n"); |
4344 | + return -ENODEV; |
4345 | + } |
4346 | + |
4347 | + return 0; |
4348 | +} |
4349 | + |
4350 | static int keyspan_pda_port_probe(struct usb_serial_port *port) |
4351 | { |
4352 | |
4353 | @@ -776,6 +789,7 @@ static struct usb_serial_driver keyspan_pda_device = { |
4354 | .break_ctl = keyspan_pda_break_ctl, |
4355 | .tiocmget = keyspan_pda_tiocmget, |
4356 | .tiocmset = keyspan_pda_tiocmset, |
4357 | + .attach = keyspan_pda_attach, |
4358 | .port_probe = keyspan_pda_port_probe, |
4359 | .port_remove = keyspan_pda_port_remove, |
4360 | }; |
4361 | diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c |
4362 | index 6f29bfadbe33..0ee190fc1bf8 100644 |
4363 | --- a/drivers/usb/serial/kl5kusb105.c |
4364 | +++ b/drivers/usb/serial/kl5kusb105.c |
4365 | @@ -311,6 +311,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) |
4366 | if (rc < 0) { |
4367 | dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc); |
4368 | retval = rc; |
4369 | + goto err_generic_close; |
4370 | } else |
4371 | dev_dbg(&port->dev, "%s - enabled reading\n", __func__); |
4372 | |
4373 | @@ -337,6 +338,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) |
4374 | 0, /* index */ |
4375 | NULL, 0, |
4376 | KLSI_TIMEOUT); |
4377 | +err_generic_close: |
4378 | usb_serial_generic_close(port); |
4379 | err_free_cfg: |
4380 | kfree(cfg); |
4381 | diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c |
4382 | index 2363654cafc9..813035f51fe7 100644 |
4383 | --- a/drivers/usb/serial/kobil_sct.c |
4384 | +++ b/drivers/usb/serial/kobil_sct.c |
4385 | @@ -51,6 +51,7 @@ |
4386 | |
4387 | |
4388 | /* Function prototypes */ |
4389 | +static int kobil_attach(struct usb_serial *serial); |
4390 | static int kobil_port_probe(struct usb_serial_port *probe); |
4391 | static int kobil_port_remove(struct usb_serial_port *probe); |
4392 | static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port); |
4393 | @@ -86,6 +87,7 @@ static struct usb_serial_driver kobil_device = { |
4394 | .description = "KOBIL USB smart card terminal", |
4395 | .id_table = id_table, |
4396 | .num_ports = 1, |
4397 | + .attach = kobil_attach, |
4398 | .port_probe = kobil_port_probe, |
4399 | .port_remove = kobil_port_remove, |
4400 | .ioctl = kobil_ioctl, |
4401 | @@ -113,6 +115,16 @@ struct kobil_private { |
4402 | }; |
4403 | |
4404 | |
4405 | +static int kobil_attach(struct usb_serial *serial) |
4406 | +{ |
4407 | + if (serial->num_interrupt_out < serial->num_ports) { |
4408 | + dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n"); |
4409 | + return -ENODEV; |
4410 | + } |
4411 | + |
4412 | + return 0; |
4413 | +} |
4414 | + |
4415 | static int kobil_port_probe(struct usb_serial_port *port) |
4416 | { |
4417 | struct usb_serial *serial = port->serial; |
4418 | diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c |
4419 | index de9992b492b0..136ff5e1b7c1 100644 |
4420 | --- a/drivers/usb/serial/mos7720.c |
4421 | +++ b/drivers/usb/serial/mos7720.c |
4422 | @@ -65,8 +65,6 @@ struct moschip_port { |
4423 | struct urb *write_urb_pool[NUM_URBS]; |
4424 | }; |
4425 | |
4426 | -static struct usb_serial_driver moschip7720_2port_driver; |
4427 | - |
4428 | #define USB_VENDOR_ID_MOSCHIP 0x9710 |
4429 | #define MOSCHIP_DEVICE_ID_7720 0x7720 |
4430 | #define MOSCHIP_DEVICE_ID_7715 0x7715 |
4431 | @@ -970,25 +968,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb) |
4432 | tty_port_tty_wakeup(&mos7720_port->port->port); |
4433 | } |
4434 | |
4435 | -/* |
4436 | - * mos77xx_probe |
4437 | - * this function installs the appropriate read interrupt endpoint callback |
4438 | - * depending on whether the device is a 7720 or 7715, thus avoiding costly |
4439 | - * run-time checks in the high-frequency callback routine itself. |
4440 | - */ |
4441 | -static int mos77xx_probe(struct usb_serial *serial, |
4442 | - const struct usb_device_id *id) |
4443 | -{ |
4444 | - if (id->idProduct == MOSCHIP_DEVICE_ID_7715) |
4445 | - moschip7720_2port_driver.read_int_callback = |
4446 | - mos7715_interrupt_callback; |
4447 | - else |
4448 | - moschip7720_2port_driver.read_int_callback = |
4449 | - mos7720_interrupt_callback; |
4450 | - |
4451 | - return 0; |
4452 | -} |
4453 | - |
4454 | static int mos77xx_calc_num_ports(struct usb_serial *serial) |
4455 | { |
4456 | u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); |
4457 | @@ -1920,6 +1899,11 @@ static int mos7720_startup(struct usb_serial *serial) |
4458 | u16 product; |
4459 | int ret_val; |
4460 | |
4461 | + if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) { |
4462 | + dev_err(&serial->interface->dev, "missing bulk endpoints\n"); |
4463 | + return -ENODEV; |
4464 | + } |
4465 | + |
4466 | product = le16_to_cpu(serial->dev->descriptor.idProduct); |
4467 | dev = serial->dev; |
4468 | |
4469 | @@ -1944,19 +1928,18 @@ static int mos7720_startup(struct usb_serial *serial) |
4470 | tmp->interrupt_in_endpointAddress; |
4471 | serial->port[1]->interrupt_in_urb = NULL; |
4472 | serial->port[1]->interrupt_in_buffer = NULL; |
4473 | + |
4474 | + if (serial->port[0]->interrupt_in_urb) { |
4475 | + struct urb *urb = serial->port[0]->interrupt_in_urb; |
4476 | + |
4477 | + urb->complete = mos7715_interrupt_callback; |
4478 | + } |
4479 | } |
4480 | |
4481 | /* setting configuration feature to one */ |
4482 | usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), |
4483 | (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000); |
4484 | |
4485 | - /* start the interrupt urb */ |
4486 | - ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL); |
4487 | - if (ret_val) |
4488 | - dev_err(&dev->dev, |
4489 | - "%s - Error %d submitting control urb\n", |
4490 | - __func__, ret_val); |
4491 | - |
4492 | #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT |
4493 | if (product == MOSCHIP_DEVICE_ID_7715) { |
4494 | ret_val = mos7715_parport_init(serial); |
4495 | @@ -1964,6 +1947,13 @@ static int mos7720_startup(struct usb_serial *serial) |
4496 | return ret_val; |
4497 | } |
4498 | #endif |
4499 | + /* start the interrupt urb */ |
4500 | + ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL); |
4501 | + if (ret_val) { |
4502 | + dev_err(&dev->dev, "failed to submit interrupt urb: %d\n", |
4503 | + ret_val); |
4504 | + } |
4505 | + |
4506 | /* LSR For Port 1 */ |
4507 | read_mos_reg(serial, 0, MOS7720_LSR, &data); |
4508 | dev_dbg(&dev->dev, "LSR:%x\n", data); |
4509 | @@ -1973,6 +1963,8 @@ static int mos7720_startup(struct usb_serial *serial) |
4510 | |
4511 | static void mos7720_release(struct usb_serial *serial) |
4512 | { |
4513 | + usb_kill_urb(serial->port[0]->interrupt_in_urb); |
4514 | + |
4515 | #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT |
4516 | /* close the parallel port */ |
4517 | |
4518 | @@ -2056,7 +2048,6 @@ static struct usb_serial_driver moschip7720_2port_driver = { |
4519 | .close = mos7720_close, |
4520 | .throttle = mos7720_throttle, |
4521 | .unthrottle = mos7720_unthrottle, |
4522 | - .probe = mos77xx_probe, |
4523 | .attach = mos7720_startup, |
4524 | .release = mos7720_release, |
4525 | .port_probe = mos7720_port_probe, |
4526 | @@ -2070,7 +2061,7 @@ static struct usb_serial_driver moschip7720_2port_driver = { |
4527 | .chars_in_buffer = mos7720_chars_in_buffer, |
4528 | .break_ctl = mos7720_break, |
4529 | .read_bulk_callback = mos7720_bulk_in_callback, |
4530 | - .read_int_callback = NULL /* dynamically assigned in probe() */ |
4531 | + .read_int_callback = mos7720_interrupt_callback, |
4532 | }; |
4533 | |
4534 | static struct usb_serial_driver * const serial_drivers[] = { |
4535 | diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c |
4536 | index 57426d703a09..4f9af47e6a29 100644 |
4537 | --- a/drivers/usb/serial/mos7840.c |
4538 | +++ b/drivers/usb/serial/mos7840.c |
4539 | @@ -2116,6 +2116,17 @@ static int mos7840_calc_num_ports(struct usb_serial *serial) |
4540 | return mos7840_num_ports; |
4541 | } |
4542 | |
4543 | +static int mos7840_attach(struct usb_serial *serial) |
4544 | +{ |
4545 | + if (serial->num_bulk_in < serial->num_ports || |
4546 | + serial->num_bulk_out < serial->num_ports) { |
4547 | + dev_err(&serial->interface->dev, "missing endpoints\n"); |
4548 | + return -ENODEV; |
4549 | + } |
4550 | + |
4551 | + return 0; |
4552 | +} |
4553 | + |
4554 | static int mos7840_port_probe(struct usb_serial_port *port) |
4555 | { |
4556 | struct usb_serial *serial = port->serial; |
4557 | @@ -2391,6 +2402,7 @@ static struct usb_serial_driver moschip7840_4port_device = { |
4558 | .tiocmset = mos7840_tiocmset, |
4559 | .tiocmiwait = usb_serial_generic_tiocmiwait, |
4560 | .get_icount = usb_serial_generic_get_icount, |
4561 | + .attach = mos7840_attach, |
4562 | .port_probe = mos7840_port_probe, |
4563 | .port_remove = mos7840_port_remove, |
4564 | .read_bulk_callback = mos7840_bulk_in_callback, |
4565 | diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c |
4566 | index f6c6900bccf0..a180b17d2432 100644 |
4567 | --- a/drivers/usb/serial/omninet.c |
4568 | +++ b/drivers/usb/serial/omninet.c |
4569 | @@ -38,6 +38,7 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, |
4570 | const unsigned char *buf, int count); |
4571 | static int omninet_write_room(struct tty_struct *tty); |
4572 | static void omninet_disconnect(struct usb_serial *serial); |
4573 | +static int omninet_attach(struct usb_serial *serial); |
4574 | static int omninet_port_probe(struct usb_serial_port *port); |
4575 | static int omninet_port_remove(struct usb_serial_port *port); |
4576 | |
4577 | @@ -56,6 +57,7 @@ static struct usb_serial_driver zyxel_omninet_device = { |
4578 | .description = "ZyXEL - omni.net lcd plus usb", |
4579 | .id_table = id_table, |
4580 | .num_ports = 1, |
4581 | + .attach = omninet_attach, |
4582 | .port_probe = omninet_port_probe, |
4583 | .port_remove = omninet_port_remove, |
4584 | .open = omninet_open, |
4585 | @@ -104,6 +106,17 @@ struct omninet_data { |
4586 | __u8 od_outseq; /* Sequence number for bulk_out URBs */ |
4587 | }; |
4588 | |
4589 | +static int omninet_attach(struct usb_serial *serial) |
4590 | +{ |
4591 | + /* The second bulk-out endpoint is used for writing. */ |
4592 | + if (serial->num_bulk_out < 2) { |
4593 | + dev_err(&serial->interface->dev, "missing endpoints\n"); |
4594 | + return -ENODEV; |
4595 | + } |
4596 | + |
4597 | + return 0; |
4598 | +} |
4599 | + |
4600 | static int omninet_port_probe(struct usb_serial_port *port) |
4601 | { |
4602 | struct omninet_data *od; |
4603 | diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c |
4604 | index a4b88bc038b6..b8bf52bf7a94 100644 |
4605 | --- a/drivers/usb/serial/oti6858.c |
4606 | +++ b/drivers/usb/serial/oti6858.c |
4607 | @@ -134,6 +134,7 @@ static int oti6858_chars_in_buffer(struct tty_struct *tty); |
4608 | static int oti6858_tiocmget(struct tty_struct *tty); |
4609 | static int oti6858_tiocmset(struct tty_struct *tty, |
4610 | unsigned int set, unsigned int clear); |
4611 | +static int oti6858_attach(struct usb_serial *serial); |
4612 | static int oti6858_port_probe(struct usb_serial_port *port); |
4613 | static int oti6858_port_remove(struct usb_serial_port *port); |
4614 | |
4615 | @@ -158,6 +159,7 @@ static struct usb_serial_driver oti6858_device = { |
4616 | .write_bulk_callback = oti6858_write_bulk_callback, |
4617 | .write_room = oti6858_write_room, |
4618 | .chars_in_buffer = oti6858_chars_in_buffer, |
4619 | + .attach = oti6858_attach, |
4620 | .port_probe = oti6858_port_probe, |
4621 | .port_remove = oti6858_port_remove, |
4622 | }; |
4623 | @@ -324,6 +326,20 @@ static void send_data(struct work_struct *work) |
4624 | usb_serial_port_softint(port); |
4625 | } |
4626 | |
4627 | +static int oti6858_attach(struct usb_serial *serial) |
4628 | +{ |
4629 | + unsigned char num_ports = serial->num_ports; |
4630 | + |
4631 | + if (serial->num_bulk_in < num_ports || |
4632 | + serial->num_bulk_out < num_ports || |
4633 | + serial->num_interrupt_in < num_ports) { |
4634 | + dev_err(&serial->interface->dev, "missing endpoints\n"); |
4635 | + return -ENODEV; |
4636 | + } |
4637 | + |
4638 | + return 0; |
4639 | +} |
4640 | + |
4641 | static int oti6858_port_probe(struct usb_serial_port *port) |
4642 | { |
4643 | struct oti6858_private *priv; |
4644 | diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c |
4645 | index ae682e4eeaef..46fca6b75846 100644 |
4646 | --- a/drivers/usb/serial/pl2303.c |
4647 | +++ b/drivers/usb/serial/pl2303.c |
4648 | @@ -220,9 +220,17 @@ static int pl2303_probe(struct usb_serial *serial, |
4649 | static int pl2303_startup(struct usb_serial *serial) |
4650 | { |
4651 | struct pl2303_serial_private *spriv; |
4652 | + unsigned char num_ports = serial->num_ports; |
4653 | enum pl2303_type type = TYPE_01; |
4654 | unsigned char *buf; |
4655 | |
4656 | + if (serial->num_bulk_in < num_ports || |
4657 | + serial->num_bulk_out < num_ports || |
4658 | + serial->num_interrupt_in < num_ports) { |
4659 | + dev_err(&serial->interface->dev, "missing endpoints\n"); |
4660 | + return -ENODEV; |
4661 | + } |
4662 | + |
4663 | spriv = kzalloc(sizeof(*spriv), GFP_KERNEL); |
4664 | if (!spriv) |
4665 | return -ENOMEM; |
4666 | diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c |
4667 | index 85acb50a7ee2..bd1a1307e0f0 100644 |
4668 | --- a/drivers/usb/serial/quatech2.c |
4669 | +++ b/drivers/usb/serial/quatech2.c |
4670 | @@ -408,16 +408,12 @@ static void qt2_close(struct usb_serial_port *port) |
4671 | { |
4672 | struct usb_serial *serial; |
4673 | struct qt2_port_private *port_priv; |
4674 | - unsigned long flags; |
4675 | int i; |
4676 | |
4677 | serial = port->serial; |
4678 | port_priv = usb_get_serial_port_data(port); |
4679 | |
4680 | - spin_lock_irqsave(&port_priv->urb_lock, flags); |
4681 | usb_kill_urb(port_priv->write_urb); |
4682 | - port_priv->urb_in_use = false; |
4683 | - spin_unlock_irqrestore(&port_priv->urb_lock, flags); |
4684 | |
4685 | /* flush the port transmit buffer */ |
4686 | i = usb_control_msg(serial->dev, |
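The quatech2 change drops the spinlock (and the urb_in_use bookkeeping) around the kill because usb_kill_urb() waits for the URB to complete and may therefore sleep, so it must not sit inside a spin_lock_irqsave() region. The essence of the fix is simply:

	usb_kill_urb(port_priv->write_urb);	/* may sleep: call it unlocked */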
4687 | diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c |
4688 | index ef0dbf0703c5..475e6c31b266 100644 |
4689 | --- a/drivers/usb/serial/spcp8x5.c |
4690 | +++ b/drivers/usb/serial/spcp8x5.c |
4691 | @@ -154,6 +154,19 @@ static int spcp8x5_probe(struct usb_serial *serial, |
4692 | return 0; |
4693 | } |
4694 | |
4695 | +static int spcp8x5_attach(struct usb_serial *serial) |
4696 | +{ |
4697 | + unsigned char num_ports = serial->num_ports; |
4698 | + |
4699 | + if (serial->num_bulk_in < num_ports || |
4700 | + serial->num_bulk_out < num_ports) { |
4701 | + dev_err(&serial->interface->dev, "missing endpoints\n"); |
4702 | + return -ENODEV; |
4703 | + } |
4704 | + |
4705 | + return 0; |
4706 | +} |
4707 | + |
4708 | static int spcp8x5_port_probe(struct usb_serial_port *port) |
4709 | { |
4710 | const struct usb_device_id *id = usb_get_serial_data(port->serial); |
4711 | @@ -477,6 +490,7 @@ static struct usb_serial_driver spcp8x5_device = { |
4712 | .tiocmget = spcp8x5_tiocmget, |
4713 | .tiocmset = spcp8x5_tiocmset, |
4714 | .probe = spcp8x5_probe, |
4715 | + .attach = spcp8x5_attach, |
4716 | .port_probe = spcp8x5_port_probe, |
4717 | .port_remove = spcp8x5_port_remove, |
4718 | }; |
4719 | diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c |
4720 | index a8b9bdba314f..bdbddbc8bd4d 100644 |
4721 | --- a/drivers/usb/serial/ti_usb_3410_5052.c |
4722 | +++ b/drivers/usb/serial/ti_usb_3410_5052.c |
4723 | @@ -579,6 +579,13 @@ static int ti_startup(struct usb_serial *serial) |
4724 | goto free_tdev; |
4725 | } |
4726 | |
4727 | + if (serial->num_bulk_in < serial->num_ports || |
4728 | + serial->num_bulk_out < serial->num_ports) { |
4729 | + dev_err(&serial->interface->dev, "missing endpoints\n"); |
4730 | + status = -ENODEV; |
4731 | + goto free_tdev; |
4732 | + } |
4733 | + |
4734 | return 0; |
4735 | |
4736 | free_tdev: |
4737 | diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h |
4738 | index af3c7eecff91..16cc18369111 100644 |
4739 | --- a/drivers/usb/storage/unusual_devs.h |
4740 | +++ b/drivers/usb/storage/unusual_devs.h |
4741 | @@ -2109,6 +2109,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114, |
4742 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
4743 | US_FL_BROKEN_FUA ), |
4744 | |
4745 | +/* Reported-by George Cherian <george.cherian@cavium.com> */ |
4746 | +UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, |
4747 | + "JMicron", |
4748 | + "JMS56x", |
4749 | + USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
4750 | + US_FL_NO_REPORT_OPCODES), |
4751 | + |
4752 | /* |
4753 | * Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) |
4754 | * and Mac USB Dock USB-SCSI */ |
4755 | diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c |
4756 | index 6865663aac69..abc18847b98d 100644 |
4757 | --- a/fs/crypto/policy.c |
4758 | +++ b/fs/crypto/policy.c |
4759 | @@ -171,6 +171,11 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) |
4760 | BUG_ON(1); |
4761 | } |
4762 | |
4763 | + /* No restrictions on file types which are never encrypted */ |
4764 | + if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) && |
4765 | + !S_ISLNK(child->i_mode)) |
4766 | + return 1; |
4767 | + |
4768 | /* no restrictions if the parent directory is not encrypted */ |
4769 | if (!parent->i_sb->s_cop->is_encrypted(parent)) |
4770 | return 1; |
4771 | diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c |
4772 | index 9ae194fd2fdb..14db4b712021 100644 |
4773 | --- a/fs/f2fs/data.c |
4774 | +++ b/fs/f2fs/data.c |
4775 | @@ -716,7 +716,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, |
4776 | } |
4777 | |
4778 | prealloc = 0; |
4779 | - ofs_in_node = dn.ofs_in_node; |
4780 | + last_ofs_in_node = ofs_in_node = dn.ofs_in_node; |
4781 | end_offset = ADDRS_PER_PAGE(dn.node_page, inode); |
4782 | |
4783 | next_block: |
4784 | diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c |
4785 | index 1c35e80732e0..687998e9557c 100644 |
4786 | --- a/fs/f2fs/debug.c |
4787 | +++ b/fs/f2fs/debug.c |
4788 | @@ -310,17 +310,17 @@ static int stat_show(struct seq_file *s, void *v) |
4789 | seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n", |
4790 | si->ext_tree, si->zombie_tree, si->ext_node); |
4791 | seq_puts(s, "\nBalancing F2FS Async:\n"); |
4792 | - seq_printf(s, " - inmem: %4lld, wb_bios: %4d\n", |
4793 | + seq_printf(s, " - inmem: %4d, wb_bios: %4d\n", |
4794 | si->inmem_pages, si->wb_bios); |
4795 | - seq_printf(s, " - nodes: %4lld in %4d\n", |
4796 | + seq_printf(s, " - nodes: %4d in %4d\n", |
4797 | si->ndirty_node, si->node_pages); |
4798 | - seq_printf(s, " - dents: %4lld in dirs:%4d (%4d)\n", |
4799 | + seq_printf(s, " - dents: %4d in dirs:%4d (%4d)\n", |
4800 | si->ndirty_dent, si->ndirty_dirs, si->ndirty_all); |
4801 | - seq_printf(s, " - datas: %4lld in files:%4d\n", |
4802 | + seq_printf(s, " - datas: %4d in files:%4d\n", |
4803 | si->ndirty_data, si->ndirty_files); |
4804 | - seq_printf(s, " - meta: %4lld in %4d\n", |
4805 | + seq_printf(s, " - meta: %4d in %4d\n", |
4806 | si->ndirty_meta, si->meta_pages); |
4807 | - seq_printf(s, " - imeta: %4lld\n", |
4808 | + seq_printf(s, " - imeta: %4d\n", |
4809 | si->ndirty_imeta); |
4810 | seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n", |
4811 | si->dirty_nats, si->nats, si->dirty_sits, si->sits); |
4812 | diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h |
4813 | index 6dd03115789b..506af456412f 100644 |
4814 | --- a/fs/f2fs/f2fs.h |
4815 | +++ b/fs/f2fs/f2fs.h |
4816 | @@ -819,7 +819,7 @@ struct f2fs_sb_info { |
4817 | atomic_t nr_wb_bios; /* # of writeback bios */ |
4818 | |
4819 | /* # of pages, see count_type */ |
4820 | - struct percpu_counter nr_pages[NR_COUNT_TYPE]; |
4821 | + atomic_t nr_pages[NR_COUNT_TYPE]; |
4822 | /* # of allocated blocks */ |
4823 | struct percpu_counter alloc_valid_block_count; |
4824 | |
4825 | @@ -1233,7 +1233,7 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, |
4826 | |
4827 | static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) |
4828 | { |
4829 | - percpu_counter_inc(&sbi->nr_pages[count_type]); |
4830 | + atomic_inc(&sbi->nr_pages[count_type]); |
4831 | |
4832 | if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES) |
4833 | return; |
4834 | @@ -1250,7 +1250,7 @@ static inline void inode_inc_dirty_pages(struct inode *inode) |
4835 | |
4836 | static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) |
4837 | { |
4838 | - percpu_counter_dec(&sbi->nr_pages[count_type]); |
4839 | + atomic_dec(&sbi->nr_pages[count_type]); |
4840 | } |
4841 | |
4842 | static inline void inode_dec_dirty_pages(struct inode *inode) |
4843 | @@ -1266,7 +1266,7 @@ static inline void inode_dec_dirty_pages(struct inode *inode) |
4844 | |
4845 | static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) |
4846 | { |
4847 | - return percpu_counter_sum_positive(&sbi->nr_pages[count_type]); |
4848 | + return atomic_read(&sbi->nr_pages[count_type]); |
4849 | } |
4850 | |
4851 | static inline int get_dirty_pages(struct inode *inode) |
4852 | @@ -2187,8 +2187,8 @@ struct f2fs_stat_info { |
4853 | unsigned long long hit_largest, hit_cached, hit_rbtree; |
4854 | unsigned long long hit_total, total_ext; |
4855 | int ext_tree, zombie_tree, ext_node; |
4856 | - s64 ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta; |
4857 | - s64 inmem_pages; |
4858 | + int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta; |
4859 | + int inmem_pages; |
4860 | unsigned int ndirty_dirs, ndirty_files, ndirty_all; |
4861 | int nats, dirty_nats, sits, dirty_sits, fnids; |
4862 | int total_count, utilization; |
4863 | diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c |
4864 | index 8021d35df7b0..013c6a541d6b 100644 |
4865 | --- a/fs/f2fs/super.c |
4866 | +++ b/fs/f2fs/super.c |
4867 | @@ -688,10 +688,6 @@ static void f2fs_destroy_inode(struct inode *inode) |
4868 | |
4869 | static void destroy_percpu_info(struct f2fs_sb_info *sbi) |
4870 | { |
4871 | - int i; |
4872 | - |
4873 | - for (i = 0; i < NR_COUNT_TYPE; i++) |
4874 | - percpu_counter_destroy(&sbi->nr_pages[i]); |
4875 | percpu_counter_destroy(&sbi->alloc_valid_block_count); |
4876 | percpu_counter_destroy(&sbi->total_valid_inode_count); |
4877 | } |
4878 | @@ -1442,6 +1438,7 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi) |
4879 | static void init_sb_info(struct f2fs_sb_info *sbi) |
4880 | { |
4881 | struct f2fs_super_block *raw_super = sbi->raw_super; |
4882 | + int i; |
4883 | |
4884 | sbi->log_sectors_per_block = |
4885 | le32_to_cpu(raw_super->log_sectors_per_block); |
4886 | @@ -1466,6 +1463,9 @@ static void init_sb_info(struct f2fs_sb_info *sbi) |
4887 | sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL; |
4888 | clear_sbi_flag(sbi, SBI_NEED_FSCK); |
4889 | |
4890 | + for (i = 0; i < NR_COUNT_TYPE; i++) |
4891 | + atomic_set(&sbi->nr_pages[i], 0); |
4892 | + |
4893 | INIT_LIST_HEAD(&sbi->s_list); |
4894 | mutex_init(&sbi->umount_mutex); |
4895 | mutex_init(&sbi->wio_mutex[NODE]); |
4896 | @@ -1481,13 +1481,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi) |
4897 | |
4898 | static int init_percpu_info(struct f2fs_sb_info *sbi) |
4899 | { |
4900 | - int i, err; |
4901 | - |
4902 | - for (i = 0; i < NR_COUNT_TYPE; i++) { |
4903 | - err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL); |
4904 | - if (err) |
4905 | - return err; |
4906 | - } |
4907 | + int err; |
4908 | |
4909 | err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL); |
4910 | if (err) |
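The f2fs hunks above replace the per-CPU nr_pages counters with plain atomic_t: reads become exact and cheap (atomic_read() instead of a summation over all CPUs), and the counters no longer need a fallible constructor or a destructor, which is why init_percpu_info() and destroy_percpu_info() shrink while init_sb_info() gains a simple initialisation loop. A sketch of the two lifecycles, assuming nothing beyond the generic kernel counter APIs:

	/* before: per-CPU counter, must be initialised (can fail) and destroyed */
	err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
	if (err)
		return err;
	/* ... */
	percpu_counter_destroy(&sbi->nr_pages[i]);

	/* after: plain atomic_t, nothing to tear down, exact reads */
	atomic_set(&sbi->nr_pages[i], 0);
	count = atomic_read(&sbi->nr_pages[i]);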
4911 | diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c |
4912 | index e5ebc3770460..d346d42c54d1 100644 |
4913 | --- a/fs/xfs/libxfs/xfs_ag_resv.c |
4914 | +++ b/fs/xfs/libxfs/xfs_ag_resv.c |
4915 | @@ -256,6 +256,9 @@ xfs_ag_resv_init( |
4916 | goto out; |
4917 | } |
4918 | |
4919 | + ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved + |
4920 | + xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <= |
4921 | + pag->pagf_freeblks + pag->pagf_flcount); |
4922 | out: |
4923 | return error; |
4924 | } |
4925 | diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c |
4926 | index effb64cf714f..5050056a0b06 100644 |
4927 | --- a/fs/xfs/libxfs/xfs_alloc.c |
4928 | +++ b/fs/xfs/libxfs/xfs_alloc.c |
4929 | @@ -2455,12 +2455,15 @@ xfs_agf_verify( |
4930 | be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp))) |
4931 | return false; |
4932 | |
4933 | - if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS || |
4934 | + if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 || |
4935 | + be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 || |
4936 | + be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS || |
4937 | be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS) |
4938 | return false; |
4939 | |
4940 | if (xfs_sb_version_hasrmapbt(&mp->m_sb) && |
4941 | - be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS) |
4942 | + (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 || |
4943 | + be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS)) |
4944 | return false; |
4945 | |
4946 | /* |
4947 | @@ -2477,7 +2480,8 @@ xfs_agf_verify( |
4948 | return false; |
4949 | |
4950 | if (xfs_sb_version_hasreflink(&mp->m_sb) && |
4951 | - be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS) |
4952 | + (be32_to_cpu(agf->agf_refcount_level) < 1 || |
4953 | + be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS)) |
4954 | return false; |
4955 | |
4956 | return true;; |
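The xfs_agf_verify() changes tighten the sanity checks on on-disk btree heights: a valid level is never zero, so both bounds are now enforced for the bno, cnt, rmap and refcount trees, and xfs_agi_verify() further down gets the same treatment for the inobt and finobt. A hypothetical helper capturing the predicate these checks repeat:

	static bool example_valid_btree_level(uint32_t level)
	{
		/* an on-disk btree always has at least its root level */
		return level >= 1 && level <= XFS_BTREE_MAXLEVELS;
	}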
4957 | diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c |
4958 | index 5ba2dac5e67c..c06ec77a9418 100644 |
4959 | --- a/fs/xfs/libxfs/xfs_alloc_btree.c |
4960 | +++ b/fs/xfs/libxfs/xfs_alloc_btree.c |
4961 | @@ -421,7 +421,7 @@ xfs_allocbt_init_cursor( |
4962 | |
4963 | ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT); |
4964 | |
4965 | - cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); |
4966 | + cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS); |
4967 | |
4968 | cur->bc_tp = tp; |
4969 | cur->bc_mp = mp; |
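The switch from KM_SLEEP to KM_NOFS for btree cursor allocation (repeated below for the bmbt and inobt cursors) keeps memory reclaim from re-entering the filesystem while a transaction holds locks. Roughly, the XFS kmem flags map onto GFP flags as sketched here:

	/* sketch: KM_SLEEP behaves like GFP_KERNEL (reclaim may recurse into
	 * the filesystem), KM_NOFS like GFP_NOFS (it must not) */
	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);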
4970 | diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c |
4971 | index 8ea91f363093..2852521fc8ec 100644 |
4972 | --- a/fs/xfs/libxfs/xfs_attr_leaf.c |
4973 | +++ b/fs/xfs/libxfs/xfs_attr_leaf.c |
4974 | @@ -253,6 +253,7 @@ xfs_attr3_leaf_verify( |
4975 | { |
4976 | struct xfs_mount *mp = bp->b_target->bt_mount; |
4977 | struct xfs_attr_leafblock *leaf = bp->b_addr; |
4978 | + struct xfs_perag *pag = bp->b_pag; |
4979 | struct xfs_attr3_icleaf_hdr ichdr; |
4980 | |
4981 | xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf); |
4982 | @@ -273,7 +274,12 @@ xfs_attr3_leaf_verify( |
4983 | if (ichdr.magic != XFS_ATTR_LEAF_MAGIC) |
4984 | return false; |
4985 | } |
4986 | - if (ichdr.count == 0) |
4987 | + /* |
4988 | + * In recovery there is a transient state where count == 0 is valid |
4989 | + * because we may have transitioned an empty shortform attr to a leaf |
4990 | + * if the attr didn't fit in shortform. |
4991 | + */ |
4992 | + if (pag && pag->pagf_init && ichdr.count == 0) |
4993 | return false; |
4994 | |
4995 | /* XXX: need to range check rest of attr header values */ |
4996 | diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c |
4997 | index c6eb21940783..89d727b659fc 100644 |
4998 | --- a/fs/xfs/libxfs/xfs_bmap.c |
4999 | +++ b/fs/xfs/libxfs/xfs_bmap.c |
5000 | @@ -49,6 +49,8 @@ |
5001 | #include "xfs_rmap.h" |
5002 | #include "xfs_ag_resv.h" |
5003 | #include "xfs_refcount.h" |
5004 | +#include "xfs_rmap_btree.h" |
5005 | +#include "xfs_icache.h" |
5006 | |
5007 | |
5008 | kmem_zone_t *xfs_bmap_free_item_zone; |
5009 | @@ -190,8 +192,12 @@ xfs_bmap_worst_indlen( |
5010 | int maxrecs; /* maximum record count at this level */ |
5011 | xfs_mount_t *mp; /* mount structure */ |
5012 | xfs_filblks_t rval; /* return value */ |
5013 | + xfs_filblks_t orig_len; |
5014 | |
5015 | mp = ip->i_mount; |
5016 | + |
5017 | + /* Calculate the worst-case size of the bmbt. */ |
5018 | + orig_len = len; |
5019 | maxrecs = mp->m_bmap_dmxr[0]; |
5020 | for (level = 0, rval = 0; |
5021 | level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); |
5022 | @@ -199,12 +205,20 @@ xfs_bmap_worst_indlen( |
5023 | len += maxrecs - 1; |
5024 | do_div(len, maxrecs); |
5025 | rval += len; |
5026 | - if (len == 1) |
5027 | - return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - |
5028 | + if (len == 1) { |
5029 | + rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - |
5030 | level - 1; |
5031 | + break; |
5032 | + } |
5033 | if (level == 0) |
5034 | maxrecs = mp->m_bmap_dmxr[1]; |
5035 | } |
5036 | + |
5037 | + /* Calculate the worst-case size of the rmapbt. */ |
5038 | + if (xfs_sb_version_hasrmapbt(&mp->m_sb)) |
5039 | + rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) + |
5040 | + mp->m_rmap_maxlevels; |
5041 | + |
5042 | return rval; |
5043 | } |
5044 | |
5045 | @@ -504,7 +518,7 @@ void |
5046 | xfs_bmap_trace_exlist( |
5047 | xfs_inode_t *ip, /* incore inode pointer */ |
5048 | xfs_extnum_t cnt, /* count of entries in the list */ |
5049 | - int whichfork, /* data or attr fork */ |
5050 | + int whichfork, /* data or attr or cow fork */ |
5051 | unsigned long caller_ip) |
5052 | { |
5053 | xfs_extnum_t idx; /* extent record index */ |
5054 | @@ -513,11 +527,13 @@ xfs_bmap_trace_exlist( |
5055 | |
5056 | if (whichfork == XFS_ATTR_FORK) |
5057 | state |= BMAP_ATTRFORK; |
5058 | + else if (whichfork == XFS_COW_FORK) |
5059 | + state |= BMAP_COWFORK; |
5060 | |
5061 | ifp = XFS_IFORK_PTR(ip, whichfork); |
5062 | - ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); |
5063 | + ASSERT(cnt == xfs_iext_count(ifp)); |
5064 | for (idx = 0; idx < cnt; idx++) |
5065 | - trace_xfs_extlist(ip, idx, whichfork, caller_ip); |
5066 | + trace_xfs_extlist(ip, idx, state, caller_ip); |
5067 | } |
5068 | |
5069 | /* |
5070 | @@ -811,7 +827,7 @@ xfs_bmap_extents_to_btree( |
5071 | XFS_BTREE_LONG_PTRS); |
5072 | |
5073 | arp = XFS_BMBT_REC_ADDR(mp, ablock, 1); |
5074 | - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5075 | + nextents = xfs_iext_count(ifp); |
5076 | for (cnt = i = 0; i < nextents; i++) { |
5077 | ep = xfs_iext_get_ext(ifp, i); |
5078 | if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) { |
5079 | @@ -1137,6 +1153,10 @@ xfs_bmap_add_attrfork( |
5080 | goto trans_cancel; |
5081 | if (XFS_IFORK_Q(ip)) |
5082 | goto trans_cancel; |
5083 | + if (ip->i_d.di_anextents != 0) { |
5084 | + error = -EFSCORRUPTED; |
5085 | + goto trans_cancel; |
5086 | + } |
5087 | if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) { |
5088 | /* |
5089 | * For inodes coming from pre-6.2 filesystems. |
5090 | @@ -1144,7 +1164,6 @@ xfs_bmap_add_attrfork( |
5091 | ASSERT(ip->i_d.di_aformat == 0); |
5092 | ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; |
5093 | } |
5094 | - ASSERT(ip->i_d.di_anextents == 0); |
5095 | |
5096 | xfs_trans_ijoin(tp, ip, 0); |
5097 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
5098 | @@ -1296,7 +1315,7 @@ xfs_bmap_read_extents( |
5099 | /* |
5100 | * Here with bp and block set to the leftmost leaf node in the tree. |
5101 | */ |
5102 | - room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5103 | + room = xfs_iext_count(ifp); |
5104 | i = 0; |
5105 | /* |
5106 | * Loop over all leaf nodes. Copy information to the extent records. |
5107 | @@ -1361,8 +1380,9 @@ xfs_bmap_read_extents( |
5108 | return error; |
5109 | block = XFS_BUF_TO_BLOCK(bp); |
5110 | } |
5111 | - ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); |
5112 | - ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork)); |
5113 | + if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) |
5114 | + return -EFSCORRUPTED; |
5115 | + ASSERT(i == xfs_iext_count(ifp)); |
5116 | XFS_BMAP_TRACE_EXLIST(ip, i, whichfork); |
5117 | return 0; |
5118 | error0: |
5119 | @@ -1404,7 +1424,7 @@ xfs_bmap_search_multi_extents( |
5120 | if (lastx > 0) { |
5121 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp); |
5122 | } |
5123 | - if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { |
5124 | + if (lastx < xfs_iext_count(ifp)) { |
5125 | xfs_bmbt_get_all(ep, gotp); |
5126 | *eofp = 0; |
5127 | } else { |
5128 | @@ -1497,7 +1517,7 @@ xfs_bmap_first_unused( |
5129 | (error = xfs_iread_extents(tp, ip, whichfork))) |
5130 | return error; |
5131 | lowest = *first_unused; |
5132 | - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5133 | + nextents = xfs_iext_count(ifp); |
5134 | for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) { |
5135 | xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx); |
5136 | off = xfs_bmbt_get_startoff(ep); |
5137 | @@ -1582,7 +1602,7 @@ xfs_bmap_last_extent( |
5138 | return error; |
5139 | } |
5140 | |
5141 | - nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); |
5142 | + nextents = xfs_iext_count(ifp); |
5143 | if (nextents == 0) { |
5144 | *is_empty = 1; |
5145 | return 0; |
5146 | @@ -1735,7 +1755,7 @@ xfs_bmap_add_extent_delay_real( |
5147 | &bma->ip->i_d.di_nextents); |
5148 | |
5149 | ASSERT(bma->idx >= 0); |
5150 | - ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec)); |
5151 | + ASSERT(bma->idx <= xfs_iext_count(ifp)); |
5152 | ASSERT(!isnullstartblock(new->br_startblock)); |
5153 | ASSERT(!bma->cur || |
5154 | (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); |
5155 | @@ -1794,7 +1814,7 @@ xfs_bmap_add_extent_delay_real( |
5156 | * Don't set contiguous if the combined extent would be too large. |
5157 | * Also check for all-three-contiguous being too large. |
5158 | */ |
5159 | - if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { |
5160 | + if (bma->idx < xfs_iext_count(ifp) - 1) { |
5161 | state |= BMAP_RIGHT_VALID; |
5162 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT); |
5163 | |
5164 | @@ -2300,7 +2320,7 @@ xfs_bmap_add_extent_unwritten_real( |
5165 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); |
5166 | |
5167 | ASSERT(*idx >= 0); |
5168 | - ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec)); |
5169 | + ASSERT(*idx <= xfs_iext_count(ifp)); |
5170 | ASSERT(!isnullstartblock(new->br_startblock)); |
5171 | |
5172 | XFS_STATS_INC(mp, xs_add_exlist); |
5173 | @@ -2356,7 +2376,7 @@ xfs_bmap_add_extent_unwritten_real( |
5174 | * Don't set contiguous if the combined extent would be too large. |
5175 | * Also check for all-three-contiguous being too large. |
5176 | */ |
5177 | - if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { |
5178 | + if (*idx < xfs_iext_count(&ip->i_df) - 1) { |
5179 | state |= BMAP_RIGHT_VALID; |
5180 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT); |
5181 | if (isnullstartblock(RIGHT.br_startblock)) |
5182 | @@ -2836,7 +2856,7 @@ xfs_bmap_add_extent_hole_delay( |
5183 | * Check and set flags if the current (right) segment exists. |
5184 | * If it doesn't exist, we're converting the hole at end-of-file. |
5185 | */ |
5186 | - if (*idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) { |
5187 | + if (*idx < xfs_iext_count(ifp)) { |
5188 | state |= BMAP_RIGHT_VALID; |
5189 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right); |
5190 | |
5191 | @@ -2966,7 +2986,7 @@ xfs_bmap_add_extent_hole_real( |
5192 | ifp = XFS_IFORK_PTR(bma->ip, whichfork); |
5193 | |
5194 | ASSERT(bma->idx >= 0); |
5195 | - ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec)); |
5196 | + ASSERT(bma->idx <= xfs_iext_count(ifp)); |
5197 | ASSERT(!isnullstartblock(new->br_startblock)); |
5198 | ASSERT(!bma->cur || |
5199 | !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); |
5200 | @@ -2992,7 +3012,7 @@ xfs_bmap_add_extent_hole_real( |
5201 | * Check and set flags if this segment has a current value. |
5202 | * Not true if we're inserting into the "hole" at eof. |
5203 | */ |
5204 | - if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) { |
5205 | + if (bma->idx < xfs_iext_count(ifp)) { |
5206 | state |= BMAP_RIGHT_VALID; |
5207 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right); |
5208 | if (isnullstartblock(right.br_startblock)) |
5209 | @@ -4221,7 +4241,7 @@ xfs_bmapi_read( |
5210 | break; |
5211 | |
5212 | /* Else go on to the next record. */ |
5213 | - if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) |
5214 | + if (++lastx < xfs_iext_count(ifp)) |
5215 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got); |
5216 | else |
5217 | eof = 1; |
5218 | @@ -4234,10 +4254,10 @@ int |
5219 | xfs_bmapi_reserve_delalloc( |
5220 | struct xfs_inode *ip, |
5221 | int whichfork, |
5222 | - xfs_fileoff_t aoff, |
5223 | + xfs_fileoff_t off, |
5224 | xfs_filblks_t len, |
5225 | + xfs_filblks_t prealloc, |
5226 | struct xfs_bmbt_irec *got, |
5227 | - struct xfs_bmbt_irec *prev, |
5228 | xfs_extnum_t *lastx, |
5229 | int eof) |
5230 | { |
5231 | @@ -4248,10 +4268,17 @@ xfs_bmapi_reserve_delalloc( |
5232 | char rt = XFS_IS_REALTIME_INODE(ip); |
5233 | xfs_extlen_t extsz; |
5234 | int error; |
5235 | + xfs_fileoff_t aoff = off; |
5236 | |
5237 | - alen = XFS_FILBLKS_MIN(len, MAXEXTLEN); |
5238 | + /* |
5239 | + * Cap the alloc length. Keep track of prealloc so we know whether to |
5240 | + * tag the inode before we return. |
5241 | + */ |
5242 | + alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN); |
5243 | if (!eof) |
5244 | alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); |
5245 | + if (prealloc && alen >= len) |
5246 | + prealloc = alen - len; |
5247 | |
5248 | /* Figure out the extent size, adjust alen */ |
5249 | if (whichfork == XFS_COW_FORK) |
5250 | @@ -4259,7 +4286,12 @@ xfs_bmapi_reserve_delalloc( |
5251 | else |
5252 | extsz = xfs_get_extsz_hint(ip); |
5253 | if (extsz) { |
5254 | - error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof, |
5255 | + struct xfs_bmbt_irec prev; |
5256 | + |
5257 | + if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev)) |
5258 | + prev.br_startoff = NULLFILEOFF; |
5259 | + |
5260 | + error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof, |
5261 | 1, 0, &aoff, &alen); |
5262 | ASSERT(!error); |
5263 | } |
5264 | @@ -4312,6 +4344,16 @@ xfs_bmapi_reserve_delalloc( |
5265 | */ |
5266 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got); |
5267 | |
5268 | + /* |
5269 | + * Tag the inode if blocks were preallocated. Note that COW fork |
5270 | + * preallocation can occur at the start or end of the extent, even when |
5271 | + * prealloc == 0, so we must also check the aligned offset and length. |
5272 | + */ |
5273 | + if (whichfork == XFS_DATA_FORK && prealloc) |
5274 | + xfs_inode_set_eofblocks_tag(ip); |
5275 | + if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) |
5276 | + xfs_inode_set_cowblocks_tag(ip); |
5277 | + |
5278 | ASSERT(got->br_startoff <= aoff); |
5279 | ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen); |
5280 | ASSERT(isnullstartblock(got->br_startblock)); |
5281 | @@ -4733,7 +4775,7 @@ xfs_bmapi_write( |
5282 | |
5283 | /* Else go on to the next record. */ |
5284 | bma.prev = bma.got; |
5285 | - if (++bma.idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) { |
5286 | + if (++bma.idx < xfs_iext_count(ifp)) { |
5287 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx), |
5288 | &bma.got); |
5289 | } else |
5290 | @@ -4885,7 +4927,7 @@ xfs_bmap_del_extent_delay( |
5291 | da_new = 0; |
5292 | |
5293 | ASSERT(*idx >= 0); |
5294 | - ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec)); |
5295 | + ASSERT(*idx <= xfs_iext_count(ifp)); |
5296 | ASSERT(del->br_blockcount > 0); |
5297 | ASSERT(got->br_startoff <= del->br_startoff); |
5298 | ASSERT(got_endoff >= del_endoff); |
5299 | @@ -4902,8 +4944,11 @@ xfs_bmap_del_extent_delay( |
5300 | * sb counters as we might have to borrow some blocks for the |
5301 | * indirect block accounting. |
5302 | */ |
5303 | - xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del->br_blockcount), 0, |
5304 | + error = xfs_trans_reserve_quota_nblks(NULL, ip, |
5305 | + -((long)del->br_blockcount), 0, |
5306 | isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); |
5307 | + if (error) |
5308 | + return error; |
5309 | ip->i_delayed_blks -= del->br_blockcount; |
5310 | |
5311 | if (whichfork == XFS_COW_FORK) |
5312 | @@ -5013,7 +5058,7 @@ xfs_bmap_del_extent_cow( |
5313 | got_endoff = got->br_startoff + got->br_blockcount; |
5314 | |
5315 | ASSERT(*idx >= 0); |
5316 | - ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec)); |
5317 | + ASSERT(*idx <= xfs_iext_count(ifp)); |
5318 | ASSERT(del->br_blockcount > 0); |
5319 | ASSERT(got->br_startoff <= del->br_startoff); |
5320 | ASSERT(got_endoff >= del_endoff); |
5321 | @@ -5119,8 +5164,7 @@ xfs_bmap_del_extent( |
5322 | state |= BMAP_COWFORK; |
5323 | |
5324 | ifp = XFS_IFORK_PTR(ip, whichfork); |
5325 | - ASSERT((*idx >= 0) && (*idx < ifp->if_bytes / |
5326 | - (uint)sizeof(xfs_bmbt_rec_t))); |
5327 | + ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp))); |
5328 | ASSERT(del->br_blockcount > 0); |
5329 | ep = xfs_iext_get_ext(ifp, *idx); |
5330 | xfs_bmbt_get_all(ep, &got); |
5331 | @@ -5445,7 +5489,6 @@ __xfs_bunmapi( |
5332 | int logflags; /* transaction logging flags */ |
5333 | xfs_extlen_t mod; /* rt extent offset */ |
5334 | xfs_mount_t *mp; /* mount structure */ |
5335 | - xfs_extnum_t nextents; /* number of file extents */ |
5336 | xfs_bmbt_irec_t prev; /* previous extent record */ |
5337 | xfs_fileoff_t start; /* first file offset deleted */ |
5338 | int tmp_logflags; /* partial logging flags */ |
5339 | @@ -5477,8 +5520,7 @@ __xfs_bunmapi( |
5340 | if (!(ifp->if_flags & XFS_IFEXTENTS) && |
5341 | (error = xfs_iread_extents(tp, ip, whichfork))) |
5342 | return error; |
5343 | - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5344 | - if (nextents == 0) { |
5345 | + if (xfs_iext_count(ifp) == 0) { |
5346 | *rlen = 0; |
5347 | return 0; |
5348 | } |
5349 | @@ -5963,7 +6005,7 @@ xfs_bmse_shift_one( |
5350 | |
5351 | mp = ip->i_mount; |
5352 | ifp = XFS_IFORK_PTR(ip, whichfork); |
5353 | - total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); |
5354 | + total_extents = xfs_iext_count(ifp); |
5355 | |
5356 | xfs_bmbt_get_all(gotp, &got); |
5357 | |
5358 | @@ -6140,7 +6182,7 @@ xfs_bmap_shift_extents( |
5359 | * are collapsing out, so we cannot use the count of real extents here. |
5360 | * Instead we have to calculate it from the incore fork. |
5361 | */ |
5362 | - total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); |
5363 | + total_extents = xfs_iext_count(ifp); |
5364 | if (total_extents == 0) { |
5365 | *done = 1; |
5366 | goto del_cursor; |
5367 | @@ -6200,7 +6242,7 @@ xfs_bmap_shift_extents( |
5368 | * count can change. Update the total and grade the next record. |
5369 | */ |
5370 | if (direction == SHIFT_LEFT) { |
5371 | - total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); |
5372 | + total_extents = xfs_iext_count(ifp); |
5373 | stop_extent = total_extents; |
5374 | } |
5375 | |
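Throughout xfs_bmap.c the open-coded ifp->if_bytes / sizeof(xfs_bmbt_rec_t) expression is replaced by xfs_iext_count(). The helper's definition is not part of this excerpt; presumably it is the obvious inline wrapper over the same calculation, along the lines of:

	static inline xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp)
	{
		/* number of incore extent records currently held in the fork */
		return ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	}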
5376 | diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h |
5377 | index 7cae6ec27fa6..d6d175a4fdec 100644 |
5378 | --- a/fs/xfs/libxfs/xfs_bmap.h |
5379 | +++ b/fs/xfs/libxfs/xfs_bmap.h |
5380 | @@ -242,9 +242,8 @@ struct xfs_bmbt_rec_host * |
5381 | int fork, int *eofp, xfs_extnum_t *lastxp, |
5382 | struct xfs_bmbt_irec *gotp, struct xfs_bmbt_irec *prevp); |
5383 | int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork, |
5384 | - xfs_fileoff_t aoff, xfs_filblks_t len, |
5385 | - struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *prev, |
5386 | - xfs_extnum_t *lastx, int eof); |
5387 | + xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc, |
5388 | + struct xfs_bmbt_irec *got, xfs_extnum_t *lastx, int eof); |
5389 | |
5390 | enum xfs_bmap_intent_type { |
5391 | XFS_BMAP_MAP = 1, |
5392 | diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c |
5393 | index 8007d2ba9aef..049fa597ae91 100644 |
5394 | --- a/fs/xfs/libxfs/xfs_bmap_btree.c |
5395 | +++ b/fs/xfs/libxfs/xfs_bmap_btree.c |
5396 | @@ -796,7 +796,7 @@ xfs_bmbt_init_cursor( |
5397 | struct xfs_btree_cur *cur; |
5398 | ASSERT(whichfork != XFS_COW_FORK); |
5399 | |
5400 | - cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); |
5401 | + cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS); |
5402 | |
5403 | cur->bc_tp = tp; |
5404 | cur->bc_mp = mp; |
5405 | diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c |
5406 | index 0e80993c8a59..21e6a6ab6b9a 100644 |
5407 | --- a/fs/xfs/libxfs/xfs_btree.c |
5408 | +++ b/fs/xfs/libxfs/xfs_btree.c |
5409 | @@ -1769,8 +1769,28 @@ xfs_btree_lookup_get_block( |
5410 | if (error) |
5411 | return error; |
5412 | |
5413 | + /* Check the inode owner since the verifiers don't. */ |
5414 | + if (xfs_sb_version_hascrc(&cur->bc_mp->m_sb) && |
5415 | + (cur->bc_flags & XFS_BTREE_LONG_PTRS) && |
5416 | + be64_to_cpu((*blkp)->bb_u.l.bb_owner) != |
5417 | + cur->bc_private.b.ip->i_ino) |
5418 | + goto out_bad; |
5419 | + |
5420 | + /* Did we get the level we were looking for? */ |
5421 | + if (be16_to_cpu((*blkp)->bb_level) != level) |
5422 | + goto out_bad; |
5423 | + |
5424 | + /* Check that internal nodes have at least one record. */ |
5425 | + if (level != 0 && be16_to_cpu((*blkp)->bb_numrecs) == 0) |
5426 | + goto out_bad; |
5427 | + |
5428 | xfs_btree_setbuf(cur, level, bp); |
5429 | return 0; |
5430 | + |
5431 | +out_bad: |
5432 | + *blkp = NULL; |
5433 | + xfs_trans_brelse(cur->bc_tp, bp); |
5434 | + return -EFSCORRUPTED; |
5435 | } |
5436 | |
5437 | /* |
5438 | diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c |
5439 | index 725fc7841fde..e526f5a5f0be 100644 |
5440 | --- a/fs/xfs/libxfs/xfs_dir2_data.c |
5441 | +++ b/fs/xfs/libxfs/xfs_dir2_data.c |
5442 | @@ -329,7 +329,7 @@ xfs_dir3_data_read( |
5443 | |
5444 | err = xfs_da_read_buf(tp, dp, bno, mapped_bno, bpp, |
5445 | XFS_DATA_FORK, &xfs_dir3_data_buf_ops); |
5446 | - if (!err && tp) |
5447 | + if (!err && tp && *bpp) |
5448 | xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_DATA_BUF); |
5449 | return err; |
5450 | } |
5451 | diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c |
5452 | index 51b4e0de1fdc..d45c03779dae 100644 |
5453 | --- a/fs/xfs/libxfs/xfs_ialloc.c |
5454 | +++ b/fs/xfs/libxfs/xfs_ialloc.c |
5455 | @@ -2450,8 +2450,6 @@ xfs_ialloc_log_agi( |
5456 | ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); |
5457 | #endif |
5458 | |
5459 | - xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGI_BUF); |
5460 | - |
5461 | /* |
5462 | * Compute byte offsets for the first and last fields in the first |
5463 | * region and log the agi buffer. This only logs up through |
5464 | @@ -2512,8 +2510,15 @@ xfs_agi_verify( |
5465 | if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum))) |
5466 | return false; |
5467 | |
5468 | - if (be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS) |
5469 | + if (be32_to_cpu(agi->agi_level) < 1 || |
5470 | + be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS) |
5471 | + return false; |
5472 | + |
5473 | + if (xfs_sb_version_hasfinobt(&mp->m_sb) && |
5474 | + (be32_to_cpu(agi->agi_free_level) < 1 || |
5475 | + be32_to_cpu(agi->agi_free_level) > XFS_BTREE_MAXLEVELS)) |
5476 | return false; |
5477 | + |
5478 | /* |
5479 | * during growfs operations, the perag is not fully initialised, |
5480 | * so we can't use it for any useful checking. growfs ensures we can't |
5481 | @@ -2592,6 +2597,8 @@ xfs_read_agi( |
5482 | XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops); |
5483 | if (error) |
5484 | return error; |
5485 | + if (tp) |
5486 | + xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_AGI_BUF); |
5487 | |
5488 | xfs_buf_set_ref(*bpp, XFS_AGI_REF); |
5489 | return 0; |
5490 | diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c |
5491 | index eab68ae2e011..6c6b95947e71 100644 |
5492 | --- a/fs/xfs/libxfs/xfs_ialloc_btree.c |
5493 | +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c |
5494 | @@ -357,7 +357,7 @@ xfs_inobt_init_cursor( |
5495 | struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); |
5496 | struct xfs_btree_cur *cur; |
5497 | |
5498 | - cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); |
5499 | + cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS); |
5500 | |
5501 | cur->bc_tp = tp; |
5502 | cur->bc_mp = mp; |
5503 | diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c |
5504 | index 134424fac434..c906e50515f0 100644 |
5505 | --- a/fs/xfs/libxfs/xfs_inode_buf.c |
5506 | +++ b/fs/xfs/libxfs/xfs_inode_buf.c |
5507 | @@ -392,6 +392,14 @@ xfs_dinode_verify( |
5508 | if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) |
5509 | return false; |
5510 | |
5511 | + /* don't allow invalid i_size */ |
5512 | + if (be64_to_cpu(dip->di_size) & (1ULL << 63)) |
5513 | + return false; |
5514 | + |
5515 | + /* No zero-length symlinks. */ |
5516 | + if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0) |
5517 | + return false; |
5518 | + |
5519 | /* only version 3 or greater inodes are extensively verified here */ |
5520 | if (dip->di_version < 3) |
5521 | return true; |
5522 | diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c |
5523 | index 5dd56d3dbb3a..222e103356c6 100644 |
5524 | --- a/fs/xfs/libxfs/xfs_inode_fork.c |
5525 | +++ b/fs/xfs/libxfs/xfs_inode_fork.c |
5526 | @@ -775,6 +775,13 @@ xfs_idestroy_fork( |
5527 | } |
5528 | } |
5529 | |
5530 | +/* Count number of incore extents based on if_bytes */ |
5531 | +xfs_extnum_t |
5532 | +xfs_iext_count(struct xfs_ifork *ifp) |
5533 | +{ |
5534 | + return ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5535 | +} |
5536 | + |
5537 | /* |
5538 | * Convert in-core extents to on-disk form |
5539 | * |
5540 | @@ -803,7 +810,7 @@ xfs_iextents_copy( |
5541 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); |
5542 | ASSERT(ifp->if_bytes > 0); |
5543 | |
5544 | - nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5545 | + nrecs = xfs_iext_count(ifp); |
5546 | XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork); |
5547 | ASSERT(nrecs > 0); |
5548 | |
5549 | @@ -941,7 +948,7 @@ xfs_iext_get_ext( |
5550 | xfs_extnum_t idx) /* index of target extent */ |
5551 | { |
5552 | ASSERT(idx >= 0); |
5553 | - ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)); |
5554 | + ASSERT(idx < xfs_iext_count(ifp)); |
5555 | |
5556 | if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { |
5557 | return ifp->if_u1.if_ext_irec->er_extbuf; |
5558 | @@ -1017,7 +1024,7 @@ xfs_iext_add( |
5559 | int new_size; /* size of extents after adding */ |
5560 | xfs_extnum_t nextents; /* number of extents in file */ |
5561 | |
5562 | - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5563 | + nextents = xfs_iext_count(ifp); |
5564 | ASSERT((idx >= 0) && (idx <= nextents)); |
5565 | byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t); |
5566 | new_size = ifp->if_bytes + byte_diff; |
5567 | @@ -1241,7 +1248,7 @@ xfs_iext_remove( |
5568 | trace_xfs_iext_remove(ip, idx, state, _RET_IP_); |
5569 | |
5570 | ASSERT(ext_diff > 0); |
5571 | - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5572 | + nextents = xfs_iext_count(ifp); |
5573 | new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); |
5574 | |
5575 | if (new_size == 0) { |
5576 | @@ -1270,7 +1277,7 @@ xfs_iext_remove_inline( |
5577 | |
5578 | ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); |
5579 | ASSERT(idx < XFS_INLINE_EXTS); |
5580 | - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5581 | + nextents = xfs_iext_count(ifp); |
5582 | ASSERT(((nextents - ext_diff) > 0) && |
5583 | (nextents - ext_diff) < XFS_INLINE_EXTS); |
5584 | |
5585 | @@ -1309,7 +1316,7 @@ xfs_iext_remove_direct( |
5586 | ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); |
5587 | new_size = ifp->if_bytes - |
5588 | (ext_diff * sizeof(xfs_bmbt_rec_t)); |
5589 | - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5590 | + nextents = xfs_iext_count(ifp); |
5591 | |
5592 | if (new_size == 0) { |
5593 | xfs_iext_destroy(ifp); |
5594 | @@ -1546,7 +1553,7 @@ xfs_iext_indirect_to_direct( |
5595 | int size; /* size of file extents */ |
5596 | |
5597 | ASSERT(ifp->if_flags & XFS_IFEXTIREC); |
5598 | - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5599 | + nextents = xfs_iext_count(ifp); |
5600 | ASSERT(nextents <= XFS_LINEAR_EXTS); |
5601 | size = nextents * sizeof(xfs_bmbt_rec_t); |
5602 | |
5603 | @@ -1620,7 +1627,7 @@ xfs_iext_bno_to_ext( |
5604 | xfs_extnum_t nextents; /* number of file extents */ |
5605 | xfs_fileoff_t startoff = 0; /* start offset of extent */ |
5606 | |
5607 | - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5608 | + nextents = xfs_iext_count(ifp); |
5609 | if (nextents == 0) { |
5610 | *idxp = 0; |
5611 | return NULL; |
5612 | @@ -1733,8 +1740,8 @@ xfs_iext_idx_to_irec( |
5613 | |
5614 | ASSERT(ifp->if_flags & XFS_IFEXTIREC); |
5615 | ASSERT(page_idx >= 0); |
5616 | - ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t)); |
5617 | - ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc); |
5618 | + ASSERT(page_idx <= xfs_iext_count(ifp)); |
5619 | + ASSERT(page_idx < xfs_iext_count(ifp) || realloc); |
5620 | |
5621 | nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; |
5622 | erp_idx = 0; |
5623 | @@ -1782,7 +1789,7 @@ xfs_iext_irec_init( |
5624 | xfs_extnum_t nextents; /* number of extents in file */ |
5625 | |
5626 | ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); |
5627 | - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5628 | + nextents = xfs_iext_count(ifp); |
5629 | ASSERT(nextents <= XFS_LINEAR_EXTS); |
5630 | |
5631 | erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS); |
5632 | @@ -1906,7 +1913,7 @@ xfs_iext_irec_compact( |
5633 | |
5634 | ASSERT(ifp->if_flags & XFS_IFEXTIREC); |
5635 | nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; |
5636 | - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5637 | + nextents = xfs_iext_count(ifp); |
5638 | |
5639 | if (nextents == 0) { |
5640 | xfs_iext_destroy(ifp); |
5641 | @@ -1996,3 +2003,49 @@ xfs_ifork_init_cow( |
5642 | ip->i_cformat = XFS_DINODE_FMT_EXTENTS; |
5643 | ip->i_cnextents = 0; |
5644 | } |
5645 | + |
5646 | +/* |
5647 | + * Lookup the extent covering bno. |
5648 | + * |
5649 | + * If there is an extent covering bno, return true and store the expanded |
5650 | + * extent structure in *gotp and the extent index in *idxp. |
5651 | + * If there is no extent covering bno, but there is an extent after it (e.g. |
5652 | + * it lies in a hole) return that extent in *gotp and its index in *idxp |
5653 | + * instead. |
5654 | + * If bno is beyond the last extent return false, and return the index after |
5655 | + * the last valid index in *idxp. |
5656 | + */ |
5657 | +bool |
5658 | +xfs_iext_lookup_extent( |
5659 | + struct xfs_inode *ip, |
5660 | + struct xfs_ifork *ifp, |
5661 | + xfs_fileoff_t bno, |
5662 | + xfs_extnum_t *idxp, |
5663 | + struct xfs_bmbt_irec *gotp) |
5664 | +{ |
5665 | + struct xfs_bmbt_rec_host *ep; |
5666 | + |
5667 | + XFS_STATS_INC(ip->i_mount, xs_look_exlist); |
5668 | + |
5669 | + ep = xfs_iext_bno_to_ext(ifp, bno, idxp); |
5670 | + if (!ep) |
5671 | + return false; |
5672 | + xfs_bmbt_get_all(ep, gotp); |
5673 | + return true; |
5674 | +} |
5675 | + |
5676 | +/* |
5677 | + * Return true if there is an extent at index idx, and store the expanded |
5678 | + * extent structure for it in *gotp in that case. Else return false. |
5679 | + */ |
5680 | +bool |
5681 | +xfs_iext_get_extent( |
5682 | + struct xfs_ifork *ifp, |
5683 | + xfs_extnum_t idx, |
5684 | + struct xfs_bmbt_irec *gotp) |
5685 | +{ |
5686 | + if (idx < 0 || idx >= xfs_iext_count(ifp)) |
5687 | + return false; |
5688 | + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), gotp); |
5689 | + return true; |
5690 | +} |
5691 | diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h |
5692 | index c9476f50e32d..7fb8365326d1 100644 |
5693 | --- a/fs/xfs/libxfs/xfs_inode_fork.h |
5694 | +++ b/fs/xfs/libxfs/xfs_inode_fork.h |
5695 | @@ -152,6 +152,7 @@ void xfs_init_local_fork(struct xfs_inode *, int, const void *, int); |
5696 | |
5697 | struct xfs_bmbt_rec_host * |
5698 | xfs_iext_get_ext(struct xfs_ifork *, xfs_extnum_t); |
5699 | +xfs_extnum_t xfs_iext_count(struct xfs_ifork *); |
5700 | void xfs_iext_insert(struct xfs_inode *, xfs_extnum_t, xfs_extnum_t, |
5701 | struct xfs_bmbt_irec *, int); |
5702 | void xfs_iext_add(struct xfs_ifork *, xfs_extnum_t, int); |
5703 | @@ -181,6 +182,12 @@ void xfs_iext_irec_compact_pages(struct xfs_ifork *); |
5704 | void xfs_iext_irec_compact_full(struct xfs_ifork *); |
5705 | void xfs_iext_irec_update_extoffs(struct xfs_ifork *, int, int); |
5706 | |
5707 | +bool xfs_iext_lookup_extent(struct xfs_inode *ip, |
5708 | + struct xfs_ifork *ifp, xfs_fileoff_t bno, |
5709 | + xfs_extnum_t *idxp, struct xfs_bmbt_irec *gotp); |
5710 | +bool xfs_iext_get_extent(struct xfs_ifork *ifp, xfs_extnum_t idx, |
5711 | + struct xfs_bmbt_irec *gotp); |
5712 | + |
5713 | extern struct kmem_zone *xfs_ifork_zone; |
5714 | |
5715 | extern void xfs_ifork_init_cow(struct xfs_inode *ip); |
5716 | diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c |
5717 | index 453bb2757ec2..2ba216966002 100644 |
5718 | --- a/fs/xfs/libxfs/xfs_refcount_btree.c |
5719 | +++ b/fs/xfs/libxfs/xfs_refcount_btree.c |
5720 | @@ -408,13 +408,14 @@ xfs_refcountbt_calc_size( |
5721 | */ |
5722 | xfs_extlen_t |
5723 | xfs_refcountbt_max_size( |
5724 | - struct xfs_mount *mp) |
5725 | + struct xfs_mount *mp, |
5726 | + xfs_agblock_t agblocks) |
5727 | { |
5728 | /* Bail out if we're uninitialized, which can happen in mkfs. */ |
5729 | if (mp->m_refc_mxr[0] == 0) |
5730 | return 0; |
5731 | |
5732 | - return xfs_refcountbt_calc_size(mp, mp->m_sb.sb_agblocks); |
5733 | + return xfs_refcountbt_calc_size(mp, agblocks); |
5734 | } |
5735 | |
5736 | /* |
5737 | @@ -429,22 +430,24 @@ xfs_refcountbt_calc_reserves( |
5738 | { |
5739 | struct xfs_buf *agbp; |
5740 | struct xfs_agf *agf; |
5741 | + xfs_agblock_t agblocks; |
5742 | xfs_extlen_t tree_len; |
5743 | int error; |
5744 | |
5745 | if (!xfs_sb_version_hasreflink(&mp->m_sb)) |
5746 | return 0; |
5747 | |
5748 | - *ask += xfs_refcountbt_max_size(mp); |
5749 | |
5750 | error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); |
5751 | if (error) |
5752 | return error; |
5753 | |
5754 | agf = XFS_BUF_TO_AGF(agbp); |
5755 | + agblocks = be32_to_cpu(agf->agf_length); |
5756 | tree_len = be32_to_cpu(agf->agf_refcount_blocks); |
5757 | xfs_buf_relse(agbp); |
5758 | |
5759 | + *ask += xfs_refcountbt_max_size(mp, agblocks); |
5760 | *used += tree_len; |
5761 | |
5762 | return error; |
5763 | diff --git a/fs/xfs/libxfs/xfs_refcount_btree.h b/fs/xfs/libxfs/xfs_refcount_btree.h |
5764 | index 3be7768bd51a..9db008b955b7 100644 |
5765 | --- a/fs/xfs/libxfs/xfs_refcount_btree.h |
5766 | +++ b/fs/xfs/libxfs/xfs_refcount_btree.h |
5767 | @@ -66,7 +66,8 @@ extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp); |
5768 | |
5769 | extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp, |
5770 | unsigned long long len); |
5771 | -extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp); |
5772 | +extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp, |
5773 | + xfs_agblock_t agblocks); |
5774 | |
5775 | extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp, |
5776 | xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used); |
5777 | diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c |
5778 | index 83e672ff7577..33a28efc3085 100644 |
5779 | --- a/fs/xfs/libxfs/xfs_rmap_btree.c |
5780 | +++ b/fs/xfs/libxfs/xfs_rmap_btree.c |
5781 | @@ -549,13 +549,14 @@ xfs_rmapbt_calc_size( |
5782 | */ |
5783 | xfs_extlen_t |
5784 | xfs_rmapbt_max_size( |
5785 | - struct xfs_mount *mp) |
5786 | + struct xfs_mount *mp, |
5787 | + xfs_agblock_t agblocks) |
5788 | { |
5789 | /* Bail out if we're uninitialized, which can happen in mkfs. */ |
5790 | if (mp->m_rmap_mxr[0] == 0) |
5791 | return 0; |
5792 | |
5793 | - return xfs_rmapbt_calc_size(mp, mp->m_sb.sb_agblocks); |
5794 | + return xfs_rmapbt_calc_size(mp, agblocks); |
5795 | } |
5796 | |
5797 | /* |
5798 | @@ -570,25 +571,24 @@ xfs_rmapbt_calc_reserves( |
5799 | { |
5800 | struct xfs_buf *agbp; |
5801 | struct xfs_agf *agf; |
5802 | - xfs_extlen_t pool_len; |
5803 | + xfs_agblock_t agblocks; |
5804 | xfs_extlen_t tree_len; |
5805 | int error; |
5806 | |
5807 | if (!xfs_sb_version_hasrmapbt(&mp->m_sb)) |
5808 | return 0; |
5809 | |
5810 | - /* Reserve 1% of the AG or enough for 1 block per record. */ |
5811 | - pool_len = max(mp->m_sb.sb_agblocks / 100, xfs_rmapbt_max_size(mp)); |
5812 | - *ask += pool_len; |
5813 | - |
5814 | error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); |
5815 | if (error) |
5816 | return error; |
5817 | |
5818 | agf = XFS_BUF_TO_AGF(agbp); |
5819 | + agblocks = be32_to_cpu(agf->agf_length); |
5820 | tree_len = be32_to_cpu(agf->agf_rmap_blocks); |
5821 | xfs_buf_relse(agbp); |
5822 | |
5823 | + /* Reserve 1% of the AG or enough for 1 block per record. */ |
5824 | + *ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks)); |
5825 | *used += tree_len; |
5826 | |
5827 | return error; |
5828 | diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h |
5829 | index 2a9ac472fb15..19c08e933049 100644 |
5830 | --- a/fs/xfs/libxfs/xfs_rmap_btree.h |
5831 | +++ b/fs/xfs/libxfs/xfs_rmap_btree.h |
5832 | @@ -60,7 +60,8 @@ extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp); |
5833 | |
5834 | extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp, |
5835 | unsigned long long len); |
5836 | -extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp); |
5837 | +extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp, |
5838 | + xfs_agblock_t agblocks); |
5839 | |
5840 | extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp, |
5841 | xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used); |
5842 | diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c |
5843 | index a70aec910626..2580262e4ea0 100644 |
5844 | --- a/fs/xfs/libxfs/xfs_sb.c |
5845 | +++ b/fs/xfs/libxfs/xfs_sb.c |
5846 | @@ -262,6 +262,12 @@ xfs_mount_validate_sb( |
5847 | return -EFSCORRUPTED; |
5848 | } |
5849 | |
5850 | + if (xfs_sb_version_hascrc(&mp->m_sb) && |
5851 | + sbp->sb_blocksize < XFS_MIN_CRC_BLOCKSIZE) { |
5852 | + xfs_notice(mp, "v5 SB sanity check failed"); |
5853 | + return -EFSCORRUPTED; |
5854 | + } |
5855 | + |
5856 | /* |
5857 | * Until this is fixed only page-sized or smaller data blocks work. |
5858 | */ |
5859 | @@ -338,13 +344,16 @@ xfs_sb_quota_from_disk(struct xfs_sb *sbp) |
5860 | XFS_PQUOTA_CHKD : XFS_GQUOTA_CHKD; |
5861 | sbp->sb_qflags &= ~(XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD); |
5862 | |
5863 | - if (sbp->sb_qflags & XFS_PQUOTA_ACCT) { |
5864 | + if (sbp->sb_qflags & XFS_PQUOTA_ACCT && |
5865 | + sbp->sb_gquotino != NULLFSINO) { |
5866 | /* |
5867 | * In older version of superblock, on-disk superblock only |
5868 | * has sb_gquotino, and in-core superblock has both sb_gquotino |
5869 | * and sb_pquotino. But, only one of them is supported at any |
5870 | * point of time. So, if PQUOTA is set in disk superblock, |
5871 | - * copy over sb_gquotino to sb_pquotino. |
5872 | + * copy over sb_gquotino to sb_pquotino. The NULLFSINO test |
5873 | + * above is to make sure we don't do this twice and wipe them |
5874 | + * both out! |
5875 | */ |
5876 | sbp->sb_pquotino = sbp->sb_gquotino; |
5877 | sbp->sb_gquotino = NULLFSINO; |
5878 | diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h |
5879 | index 8d74870468c2..cf044c0f4d41 100644 |
5880 | --- a/fs/xfs/libxfs/xfs_types.h |
5881 | +++ b/fs/xfs/libxfs/xfs_types.h |
5882 | @@ -75,11 +75,14 @@ typedef __int64_t xfs_sfiloff_t; /* signed block number in a file */ |
5883 | * Minimum and maximum blocksize and sectorsize. |
5884 | * The blocksize upper limit is pretty much arbitrary. |
5885 | * The sectorsize upper limit is due to sizeof(sb_sectsize). |
5886 | + * CRC enabled filesystems use 512 byte inodes, meaning 512 byte block sizes |
5887 | + * cannot be used. |
5888 | */ |
5889 | #define XFS_MIN_BLOCKSIZE_LOG 9 /* i.e. 512 bytes */ |
5890 | #define XFS_MAX_BLOCKSIZE_LOG 16 /* i.e. 65536 bytes */ |
5891 | #define XFS_MIN_BLOCKSIZE (1 << XFS_MIN_BLOCKSIZE_LOG) |
5892 | #define XFS_MAX_BLOCKSIZE (1 << XFS_MAX_BLOCKSIZE_LOG) |
5893 | +#define XFS_MIN_CRC_BLOCKSIZE (1 << (XFS_MIN_BLOCKSIZE_LOG + 1)) |
5894 | #define XFS_MIN_SECTORSIZE_LOG 9 /* i.e. 512 bytes */ |
5895 | #define XFS_MAX_SECTORSIZE_LOG 15 /* i.e. 32768 bytes */ |
5896 | #define XFS_MIN_SECTORSIZE (1 << XFS_MIN_SECTORSIZE_LOG) |
5897 | diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c |
5898 | index 3e57a56cf829..2693ba84ec25 100644 |
5899 | --- a/fs/xfs/xfs_aops.c |
5900 | +++ b/fs/xfs/xfs_aops.c |
5901 | @@ -1361,6 +1361,26 @@ __xfs_get_blocks( |
5902 | if (error) |
5903 | goto out_unlock; |
5904 | |
5905 | + /* |
5906 | + * The only time we can ever safely find delalloc blocks on direct I/O |
5907 | + * is a dio write to post-eof speculative preallocation. All other |
5908 | + * scenarios are indicative of a problem or misuse (such as mixing |
5909 | + * direct and mapped I/O). |
5910 | + * |
5911 | + * The file may be unmapped by the time we get here so we cannot |
5912 | + * reliably fail the I/O based on mapping. Instead, fail the I/O if this |
5913 | + * is a read or a write within eof. Otherwise, carry on but warn as a |
5914 | + * precaution if the file happens to be mapped. |
5915 | + */ |
5916 | + if (direct && imap.br_startblock == DELAYSTARTBLOCK) { |
5917 | + if (!create || offset < i_size_read(VFS_I(ip))) { |
5918 | + WARN_ON_ONCE(1); |
5919 | + error = -EIO; |
5920 | + goto out_unlock; |
5921 | + } |
5922 | + WARN_ON_ONCE(mapping_mapped(VFS_I(ip)->i_mapping)); |
5923 | + } |
5924 | + |
5925 | /* for DAX, we convert unwritten extents directly */ |
5926 | if (create && |
5927 | (!nimaps || |
5928 | @@ -1450,8 +1470,6 @@ __xfs_get_blocks( |
5929 | (new || ISUNWRITTEN(&imap)))) |
5930 | set_buffer_new(bh_result); |
5931 | |
5932 | - BUG_ON(direct && imap.br_startblock == DELAYSTARTBLOCK); |
5933 | - |
5934 | return 0; |
5935 | |
5936 | out_unlock: |
5937 | diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c |
5938 | index 47074e0c33f3..0670a8bd5818 100644 |
5939 | --- a/fs/xfs/xfs_bmap_util.c |
5940 | +++ b/fs/xfs/xfs_bmap_util.c |
5941 | @@ -359,9 +359,7 @@ xfs_bmap_count_blocks( |
5942 | mp = ip->i_mount; |
5943 | ifp = XFS_IFORK_PTR(ip, whichfork); |
5944 | if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) { |
5945 | - xfs_bmap_count_leaves(ifp, 0, |
5946 | - ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t), |
5947 | - count); |
5948 | + xfs_bmap_count_leaves(ifp, 0, xfs_iext_count(ifp), count); |
5949 | return 0; |
5950 | } |
5951 | |
5952 | @@ -426,7 +424,7 @@ xfs_getbmapx_fix_eof_hole( |
5953 | ifp = XFS_IFORK_PTR(ip, whichfork); |
5954 | if (!moretocome && |
5955 | xfs_iext_bno_to_ext(ifp, fileblock, &lastx) && |
5956 | - (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1)) |
5957 | + (lastx == xfs_iext_count(ifp) - 1)) |
5958 | out->bmv_oflags |= BMV_OF_LAST; |
5959 | } |
5960 | |
5961 | @@ -1878,15 +1876,13 @@ xfs_swap_extent_forks( |
5962 | |
5963 | switch (ip->i_d.di_format) { |
5964 | case XFS_DINODE_FMT_EXTENTS: |
5965 | - /* If the extents fit in the inode, fix the |
5966 | - * pointer. Otherwise it's already NULL or |
5967 | - * pointing to the extent. |
5968 | + /* |
5969 | + * If the extents fit in the inode, fix the pointer. Otherwise |
5970 | + * it's already NULL or pointing to the extent. |
5971 | */ |
5972 | - nextents = ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5973 | - if (nextents <= XFS_INLINE_EXTS) { |
5974 | - ifp->if_u1.if_extents = |
5975 | - ifp->if_u2.if_inline_ext; |
5976 | - } |
5977 | + nextents = xfs_iext_count(&ip->i_df); |
5978 | + if (nextents <= XFS_INLINE_EXTS) |
5979 | + ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; |
5980 | (*src_log_flags) |= XFS_ILOG_DEXT; |
5981 | break; |
5982 | case XFS_DINODE_FMT_BTREE: |
5983 | @@ -1898,15 +1894,13 @@ xfs_swap_extent_forks( |
5984 | |
5985 | switch (tip->i_d.di_format) { |
5986 | case XFS_DINODE_FMT_EXTENTS: |
5987 | - /* If the extents fit in the inode, fix the |
5988 | - * pointer. Otherwise it's already NULL or |
5989 | - * pointing to the extent. |
5990 | + /* |
5991 | + * If the extents fit in the inode, fix the pointer. Otherwise |
5992 | + * it's already NULL or pointing to the extent. |
5993 | */ |
5994 | - nextents = tip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
5995 | - if (nextents <= XFS_INLINE_EXTS) { |
5996 | - tifp->if_u1.if_extents = |
5997 | - tifp->if_u2.if_inline_ext; |
5998 | - } |
5999 | + nextents = xfs_iext_count(&tip->i_df); |
6000 | + if (nextents <= XFS_INLINE_EXTS) |
6001 | + tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext; |
6002 | (*target_log_flags) |= XFS_ILOG_DEXT; |
6003 | break; |
6004 | case XFS_DINODE_FMT_BTREE: |
6005 | diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c |
6006 | index 6e4f7f900fea..9a5d64b5f35a 100644 |
6007 | --- a/fs/xfs/xfs_file.c |
6008 | +++ b/fs/xfs/xfs_file.c |
6009 | @@ -939,7 +939,6 @@ xfs_file_clone_range( |
6010 | len, false); |
6011 | } |
6012 | |
6013 | -#define XFS_MAX_DEDUPE_LEN (16 * 1024 * 1024) |
6014 | STATIC ssize_t |
6015 | xfs_file_dedupe_range( |
6016 | struct file *src_file, |
6017 | @@ -950,14 +949,6 @@ xfs_file_dedupe_range( |
6018 | { |
6019 | int error; |
6020 | |
6021 | - /* |
6022 | - * Limit the total length we will dedupe for each operation. |
6023 | - * This is intended to bound the total time spent in this |
6024 | - * ioctl to something sane. |
6025 | - */ |
6026 | - if (len > XFS_MAX_DEDUPE_LEN) |
6027 | - len = XFS_MAX_DEDUPE_LEN; |
6028 | - |
6029 | error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff, |
6030 | len, true); |
6031 | if (error) |
6032 | diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c |
6033 | index 93d12fa2670d..242e8091296d 100644 |
6034 | --- a/fs/xfs/xfs_fsops.c |
6035 | +++ b/fs/xfs/xfs_fsops.c |
6036 | @@ -631,6 +631,20 @@ xfs_growfs_data_private( |
6037 | xfs_set_low_space_thresholds(mp); |
6038 | mp->m_alloc_set_aside = xfs_alloc_set_aside(mp); |
6039 | |
6040 | + /* |
6041 | + * If we expanded the last AG, free the per-AG reservation |
6042 | + * so we can reinitialize it with the new size. |
6043 | + */ |
6044 | + if (new) { |
6045 | + struct xfs_perag *pag; |
6046 | + |
6047 | + pag = xfs_perag_get(mp, agno); |
6048 | + error = xfs_ag_resv_free(pag); |
6049 | + xfs_perag_put(pag); |
6050 | + if (error) |
6051 | + goto out; |
6052 | + } |
6053 | + |
6054 | /* Reserve AG metadata blocks. */ |
6055 | error = xfs_fs_reserve_ag_blocks(mp); |
6056 | if (error && error != -ENOSPC) |
6057 | diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c |
6058 | index f295049db681..29cc9886a3cb 100644 |
6059 | --- a/fs/xfs/xfs_icache.c |
6060 | +++ b/fs/xfs/xfs_icache.c |
6061 | @@ -123,7 +123,6 @@ __xfs_inode_free( |
6062 | { |
6063 | /* asserts to verify all state is correct here */ |
6064 | ASSERT(atomic_read(&ip->i_pincount) == 0); |
6065 | - ASSERT(!xfs_isiflocked(ip)); |
6066 | XFS_STATS_DEC(ip->i_mount, vn_active); |
6067 | |
6068 | call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); |
6069 | @@ -133,6 +132,8 @@ void |
6070 | xfs_inode_free( |
6071 | struct xfs_inode *ip) |
6072 | { |
6073 | + ASSERT(!xfs_isiflocked(ip)); |
6074 | + |
6075 | /* |
6076 | * Because we use RCU freeing we need to ensure the inode always |
6077 | * appears to be reclaimed with an invalid inode number when in the |
6078 | @@ -981,6 +982,7 @@ xfs_reclaim_inode( |
6079 | |
6080 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { |
6081 | xfs_iunpin_wait(ip); |
6082 | + /* xfs_iflush_abort() drops the flush lock */ |
6083 | xfs_iflush_abort(ip, false); |
6084 | goto reclaim; |
6085 | } |
6086 | @@ -989,10 +991,10 @@ xfs_reclaim_inode( |
6087 | goto out_ifunlock; |
6088 | xfs_iunpin_wait(ip); |
6089 | } |
6090 | - if (xfs_iflags_test(ip, XFS_ISTALE)) |
6091 | - goto reclaim; |
6092 | - if (xfs_inode_clean(ip)) |
6093 | + if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) { |
6094 | + xfs_ifunlock(ip); |
6095 | goto reclaim; |
6096 | + } |
6097 | |
6098 | /* |
6099 | * Never flush out dirty data during non-blocking reclaim, as it would |
6100 | @@ -1030,25 +1032,24 @@ xfs_reclaim_inode( |
6101 | xfs_buf_relse(bp); |
6102 | } |
6103 | |
6104 | - xfs_iflock(ip); |
6105 | reclaim: |
6106 | + ASSERT(!xfs_isiflocked(ip)); |
6107 | + |
6108 | /* |
6109 | * Because we use RCU freeing we need to ensure the inode always appears |
6110 | * to be reclaimed with an invalid inode number when in the free state. |
6111 | - * We do this as early as possible under the ILOCK and flush lock so |
6112 | - * that xfs_iflush_cluster() can be guaranteed to detect races with us |
6113 | - * here. By doing this, we guarantee that once xfs_iflush_cluster has |
6114 | - * locked both the XFS_ILOCK and the flush lock that it will see either |
6115 | - * a valid, flushable inode that will serialise correctly against the |
6116 | - * locks below, or it will see a clean (and invalid) inode that it can |
6117 | - * skip. |
6118 | + * We do this as early as possible under the ILOCK so that |
6119 | + * xfs_iflush_cluster() can be guaranteed to detect races with us here. |
6120 | + * By doing this, we guarantee that once xfs_iflush_cluster has locked |
6121 | + * XFS_ILOCK that it will see either a valid, flushable inode that will |
6122 | + * serialise correctly, or it will see a clean (and invalid) inode that |
6123 | + * it can skip. |
6124 | */ |
6125 | spin_lock(&ip->i_flags_lock); |
6126 | ip->i_flags = XFS_IRECLAIM; |
6127 | ip->i_ino = 0; |
6128 | spin_unlock(&ip->i_flags_lock); |
6129 | |
6130 | - xfs_ifunlock(ip); |
6131 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
6132 | |
6133 | XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); |
6134 | @@ -1580,10 +1581,15 @@ xfs_inode_free_cowblocks( |
6135 | struct xfs_eofblocks *eofb = args; |
6136 | bool need_iolock = true; |
6137 | int match; |
6138 | + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); |
6139 | |
6140 | ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0)); |
6141 | |
6142 | - if (!xfs_reflink_has_real_cow_blocks(ip)) { |
6143 | + /* |
6144 | + * Just clear the tag if we have an empty cow fork or none at all. It's |
6145 | + * possible the inode was fully unshared since it was originally tagged. |
6146 | + */ |
6147 | + if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) { |
6148 | trace_xfs_inode_free_cowblocks_invalid(ip); |
6149 | xfs_inode_clear_cowblocks_tag(ip); |
6150 | return 0; |
6151 | @@ -1593,7 +1599,8 @@ xfs_inode_free_cowblocks( |
6152 | * If the mapping is dirty or under writeback we cannot touch the |
6153 | * CoW fork. Leave it alone if we're in the midst of a directio. |
6154 | */ |
6155 | - if (mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) || |
6156 | + if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) || |
6157 | + mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) || |
6158 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) || |
6159 | atomic_read(&VFS_I(ip)->i_dio_count)) |
6160 | return 0; |
6161 | diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c |
6162 | index 4e560e6a12c1..512ff13ed66a 100644 |
6163 | --- a/fs/xfs/xfs_inode.c |
6164 | +++ b/fs/xfs/xfs_inode.c |
6165 | @@ -2041,7 +2041,6 @@ xfs_iunlink( |
6166 | agi->agi_unlinked[bucket_index] = cpu_to_be32(agino); |
6167 | offset = offsetof(xfs_agi_t, agi_unlinked) + |
6168 | (sizeof(xfs_agino_t) * bucket_index); |
6169 | - xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF); |
6170 | xfs_trans_log_buf(tp, agibp, offset, |
6171 | (offset + sizeof(xfs_agino_t) - 1)); |
6172 | return 0; |
6173 | @@ -2133,7 +2132,6 @@ xfs_iunlink_remove( |
6174 | agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino); |
6175 | offset = offsetof(xfs_agi_t, agi_unlinked) + |
6176 | (sizeof(xfs_agino_t) * bucket_index); |
6177 | - xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF); |
6178 | xfs_trans_log_buf(tp, agibp, offset, |
6179 | (offset + sizeof(xfs_agino_t) - 1)); |
6180 | } else { |
6181 | diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h |
6182 | index f14c1de2549d..71e8a81c91a3 100644 |
6183 | --- a/fs/xfs/xfs_inode.h |
6184 | +++ b/fs/xfs/xfs_inode.h |
6185 | @@ -246,6 +246,11 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip) |
6186 | * Synchronize processes attempting to flush the in-core inode back to disk. |
6187 | */ |
6188 | |
6189 | +static inline int xfs_isiflocked(struct xfs_inode *ip) |
6190 | +{ |
6191 | + return xfs_iflags_test(ip, XFS_IFLOCK); |
6192 | +} |
6193 | + |
6194 | extern void __xfs_iflock(struct xfs_inode *ip); |
6195 | |
6196 | static inline int xfs_iflock_nowait(struct xfs_inode *ip) |
6197 | @@ -261,16 +266,12 @@ static inline void xfs_iflock(struct xfs_inode *ip) |
6198 | |
6199 | static inline void xfs_ifunlock(struct xfs_inode *ip) |
6200 | { |
6201 | + ASSERT(xfs_isiflocked(ip)); |
6202 | xfs_iflags_clear(ip, XFS_IFLOCK); |
6203 | smp_mb(); |
6204 | wake_up_bit(&ip->i_flags, __XFS_IFLOCK_BIT); |
6205 | } |
6206 | |
6207 | -static inline int xfs_isiflocked(struct xfs_inode *ip) |
6208 | -{ |
6209 | - return xfs_iflags_test(ip, XFS_IFLOCK); |
6210 | -} |
6211 | - |
6212 | /* |
6213 | * Flags for inode locking. |
6214 | * Bit ranges: 1<<1 - 1<<16-1 -- iolock/ilock modes (bitfield) |
6215 | diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c |
6216 | index 9610e9c00952..d90e7811ccdd 100644 |
6217 | --- a/fs/xfs/xfs_inode_item.c |
6218 | +++ b/fs/xfs/xfs_inode_item.c |
6219 | @@ -164,7 +164,7 @@ xfs_inode_item_format_data_fork( |
6220 | struct xfs_bmbt_rec *p; |
6221 | |
6222 | ASSERT(ip->i_df.if_u1.if_extents != NULL); |
6223 | - ASSERT(ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) > 0); |
6224 | + ASSERT(xfs_iext_count(&ip->i_df) > 0); |
6225 | |
6226 | p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT); |
6227 | data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK); |
6228 | @@ -261,7 +261,7 @@ xfs_inode_item_format_attr_fork( |
6229 | ip->i_afp->if_bytes > 0) { |
6230 | struct xfs_bmbt_rec *p; |
6231 | |
6232 | - ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) == |
6233 | + ASSERT(xfs_iext_count(ip->i_afp) == |
6234 | ip->i_d.di_anextents); |
6235 | ASSERT(ip->i_afp->if_u1.if_extents != NULL); |
6236 | |
6237 | diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c |
6238 | index c245bed3249b..a39197501a7c 100644 |
6239 | --- a/fs/xfs/xfs_ioctl.c |
6240 | +++ b/fs/xfs/xfs_ioctl.c |
6241 | @@ -910,16 +910,14 @@ xfs_ioc_fsgetxattr( |
6242 | if (attr) { |
6243 | if (ip->i_afp) { |
6244 | if (ip->i_afp->if_flags & XFS_IFEXTENTS) |
6245 | - fa.fsx_nextents = ip->i_afp->if_bytes / |
6246 | - sizeof(xfs_bmbt_rec_t); |
6247 | + fa.fsx_nextents = xfs_iext_count(ip->i_afp); |
6248 | else |
6249 | fa.fsx_nextents = ip->i_d.di_anextents; |
6250 | } else |
6251 | fa.fsx_nextents = 0; |
6252 | } else { |
6253 | if (ip->i_df.if_flags & XFS_IFEXTENTS) |
6254 | - fa.fsx_nextents = ip->i_df.if_bytes / |
6255 | - sizeof(xfs_bmbt_rec_t); |
6256 | + fa.fsx_nextents = xfs_iext_count(&ip->i_df); |
6257 | else |
6258 | fa.fsx_nextents = ip->i_d.di_nextents; |
6259 | } |
6260 | diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c |
6261 | index 436e109bb01e..15a83813b708 100644 |
6262 | --- a/fs/xfs/xfs_iomap.c |
6263 | +++ b/fs/xfs/xfs_iomap.c |
6264 | @@ -395,11 +395,12 @@ xfs_iomap_prealloc_size( |
6265 | struct xfs_inode *ip, |
6266 | loff_t offset, |
6267 | loff_t count, |
6268 | - xfs_extnum_t idx, |
6269 | - struct xfs_bmbt_irec *prev) |
6270 | + xfs_extnum_t idx) |
6271 | { |
6272 | struct xfs_mount *mp = ip->i_mount; |
6273 | + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); |
6274 | xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); |
6275 | + struct xfs_bmbt_irec prev; |
6276 | int shift = 0; |
6277 | int64_t freesp; |
6278 | xfs_fsblock_t qblocks; |
6279 | @@ -419,8 +420,8 @@ xfs_iomap_prealloc_size( |
6280 | */ |
6281 | if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) || |
6282 | XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) || |
6283 | - idx == 0 || |
6284 | - prev->br_startoff + prev->br_blockcount < offset_fsb) |
6285 | + !xfs_iext_get_extent(ifp, idx - 1, &prev) || |
6286 | + prev.br_startoff + prev.br_blockcount < offset_fsb) |
6287 | return mp->m_writeio_blocks; |
6288 | |
6289 | /* |
6290 | @@ -439,8 +440,8 @@ xfs_iomap_prealloc_size( |
6291 | * always extends to MAXEXTLEN rather than falling short due to things |
6292 | * like stripe unit/width alignment of real extents. |
6293 | */ |
6294 | - if (prev->br_blockcount <= (MAXEXTLEN >> 1)) |
6295 | - alloc_blocks = prev->br_blockcount << 1; |
6296 | + if (prev.br_blockcount <= (MAXEXTLEN >> 1)) |
6297 | + alloc_blocks = prev.br_blockcount << 1; |
6298 | else |
6299 | alloc_blocks = XFS_B_TO_FSB(mp, offset); |
6300 | if (!alloc_blocks) |
6301 | @@ -535,11 +536,11 @@ xfs_file_iomap_begin_delay( |
6302 | xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); |
6303 | xfs_fileoff_t maxbytes_fsb = |
6304 | XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); |
6305 | - xfs_fileoff_t end_fsb, orig_end_fsb; |
6306 | + xfs_fileoff_t end_fsb; |
6307 | int error = 0, eof = 0; |
6308 | struct xfs_bmbt_irec got; |
6309 | - struct xfs_bmbt_irec prev; |
6310 | xfs_extnum_t idx; |
6311 | + xfs_fsblock_t prealloc_blocks = 0; |
6312 | |
6313 | ASSERT(!XFS_IS_REALTIME_INODE(ip)); |
6314 | ASSERT(!xfs_get_extsz_hint(ip)); |
6315 | @@ -563,8 +564,7 @@ xfs_file_iomap_begin_delay( |
6316 | goto out_unlock; |
6317 | } |
6318 | |
6319 | - xfs_bmap_search_extents(ip, offset_fsb, XFS_DATA_FORK, &eof, &idx, |
6320 | - &got, &prev); |
6321 | + eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got); |
6322 | if (!eof && got.br_startoff <= offset_fsb) { |
6323 | if (xfs_is_reflink_inode(ip)) { |
6324 | bool shared; |
6325 | @@ -595,35 +595,32 @@ xfs_file_iomap_begin_delay( |
6326 | * the lower level functions are updated. |
6327 | */ |
6328 | count = min_t(loff_t, count, 1024 * PAGE_SIZE); |
6329 | - end_fsb = orig_end_fsb = |
6330 | - min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb); |
6331 | + end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb); |
6332 | |
6333 | if (eof) { |
6334 | - xfs_fsblock_t prealloc_blocks; |
6335 | - |
6336 | - prealloc_blocks = |
6337 | - xfs_iomap_prealloc_size(ip, offset, count, idx, &prev); |
6338 | + prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count, idx); |
6339 | if (prealloc_blocks) { |
6340 | xfs_extlen_t align; |
6341 | xfs_off_t end_offset; |
6342 | + xfs_fileoff_t p_end_fsb; |
6343 | |
6344 | end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1); |
6345 | - end_fsb = XFS_B_TO_FSBT(mp, end_offset) + |
6346 | - prealloc_blocks; |
6347 | + p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) + |
6348 | + prealloc_blocks; |
6349 | |
6350 | align = xfs_eof_alignment(ip, 0); |
6351 | if (align) |
6352 | - end_fsb = roundup_64(end_fsb, align); |
6353 | + p_end_fsb = roundup_64(p_end_fsb, align); |
6354 | |
6355 | - end_fsb = min(end_fsb, maxbytes_fsb); |
6356 | - ASSERT(end_fsb > offset_fsb); |
6357 | + p_end_fsb = min(p_end_fsb, maxbytes_fsb); |
6358 | + ASSERT(p_end_fsb > offset_fsb); |
6359 | + prealloc_blocks = p_end_fsb - end_fsb; |
6360 | } |
6361 | } |
6362 | |
6363 | retry: |
6364 | error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb, |
6365 | - end_fsb - offset_fsb, &got, |
6366 | - &prev, &idx, eof); |
6367 | + end_fsb - offset_fsb, prealloc_blocks, &got, &idx, eof); |
6368 | switch (error) { |
6369 | case 0: |
6370 | break; |
6371 | @@ -631,8 +628,8 @@ xfs_file_iomap_begin_delay( |
6372 | case -EDQUOT: |
6373 | /* retry without any preallocation */ |
6374 | trace_xfs_delalloc_enospc(ip, offset, count); |
6375 | - if (end_fsb != orig_end_fsb) { |
6376 | - end_fsb = orig_end_fsb; |
6377 | + if (prealloc_blocks) { |
6378 | + prealloc_blocks = 0; |
6379 | goto retry; |
6380 | } |
6381 | /*FALLTHRU*/ |
6382 | @@ -640,13 +637,6 @@ xfs_file_iomap_begin_delay( |
6383 | goto out_unlock; |
6384 | } |
6385 | |
6386 | - /* |
6387 | - * Tag the inode as speculatively preallocated so we can reclaim this |
6388 | - * space on demand, if necessary. |
6389 | - */ |
6390 | - if (end_fsb != orig_end_fsb) |
6391 | - xfs_inode_set_eofblocks_tag(ip); |
6392 | - |
6393 | trace_xfs_iomap_alloc(ip, offset, count, 0, &got); |
6394 | done: |
6395 | if (isnullstartblock(got.br_startblock)) |
6396 | diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c |
6397 | index 2d91f5ab7538..9b3d7c76915d 100644 |
6398 | --- a/fs/xfs/xfs_log_recover.c |
6399 | +++ b/fs/xfs/xfs_log_recover.c |
6400 | @@ -4929,7 +4929,6 @@ xlog_recover_clear_agi_bucket( |
6401 | agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); |
6402 | offset = offsetof(xfs_agi_t, agi_unlinked) + |
6403 | (sizeof(xfs_agino_t) * bucket); |
6404 | - xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF); |
6405 | xfs_trans_log_buf(tp, agibp, offset, |
6406 | (offset + sizeof(xfs_agino_t) - 1)); |
6407 | |
6408 | diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c |
6409 | index a60d9e2739d1..45e50ea90769 100644 |
6410 | --- a/fs/xfs/xfs_qm.c |
6411 | +++ b/fs/xfs/xfs_qm.c |
6412 | @@ -1135,7 +1135,7 @@ xfs_qm_get_rtblks( |
6413 | return error; |
6414 | } |
6415 | rtblks = 0; |
6416 | - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
6417 | + nextents = xfs_iext_count(ifp); |
6418 | for (idx = 0; idx < nextents; idx++) |
6419 | rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx)); |
6420 | *O_rtblks = (xfs_qcnt_t)rtblks; |
6421 | diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c |
6422 | index fe86a668a57e..6e4c7446c3d4 100644 |
6423 | --- a/fs/xfs/xfs_refcount_item.c |
6424 | +++ b/fs/xfs/xfs_refcount_item.c |
6425 | @@ -526,13 +526,14 @@ xfs_cui_recover( |
6426 | xfs_refcount_finish_one_cleanup(tp, rcur, error); |
6427 | error = xfs_defer_finish(&tp, &dfops, NULL); |
6428 | if (error) |
6429 | - goto abort_error; |
6430 | + goto abort_defer; |
6431 | set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags); |
6432 | error = xfs_trans_commit(tp); |
6433 | return error; |
6434 | |
6435 | abort_error: |
6436 | xfs_refcount_finish_one_cleanup(tp, rcur, error); |
6437 | +abort_defer: |
6438 | xfs_defer_cancel(&dfops); |
6439 | xfs_trans_cancel(tp); |
6440 | return error; |
6441 | diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c |
6442 | index a279b4e7f5fe..4d3f74e3c5e1 100644 |
6443 | --- a/fs/xfs/xfs_reflink.c |
6444 | +++ b/fs/xfs/xfs_reflink.c |
6445 | @@ -243,12 +243,11 @@ xfs_reflink_reserve_cow( |
6446 | struct xfs_bmbt_irec *imap, |
6447 | bool *shared) |
6448 | { |
6449 | - struct xfs_bmbt_irec got, prev; |
6450 | - xfs_fileoff_t end_fsb, orig_end_fsb; |
6451 | - int eof = 0, error = 0; |
6452 | - bool trimmed; |
6453 | + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); |
6454 | + struct xfs_bmbt_irec got; |
6455 | + int error = 0; |
6456 | + bool eof = false, trimmed; |
6457 | xfs_extnum_t idx; |
6458 | - xfs_extlen_t align; |
6459 | |
6460 | /* |
6461 | * Search the COW fork extent list first. This serves two purposes: |
6462 | @@ -258,8 +257,9 @@ xfs_reflink_reserve_cow( |
6463 | * extent list is generally faster than going out to the shared extent |
6464 | * tree. |
6465 | */ |
6466 | - xfs_bmap_search_extents(ip, imap->br_startoff, XFS_COW_FORK, &eof, &idx, |
6467 | - &got, &prev); |
6468 | + |
6469 | + if (!xfs_iext_lookup_extent(ip, ifp, imap->br_startoff, &idx, &got)) |
6470 | + eof = true; |
6471 | if (!eof && got.br_startoff <= imap->br_startoff) { |
6472 | trace_xfs_reflink_cow_found(ip, imap); |
6473 | xfs_trim_extent(imap, got.br_startoff, got.br_blockcount); |
6474 | @@ -285,33 +285,12 @@ xfs_reflink_reserve_cow( |
6475 | if (error) |
6476 | return error; |
6477 | |
6478 | - end_fsb = orig_end_fsb = imap->br_startoff + imap->br_blockcount; |
6479 | - |
6480 | - align = xfs_eof_alignment(ip, xfs_get_cowextsz_hint(ip)); |
6481 | - if (align) |
6482 | - end_fsb = roundup_64(end_fsb, align); |
6483 | - |
6484 | -retry: |
6485 | error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, imap->br_startoff, |
6486 | - end_fsb - imap->br_startoff, &got, &prev, &idx, eof); |
6487 | - switch (error) { |
6488 | - case 0: |
6489 | - break; |
6490 | - case -ENOSPC: |
6491 | - case -EDQUOT: |
6492 | - /* retry without any preallocation */ |
6493 | + imap->br_blockcount, 0, &got, &idx, eof); |
6494 | + if (error == -ENOSPC || error == -EDQUOT) |
6495 | trace_xfs_reflink_cow_enospc(ip, imap); |
6496 | - if (end_fsb != orig_end_fsb) { |
6497 | - end_fsb = orig_end_fsb; |
6498 | - goto retry; |
6499 | - } |
6500 | - /*FALLTHRU*/ |
6501 | - default: |
6502 | + if (error) |
6503 | return error; |
6504 | - } |
6505 | - |
6506 | - if (end_fsb != orig_end_fsb) |
6507 | - xfs_inode_set_cowblocks_tag(ip); |
6508 | |
6509 | trace_xfs_reflink_cow_alloc(ip, &got); |
6510 | return 0; |
6511 | @@ -486,7 +465,7 @@ xfs_reflink_trim_irec_to_next_cow( |
6512 | /* This is the extent before; try sliding up one. */ |
6513 | if (irec.br_startoff < offset_fsb) { |
6514 | idx++; |
6515 | - if (idx >= ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) |
6516 | + if (idx >= xfs_iext_count(ifp)) |
6517 | return 0; |
6518 | gotp = xfs_iext_get_ext(ifp, idx); |
6519 | xfs_bmbt_get_all(gotp, &irec); |
6520 | @@ -566,7 +545,7 @@ xfs_reflink_cancel_cow_blocks( |
6521 | xfs_bmap_del_extent_cow(ip, &idx, &got, &del); |
6522 | } |
6523 | |
6524 | - if (++idx >= ifp->if_bytes / sizeof(struct xfs_bmbt_rec)) |
6525 | + if (++idx >= xfs_iext_count(ifp)) |
6526 | break; |
6527 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got); |
6528 | } |
6529 | @@ -1345,8 +1324,14 @@ xfs_reflink_remap_range( |
6530 | goto out_unlock; |
6531 | } |
6532 | |
6533 | - if (len == 0) |
6534 | + /* Zero length dedupe exits immediately; reflink goes to EOF. */ |
6535 | + if (len == 0) { |
6536 | + if (is_dedupe) { |
6537 | + ret = 0; |
6538 | + goto out_unlock; |
6539 | + } |
6540 | len = isize - pos_in; |
6541 | + } |
6542 | |
6543 | /* Ensure offsets don't wrap and the input is inside i_size */ |
6544 | if (pos_in + len < pos_in || pos_out + len < pos_out || |
6545 | @@ -1697,37 +1682,3 @@ xfs_reflink_unshare( |
6546 | trace_xfs_reflink_unshare_error(ip, error, _RET_IP_); |
6547 | return error; |
6548 | } |
6549 | - |
6550 | -/* |
6551 | - * Does this inode have any real CoW reservations? |
6552 | - */ |
6553 | -bool |
6554 | -xfs_reflink_has_real_cow_blocks( |
6555 | - struct xfs_inode *ip) |
6556 | -{ |
6557 | - struct xfs_bmbt_irec irec; |
6558 | - struct xfs_ifork *ifp; |
6559 | - struct xfs_bmbt_rec_host *gotp; |
6560 | - xfs_extnum_t idx; |
6561 | - |
6562 | - if (!xfs_is_reflink_inode(ip)) |
6563 | - return false; |
6564 | - |
6565 | - /* Go find the old extent in the CoW fork. */ |
6566 | - ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); |
6567 | - gotp = xfs_iext_bno_to_ext(ifp, 0, &idx); |
6568 | - while (gotp) { |
6569 | - xfs_bmbt_get_all(gotp, &irec); |
6570 | - |
6571 | - if (!isnullstartblock(irec.br_startblock)) |
6572 | - return true; |
6573 | - |
6574 | - /* Roll on... */ |
6575 | - idx++; |
6576 | - if (idx >= ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) |
6577 | - break; |
6578 | - gotp = xfs_iext_get_ext(ifp, idx); |
6579 | - } |
6580 | - |
6581 | - return false; |
6582 | -} |
6583 | diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h |
6584 | index fad11607c9ad..97ea9b487884 100644 |
6585 | --- a/fs/xfs/xfs_reflink.h |
6586 | +++ b/fs/xfs/xfs_reflink.h |
6587 | @@ -50,6 +50,4 @@ extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip, |
6588 | extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset, |
6589 | xfs_off_t len); |
6590 | |
6591 | -extern bool xfs_reflink_has_real_cow_blocks(struct xfs_inode *ip); |
6592 | - |
6593 | #endif /* __XFS_REFLINK_H */ |
6594 | diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c |
6595 | index 276d3023d60f..de6195e38910 100644 |
6596 | --- a/fs/xfs/xfs_sysfs.c |
6597 | +++ b/fs/xfs/xfs_sysfs.c |
6598 | @@ -396,7 +396,7 @@ max_retries_show( |
6599 | int retries; |
6600 | struct xfs_error_cfg *cfg = to_error_cfg(kobject); |
6601 | |
6602 | - if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER) |
6603 | + if (cfg->max_retries == XFS_ERR_RETRY_FOREVER) |
6604 | retries = -1; |
6605 | else |
6606 | retries = cfg->max_retries; |
6607 | @@ -422,7 +422,7 @@ max_retries_store( |
6608 | return -EINVAL; |
6609 | |
6610 | if (val == -1) |
6611 | - cfg->retry_timeout = XFS_ERR_RETRY_FOREVER; |
6612 | + cfg->max_retries = XFS_ERR_RETRY_FOREVER; |
6613 | else |
6614 | cfg->max_retries = val; |
6615 | return count; |
6616 | diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h |
6617 | index df13637e4017..939869c772b1 100644 |
6618 | --- a/include/asm-generic/asm-prototypes.h |
6619 | +++ b/include/asm-generic/asm-prototypes.h |
6620 | @@ -1,7 +1,13 @@ |
6621 | #include <linux/bitops.h> |
6622 | +#undef __memset |
6623 | extern void *__memset(void *, int, __kernel_size_t); |
6624 | +#undef __memcpy |
6625 | extern void *__memcpy(void *, const void *, __kernel_size_t); |
6626 | +#undef __memmove |
6627 | extern void *__memmove(void *, const void *, __kernel_size_t); |
6628 | +#undef memset |
6629 | extern void *memset(void *, int, __kernel_size_t); |
6630 | +#undef memcpy |
6631 | extern void *memcpy(void *, const void *, __kernel_size_t); |
6632 | +#undef memmove |
6633 | extern void *memmove(void *, const void *, __kernel_size_t); |
6634 | diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h |
6635 | index afe641c02dca..ba1cad7b97cf 100644 |
6636 | --- a/include/linux/cpuhotplug.h |
6637 | +++ b/include/linux/cpuhotplug.h |
6638 | @@ -80,7 +80,6 @@ enum cpuhp_state { |
6639 | CPUHP_AP_ARM_L2X0_STARTING, |
6640 | CPUHP_AP_ARM_ARCH_TIMER_STARTING, |
6641 | CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, |
6642 | - CPUHP_AP_DUMMY_TIMER_STARTING, |
6643 | CPUHP_AP_JCORE_TIMER_STARTING, |
6644 | CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, |
6645 | CPUHP_AP_ARM_TWD_STARTING, |
6646 | @@ -94,6 +93,8 @@ enum cpuhp_state { |
6647 | CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, |
6648 | CPUHP_AP_KVM_ARM_VGIC_STARTING, |
6649 | CPUHP_AP_KVM_ARM_TIMER_STARTING, |
6650 | + /* Must be the last timer callback */ |
6651 | + CPUHP_AP_DUMMY_TIMER_STARTING, |
6652 | CPUHP_AP_ARM_XEN_STARTING, |
6653 | CPUHP_AP_ARM_CORESIGHT_STARTING, |
6654 | CPUHP_AP_ARM_CORESIGHT4_STARTING, |
6655 | diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h |
6656 | index 4d3f0d1aec73..1b413a9aab81 100644 |
6657 | --- a/include/linux/debugfs.h |
6658 | +++ b/include/linux/debugfs.h |
6659 | @@ -62,6 +62,21 @@ static inline const struct file_operations *debugfs_real_fops(struct file *filp) |
6660 | return filp->f_path.dentry->d_fsdata; |
6661 | } |
6662 | |
6663 | +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ |
6664 | +static int __fops ## _open(struct inode *inode, struct file *file) \ |
6665 | +{ \ |
6666 | + __simple_attr_check_format(__fmt, 0ull); \ |
6667 | + return simple_attr_open(inode, file, __get, __set, __fmt); \ |
6668 | +} \ |
6669 | +static const struct file_operations __fops = { \ |
6670 | + .owner = THIS_MODULE, \ |
6671 | + .open = __fops ## _open, \ |
6672 | + .release = simple_attr_release, \ |
6673 | + .read = debugfs_attr_read, \ |
6674 | + .write = debugfs_attr_write, \ |
6675 | + .llseek = generic_file_llseek, \ |
6676 | +} |
6677 | + |
6678 | #if defined(CONFIG_DEBUG_FS) |
6679 | |
6680 | struct dentry *debugfs_create_file(const char *name, umode_t mode, |
6681 | @@ -99,21 +114,6 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf, |
6682 | ssize_t debugfs_attr_write(struct file *file, const char __user *buf, |
6683 | size_t len, loff_t *ppos); |
6684 | |
6685 | -#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ |
6686 | -static int __fops ## _open(struct inode *inode, struct file *file) \ |
6687 | -{ \ |
6688 | - __simple_attr_check_format(__fmt, 0ull); \ |
6689 | - return simple_attr_open(inode, file, __get, __set, __fmt); \ |
6690 | -} \ |
6691 | -static const struct file_operations __fops = { \ |
6692 | - .owner = THIS_MODULE, \ |
6693 | - .open = __fops ## _open, \ |
6694 | - .release = simple_attr_release, \ |
6695 | - .read = debugfs_attr_read, \ |
6696 | - .write = debugfs_attr_write, \ |
6697 | - .llseek = generic_file_llseek, \ |
6698 | -} |
6699 | - |
6700 | struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, |
6701 | struct dentry *new_dir, const char *new_name); |
6702 | |
6703 | @@ -233,8 +233,18 @@ static inline void debugfs_use_file_finish(int srcu_idx) |
6704 | __releases(&debugfs_srcu) |
6705 | { } |
6706 | |
6707 | -#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ |
6708 | - static const struct file_operations __fops = { 0 } |
6709 | +static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf, |
6710 | + size_t len, loff_t *ppos) |
6711 | +{ |
6712 | + return -ENODEV; |
6713 | +} |
6714 | + |
6715 | +static inline ssize_t debugfs_attr_write(struct file *file, |
6716 | + const char __user *buf, |
6717 | + size_t len, loff_t *ppos) |
6718 | +{ |
6719 | + return -ENODEV; |
6720 | +} |
6721 | |
6722 | static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, |
6723 | struct dentry *new_dir, char *new_name) |
6724 | diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h |
6725 | index 228bd44efa4c..497f2b3a5a62 100644 |
6726 | --- a/include/linux/iio/common/st_sensors.h |
6727 | +++ b/include/linux/iio/common/st_sensors.h |
6728 | @@ -116,6 +116,16 @@ struct st_sensor_bdu { |
6729 | }; |
6730 | |
6731 | /** |
6732 | + * struct st_sensor_das - ST sensor device data alignment selection |
6733 | + * @addr: address of the register. |
6734 | + * @mask: mask to write the das flag for left alignment. |
6735 | + */ |
6736 | +struct st_sensor_das { |
6737 | + u8 addr; |
6738 | + u8 mask; |
6739 | +}; |
6740 | + |
6741 | +/** |
6742 | * struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt |
6743 | * @addr: address of the register. |
6744 | * @mask_int1: mask to enable/disable IRQ on INT1 pin. |
6745 | @@ -185,6 +195,7 @@ struct st_sensor_transfer_function { |
6746 | * @enable_axis: Enable one or more axis of the sensor. |
6747 | * @fs: Full scale register and full scale list available. |
6748 | * @bdu: Block data update register. |
6749 | + * @das: Data Alignment Selection register. |
6750 | * @drdy_irq: Data ready register of the sensor. |
6751 | * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. |
6752 | * @bootime: samples to discard when sensor passing from power-down to power-up. |
6753 | @@ -200,6 +211,7 @@ struct st_sensor_settings { |
6754 | struct st_sensor_axis enable_axis; |
6755 | struct st_sensor_fullscale fs; |
6756 | struct st_sensor_bdu bdu; |
6757 | + struct st_sensor_das das; |
6758 | struct st_sensor_data_ready_irq drdy_irq; |
6759 | bool multi_read_bit; |
6760 | unsigned int bootime; |
6761 | diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h |
6762 | index c58752fe16c4..f020ab4079d3 100644 |
6763 | --- a/include/linux/pci_ids.h |
6764 | +++ b/include/linux/pci_ids.h |
6765 | @@ -2256,12 +2256,29 @@ |
6766 | #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 |
6767 | |
6768 | #define PCI_VENDOR_ID_MELLANOX 0x15b3 |
6769 | -#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 |
6770 | +#define PCI_DEVICE_ID_MELLANOX_CONNECTX3 0x1003 |
6771 | +#define PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO 0x1007 |
6772 | +#define PCI_DEVICE_ID_MELLANOX_CONNECTIB 0x1011 |
6773 | +#define PCI_DEVICE_ID_MELLANOX_CONNECTX4 0x1013 |
6774 | +#define PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX 0x1015 |
6775 | +#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 |
6776 | #define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46 |
6777 | -#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 |
6778 | -#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 |
6779 | -#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c |
6780 | -#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 |
6781 | +#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c |
6782 | +#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 |
6783 | +#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 |
6784 | +#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 |
6785 | +#define PCI_DEVICE_ID_MELLANOX_HERMON_SDR 0x6340 |
6786 | +#define PCI_DEVICE_ID_MELLANOX_HERMON_DDR 0x634a |
6787 | +#define PCI_DEVICE_ID_MELLANOX_HERMON_QDR 0x6354 |
6788 | +#define PCI_DEVICE_ID_MELLANOX_HERMON_EN 0x6368 |
6789 | +#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN 0x6372 |
6790 | +#define PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2 0x6732 |
6791 | +#define PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2 0x673c |
6792 | +#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2 0x6746 |
6793 | +#define PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2 0x6750 |
6794 | +#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2 0x675a |
6795 | +#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2 0x6764 |
6796 | +#define PCI_DEVICE_ID_MELLANOX_CONNECTX2 0x676e |
6797 | |
6798 | #define PCI_VENDOR_ID_DFI 0x15bd |
6799 | |
6800 | diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h |
6801 | index 8e81f9eb95e4..e4516e9ded0f 100644 |
6802 | --- a/include/linux/usb/gadget.h |
6803 | +++ b/include/linux/usb/gadget.h |
6804 | @@ -429,7 +429,9 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev) |
6805 | */ |
6806 | static inline size_t usb_ep_align(struct usb_ep *ep, size_t len) |
6807 | { |
6808 | - return round_up(len, (size_t)le16_to_cpu(ep->desc->wMaxPacketSize)); |
6809 | + int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff; |
6810 | + |
6811 | + return round_up(len, max_packet_size); |
6812 | } |
6813 | |
6814 | /** |
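The gadget.h hunk above stops rounding to the raw wMaxPacketSize and instead masks the value returned by usb_endpoint_maxp() to its low 11 bits, because for high-speed, high-bandwidth endpoints bits 12:11 of wMaxPacketSize encode extra transactions per microframe rather than packet size. A minimal standalone sketch of that masking and rounding, using a made-up helper name (align_to_maxp) and plain C types rather than the kernel's:

#include <stddef.h>
#include <stdint.h>

/* Round len up to the endpoint packet size; bits 12:11 of wMaxPacketSize
 * do not belong to the packet size and must be masked off first. */
static size_t align_to_maxp(size_t len, uint16_t wMaxPacketSize)
{
        size_t maxp = wMaxPacketSize & 0x7ff;    /* packet size is bits 10:0 */

        if (maxp == 0)
                return len;                      /* nothing to align to */

        return (len + maxp - 1) / maxp * maxp;   /* generic round-up */
}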
6815 | diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h |
6816 | index 56368e9b4622..d3cbe48b286d 100644 |
6817 | --- a/include/uapi/linux/nl80211.h |
6818 | +++ b/include/uapi/linux/nl80211.h |
6819 | @@ -323,7 +323,7 @@ |
6820 | * @NL80211_CMD_GET_SCAN: get scan results |
6821 | * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters |
6822 | * %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the |
6823 | - * probe requests at CCK rate or not. %NL80211_ATTR_MAC can be used to |
6824 | + * probe requests at CCK rate or not. %NL80211_ATTR_BSSID can be used to |
6825 | * specify a BSSID to scan for; if not included, the wildcard BSSID will |
6826 | * be used. |
6827 | * @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to |
6828 | @@ -1937,6 +1937,9 @@ enum nl80211_commands { |
6829 | * @NL80211_ATTR_NAN_MATCH: used to report a match. This is a nested attribute. |
6830 | * See &enum nl80211_nan_match_attributes. |
6831 | * |
6832 | + * @NL80211_ATTR_BSSID: The BSSID of the AP. Note that %NL80211_ATTR_MAC is also |
6833 | + * used in various commands/events for specifying the BSSID. |
6834 | + * |
6835 | * @NUM_NL80211_ATTR: total number of nl80211_attrs available |
6836 | * @NL80211_ATTR_MAX: highest attribute number currently defined |
6837 | * @__NL80211_ATTR_AFTER_LAST: internal use |
6838 | @@ -2336,6 +2339,8 @@ enum nl80211_attrs { |
6839 | NL80211_ATTR_NAN_FUNC, |
6840 | NL80211_ATTR_NAN_MATCH, |
6841 | |
6842 | + NL80211_ATTR_BSSID, |
6843 | + |
6844 | /* add attributes here, update the policy in nl80211.c */ |
6845 | |
6846 | __NL80211_ATTR_AFTER_LAST, |
6847 | diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c |
6848 | index 17f51d63da56..668f51b861f7 100644 |
6849 | --- a/kernel/irq/affinity.c |
6850 | +++ b/kernel/irq/affinity.c |
6851 | @@ -37,10 +37,10 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, |
6852 | |
6853 | static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk) |
6854 | { |
6855 | - int n, nodes; |
6856 | + int n, nodes = 0; |
6857 | |
6858 | /* Calculate the number of nodes in the supplied affinity mask */ |
6859 | - for (n = 0, nodes = 0; n < num_online_nodes(); n++) { |
6860 | + for_each_online_node(n) { |
6861 | if (cpumask_intersects(mask, cpumask_of_node(n))) { |
6862 | node_set(n, *nodemsk); |
6863 | nodes++; |
6864 | @@ -81,7 +81,7 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, |
6865 | nodes = get_nodes_in_cpumask(affinity, &nodemsk); |
6866 | |
6867 | /* |
6868 | - * If the number of nodes in the mask is less than or equal the |
6869 | + * If the number of nodes in the mask is greater than or equal the |
6870 | * number of vectors we just spread the vectors across the nodes. |
6871 | */ |
6872 | if (nvec <= nodes) { |
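The affinity.c hunk above replaces an index loop bounded by num_online_nodes() with for_each_online_node(), which walks the actual node IDs; the two differ whenever the online node IDs are sparse. A small userspace sketch of the difference, with an invented ID set:

#include <stdio.h>

int main(void)
{
        const int online[] = { 0, 2, 5 };   /* hypothetical sparse node IDs */
        const int num_online = 3;
        int n;

        /* Old pattern: checks nodes 0, 1, 2 - node 1 does not exist and
         * node 5 is never examined. */
        for (n = 0; n < num_online; n++)
                printf("index loop checks node %d\n", n);

        /* Walking the real IDs, as for_each_online_node() does. */
        for (n = 0; n < num_online; n++)
                printf("online-node walk checks node %d\n", online[n]);

        return 0;
}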
6873 | diff --git a/kernel/relay.c b/kernel/relay.c |
6874 | index da79a109dbeb..8f18d314a96a 100644 |
6875 | --- a/kernel/relay.c |
6876 | +++ b/kernel/relay.c |
6877 | @@ -809,11 +809,11 @@ void relay_subbufs_consumed(struct rchan *chan, |
6878 | { |
6879 | struct rchan_buf *buf; |
6880 | |
6881 | - if (!chan) |
6882 | + if (!chan || cpu >= NR_CPUS) |
6883 | return; |
6884 | |
6885 | buf = *per_cpu_ptr(chan->buf, cpu); |
6886 | - if (cpu >= NR_CPUS || !buf || subbufs_consumed > chan->n_subbufs) |
6887 | + if (!buf || subbufs_consumed > chan->n_subbufs) |
6888 | return; |
6889 | |
6890 | if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed) |
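The relay.c hunk above hoists the cpu >= NR_CPUS test so the index is validated before it is used to look up the per-CPU buffer. A tiny sketch of the check-before-use ordering; NR_SLOTS, slots and get_slot are placeholders, not kernel symbols:

#include <stddef.h>

#define NR_SLOTS 4

static int *slots[NR_SLOTS];

static int *get_slot(unsigned int idx)
{
        if (idx >= NR_SLOTS)    /* reject out-of-range indices first */
                return NULL;

        return slots[idx];      /* only then is the lookup safe */
}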
6891 | diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c |
6892 | index f6aae7977824..d2a20e83ebae 100644 |
6893 | --- a/kernel/time/tick-broadcast.c |
6894 | +++ b/kernel/time/tick-broadcast.c |
6895 | @@ -871,6 +871,9 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) |
6896 | { |
6897 | int cpu = smp_processor_id(); |
6898 | |
6899 | + if (!bc) |
6900 | + return; |
6901 | + |
6902 | /* Set it up only once ! */ |
6903 | if (bc->event_handler != tick_handle_oneshot_broadcast) { |
6904 | int was_periodic = clockevent_state_periodic(bc); |
6905 | diff --git a/mm/compaction.c b/mm/compaction.c |
6906 | index 0409a4ad6ea1..70e6bec46dc2 100644 |
6907 | --- a/mm/compaction.c |
6908 | +++ b/mm/compaction.c |
6909 | @@ -634,22 +634,6 @@ isolate_freepages_range(struct compact_control *cc, |
6910 | return pfn; |
6911 | } |
6912 | |
6913 | -/* Update the number of anon and file isolated pages in the zone */ |
6914 | -static void acct_isolated(struct zone *zone, struct compact_control *cc) |
6915 | -{ |
6916 | - struct page *page; |
6917 | - unsigned int count[2] = { 0, }; |
6918 | - |
6919 | - if (list_empty(&cc->migratepages)) |
6920 | - return; |
6921 | - |
6922 | - list_for_each_entry(page, &cc->migratepages, lru) |
6923 | - count[!!page_is_file_cache(page)]++; |
6924 | - |
6925 | - mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, count[0]); |
6926 | - mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, count[1]); |
6927 | -} |
6928 | - |
6929 | /* Similar to reclaim, but different enough that they don't share logic */ |
6930 | static bool too_many_isolated(struct zone *zone) |
6931 | { |
6932 | @@ -866,6 +850,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, |
6933 | |
6934 | /* Successfully isolated */ |
6935 | del_page_from_lru_list(page, lruvec, page_lru(page)); |
6936 | + inc_node_page_state(page, |
6937 | + NR_ISOLATED_ANON + page_is_file_cache(page)); |
6938 | |
6939 | isolate_success: |
6940 | list_add(&page->lru, &cc->migratepages); |
6941 | @@ -902,7 +888,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, |
6942 | spin_unlock_irqrestore(zone_lru_lock(zone), flags); |
6943 | locked = false; |
6944 | } |
6945 | - acct_isolated(zone, cc); |
6946 | putback_movable_pages(&cc->migratepages); |
6947 | cc->nr_migratepages = 0; |
6948 | cc->last_migrated_pfn = 0; |
6949 | @@ -988,7 +973,6 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, |
6950 | if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) |
6951 | break; |
6952 | } |
6953 | - acct_isolated(cc->zone, cc); |
6954 | |
6955 | return pfn; |
6956 | } |
6957 | @@ -1258,10 +1242,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, |
6958 | low_pfn = isolate_migratepages_block(cc, low_pfn, |
6959 | block_end_pfn, isolate_mode); |
6960 | |
6961 | - if (!low_pfn || cc->contended) { |
6962 | - acct_isolated(zone, cc); |
6963 | + if (!low_pfn || cc->contended) |
6964 | return ISOLATE_ABORT; |
6965 | - } |
6966 | |
6967 | /* |
6968 | * Either we isolated something and proceed with migration. Or |
6969 | @@ -1271,7 +1253,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, |
6970 | break; |
6971 | } |
6972 | |
6973 | - acct_isolated(zone, cc); |
6974 | /* Record where migration scanner will be restarted. */ |
6975 | cc->migrate_pfn = low_pfn; |
6976 | |
6977 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
6978 | index 418bf01a50ed..23aec01836aa 100644 |
6979 | --- a/mm/hugetlb.c |
6980 | +++ b/mm/hugetlb.c |
6981 | @@ -3450,15 +3450,17 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, |
6982 | * Keep the pte_same checks anyway to make transition from the mutex easier. |
6983 | */ |
6984 | static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, |
6985 | - unsigned long address, pte_t *ptep, pte_t pte, |
6986 | - struct page *pagecache_page, spinlock_t *ptl) |
6987 | + unsigned long address, pte_t *ptep, |
6988 | + struct page *pagecache_page, spinlock_t *ptl) |
6989 | { |
6990 | + pte_t pte; |
6991 | struct hstate *h = hstate_vma(vma); |
6992 | struct page *old_page, *new_page; |
6993 | int ret = 0, outside_reserve = 0; |
6994 | unsigned long mmun_start; /* For mmu_notifiers */ |
6995 | unsigned long mmun_end; /* For mmu_notifiers */ |
6996 | |
6997 | + pte = huge_ptep_get(ptep); |
6998 | old_page = pte_page(pte); |
6999 | |
7000 | retry_avoidcopy: |
7001 | @@ -3733,7 +3735,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, |
7002 | hugetlb_count_add(pages_per_huge_page(h), mm); |
7003 | if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { |
7004 | /* Optimization, do the COW without a second fault */ |
7005 | - ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); |
7006 | + ret = hugetlb_cow(mm, vma, address, ptep, page, ptl); |
7007 | } |
7008 | |
7009 | spin_unlock(ptl); |
7010 | @@ -3888,8 +3890,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
7011 | |
7012 | if (flags & FAULT_FLAG_WRITE) { |
7013 | if (!huge_pte_write(entry)) { |
7014 | - ret = hugetlb_cow(mm, vma, address, ptep, entry, |
7015 | - pagecache_page, ptl); |
7016 | + ret = hugetlb_cow(mm, vma, address, ptep, |
7017 | + pagecache_page, ptl); |
7018 | goto out_put_page; |
7019 | } |
7020 | entry = huge_pte_mkdirty(entry); |
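The hugetlb.c change above drops the pte parameter and re-reads it via huge_ptep_get() inside hugetlb_cow(), so the copy-on-write path acts on the entry as it stands under the page-table lock rather than on a value captured earlier. A loose pthread sketch of preferring a read through the pointer over a stale snapshot; struct cell and read_current are invented for illustration:

#include <pthread.h>

struct cell {
        pthread_mutex_t lock;
        int value;
};

static int read_current(struct cell *c)
{
        int v;

        pthread_mutex_lock(&c->lock);
        v = c->value;           /* read the live value under the lock */
        pthread_mutex_unlock(&c->lock);

        return v;
}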
7021 | diff --git a/mm/khugepaged.c b/mm/khugepaged.c |
7022 | index 87e1a7ca3846..5d7c006373d3 100644 |
7023 | --- a/mm/khugepaged.c |
7024 | +++ b/mm/khugepaged.c |
7025 | @@ -1403,6 +1403,9 @@ static void collapse_shmem(struct mm_struct *mm, |
7026 | |
7027 | spin_lock_irq(&mapping->tree_lock); |
7028 | |
7029 | + slot = radix_tree_lookup_slot(&mapping->page_tree, index); |
7030 | + VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot, |
7031 | + &mapping->tree_lock), page); |
7032 | VM_BUG_ON_PAGE(page_mapped(page), page); |
7033 | |
7034 | /* |
7035 | @@ -1426,6 +1429,7 @@ static void collapse_shmem(struct mm_struct *mm, |
7036 | radix_tree_replace_slot(slot, |
7037 | new_page + (index % HPAGE_PMD_NR)); |
7038 | |
7039 | + slot = radix_tree_iter_next(&iter); |
7040 | index++; |
7041 | continue; |
7042 | out_lru: |
7043 | @@ -1521,9 +1525,11 @@ static void collapse_shmem(struct mm_struct *mm, |
7044 | if (!page || iter.index < page->index) { |
7045 | if (!nr_none) |
7046 | break; |
7047 | - /* Put holes back where they were */ |
7048 | - radix_tree_replace_slot(slot, NULL); |
7049 | nr_none--; |
7050 | + /* Put holes back where they were */ |
7051 | + radix_tree_delete(&mapping->page_tree, |
7052 | + iter.index); |
7053 | + slot = radix_tree_iter_next(&iter); |
7054 | continue; |
7055 | } |
7056 | |
7057 | @@ -1537,6 +1543,7 @@ static void collapse_shmem(struct mm_struct *mm, |
7058 | putback_lru_page(page); |
7059 | unlock_page(page); |
7060 | spin_lock_irq(&mapping->tree_lock); |
7061 | + slot = radix_tree_iter_next(&iter); |
7062 | } |
7063 | VM_BUG_ON(nr_none); |
7064 | spin_unlock_irq(&mapping->tree_lock); |
7065 | diff --git a/mm/migrate.c b/mm/migrate.c |
7066 | index 99250aee1ac1..66ce6b490b13 100644 |
7067 | --- a/mm/migrate.c |
7068 | +++ b/mm/migrate.c |
7069 | @@ -168,8 +168,6 @@ void putback_movable_pages(struct list_head *l) |
7070 | continue; |
7071 | } |
7072 | list_del(&page->lru); |
7073 | - dec_node_page_state(page, NR_ISOLATED_ANON + |
7074 | - page_is_file_cache(page)); |
7075 | /* |
7076 | * We isolated non-lru movable page so here we can use |
7077 | * __PageMovable because LRU page's mapping cannot have |
7078 | @@ -186,6 +184,8 @@ void putback_movable_pages(struct list_head *l) |
7079 | put_page(page); |
7080 | } else { |
7081 | putback_lru_page(page); |
7082 | + dec_node_page_state(page, NR_ISOLATED_ANON + |
7083 | + page_is_file_cache(page)); |
7084 | } |
7085 | } |
7086 | } |
7087 | @@ -1121,8 +1121,15 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page, |
7088 | * restored. |
7089 | */ |
7090 | list_del(&page->lru); |
7091 | - dec_node_page_state(page, NR_ISOLATED_ANON + |
7092 | - page_is_file_cache(page)); |
7093 | + |
7094 | + /* |
7095 | + * Compaction can migrate also non-LRU pages which are |
7096 | + * not accounted to NR_ISOLATED_*. They can be recognized |
7097 | + * as __PageMovable |
7098 | + */ |
7099 | + if (likely(!__PageMovable(page))) |
7100 | + dec_node_page_state(page, NR_ISOLATED_ANON + |
7101 | + page_is_file_cache(page)); |
7102 | } |
7103 | |
7104 | /* |
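The migrate.c hunks above make the NR_ISOLATED_* decrement conditional on the page not being __PageMovable, keeping it symmetric with the compaction change earlier in this patch, where only LRU pages are counted at isolation time. A toy sketch of that symmetry; nr_isolated and the boolean flag are stand-ins, not kernel symbols:

#include <stdbool.h>

static long nr_isolated;        /* stand-in for the per-node vmstat counter */

static void isolate_page(bool page_is_lru)
{
        if (page_is_lru)
                nr_isolated++;  /* only LRU pages are accounted on isolation */
}

static void putback_page(bool page_is_lru)
{
        if (page_is_lru)
                nr_isolated--;  /* so only LRU pages are un-accounted here */
}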
7105 | diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c |
7106 | index f6749dced021..3b5fd4188f2a 100644 |
7107 | --- a/net/mac80211/agg-rx.c |
7108 | +++ b/net/mac80211/agg-rx.c |
7109 | @@ -315,11 +315,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, |
7110 | mutex_lock(&sta->ampdu_mlme.mtx); |
7111 | |
7112 | if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) { |
7113 | - tid_agg_rx = rcu_dereference_protected( |
7114 | - sta->ampdu_mlme.tid_rx[tid], |
7115 | - lockdep_is_held(&sta->ampdu_mlme.mtx)); |
7116 | - |
7117 | - if (tid_agg_rx->dialog_token == dialog_token) { |
7118 | + if (sta->ampdu_mlme.tid_rx_token[tid] == dialog_token) { |
7119 | ht_dbg_ratelimited(sta->sdata, |
7120 | "updated AddBA Req from %pM on tid %u\n", |
7121 | sta->sta.addr, tid); |
7122 | @@ -396,7 +392,6 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, |
7123 | } |
7124 | |
7125 | /* update data */ |
7126 | - tid_agg_rx->dialog_token = dialog_token; |
7127 | tid_agg_rx->ssn = start_seq_num; |
7128 | tid_agg_rx->head_seq_num = start_seq_num; |
7129 | tid_agg_rx->buf_size = buf_size; |
7130 | @@ -418,6 +413,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, |
7131 | if (status == WLAN_STATUS_SUCCESS) { |
7132 | __set_bit(tid, sta->ampdu_mlme.agg_session_valid); |
7133 | __clear_bit(tid, sta->ampdu_mlme.unexpected_agg); |
7134 | + sta->ampdu_mlme.tid_rx_token[tid] = dialog_token; |
7135 | } |
7136 | mutex_unlock(&sta->ampdu_mlme.mtx); |
7137 | |
7138 | diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c |
7139 | index a2fcdb47a0e6..14ec63a02669 100644 |
7140 | --- a/net/mac80211/debugfs_sta.c |
7141 | +++ b/net/mac80211/debugfs_sta.c |
7142 | @@ -205,7 +205,7 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, |
7143 | p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); |
7144 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", !!tid_rx); |
7145 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", |
7146 | - tid_rx ? tid_rx->dialog_token : 0); |
7147 | + tid_rx ? sta->ampdu_mlme.tid_rx_token[i] : 0); |
7148 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", |
7149 | tid_rx ? tid_rx->ssn : 0); |
7150 | |
7151 | diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h |
7152 | index ed5fcb984a01..dd06ef0b8861 100644 |
7153 | --- a/net/mac80211/sta_info.h |
7154 | +++ b/net/mac80211/sta_info.h |
7155 | @@ -184,7 +184,6 @@ struct tid_ampdu_tx { |
7156 | * @ssn: Starting Sequence Number expected to be aggregated. |
7157 | * @buf_size: buffer size for incoming A-MPDUs |
7158 | * @timeout: reset timer value (in TUs). |
7159 | - * @dialog_token: dialog token for aggregation session |
7160 | * @rcu_head: RCU head used for freeing this struct |
7161 | * @reorder_lock: serializes access to reorder buffer, see below. |
7162 | * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and |
7163 | @@ -213,7 +212,6 @@ struct tid_ampdu_rx { |
7164 | u16 ssn; |
7165 | u16 buf_size; |
7166 | u16 timeout; |
7167 | - u8 dialog_token; |
7168 | bool auto_seq; |
7169 | bool removed; |
7170 | }; |
7171 | @@ -225,6 +223,7 @@ struct tid_ampdu_rx { |
7172 | * to tid_tx[idx], which are protected by the sta spinlock) |
7173 | * tid_start_tx is also protected by sta->lock. |
7174 | * @tid_rx: aggregation info for Rx per TID -- RCU protected |
7175 | + * @tid_rx_token: dialog tokens for valid aggregation sessions |
7176 | * @tid_rx_timer_expired: bitmap indicating on which TIDs the |
7177 | * RX timer expired until the work for it runs |
7178 | * @tid_rx_stop_requested: bitmap indicating which BA sessions per TID the |
7179 | @@ -243,6 +242,7 @@ struct sta_ampdu_mlme { |
7180 | struct mutex mtx; |
7181 | /* rx */ |
7182 | struct tid_ampdu_rx __rcu *tid_rx[IEEE80211_NUM_TIDS]; |
7183 | + u8 tid_rx_token[IEEE80211_NUM_TIDS]; |
7184 | unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; |
7185 | unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; |
7186 | unsigned long agg_session_valid[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; |
7187 | diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c |
7188 | index bd5f4be89435..dd190ff3daea 100644 |
7189 | --- a/net/mac80211/tx.c |
7190 | +++ b/net/mac80211/tx.c |
7191 | @@ -3262,7 +3262,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, |
7192 | int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2); |
7193 | int hw_headroom = sdata->local->hw.extra_tx_headroom; |
7194 | struct ethhdr eth; |
7195 | - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
7196 | + struct ieee80211_tx_info *info; |
7197 | struct ieee80211_hdr *hdr = (void *)fast_tx->hdr; |
7198 | struct ieee80211_tx_data tx; |
7199 | ieee80211_tx_result r; |
7200 | @@ -3326,6 +3326,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, |
7201 | memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN); |
7202 | memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN); |
7203 | |
7204 | + info = IEEE80211_SKB_CB(skb); |
7205 | memset(info, 0, sizeof(*info)); |
7206 | info->band = fast_tx->band; |
7207 | info->control.vif = &sdata->vif; |
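The tx.c hunk above delays taking the IEEE80211_SKB_CB() pointer until just before it is used, after the points where the function may substitute a different skb; a pointer captured from the original skb would otherwise go stale. A standalone sketch of deriving a pointer only after the object may have been replaced; struct pkt, maybe_replace and get_cb are invented names:

#include <stdlib.h>
#include <string.h>

struct pkt {
        char cb[48];            /* per-packet scratch area, like skb->cb */
        size_t len;
};

/* May hand back a different object (here a fresh copy), which invalidates
 * any pointer previously derived from the old one. */
static struct pkt *maybe_replace(struct pkt *p)
{
        struct pkt *copy = malloc(sizeof(*copy));

        if (!copy)
                return p;
        memcpy(copy, p, sizeof(*copy));
        free(p);
        return copy;
}

static char *get_cb(struct pkt **pp)
{
        *pp = maybe_replace(*pp);   /* replace first ... */
        return (*pp)->cb;           /* ... then derive the pointer */
}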
7208 | diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c |
7209 | index c510810f0b7c..a2dd6edaae37 100644 |
7210 | --- a/net/wireless/nl80211.c |
7211 | +++ b/net/wireless/nl80211.c |
7212 | @@ -414,6 +414,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { |
7213 | [NL80211_ATTR_NAN_MASTER_PREF] = { .type = NLA_U8 }, |
7214 | [NL80211_ATTR_NAN_DUAL] = { .type = NLA_U8 }, |
7215 | [NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED }, |
7216 | + [NL80211_ATTR_BSSID] = { .len = ETH_ALEN }, |
7217 | }; |
7218 | |
7219 | /* policy for the key attributes */ |
7220 | @@ -6677,7 +6678,20 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) |
7221 | request->no_cck = |
7222 | nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); |
7223 | |
7224 | - if (info->attrs[NL80211_ATTR_MAC]) |
7225 | + /* Initial implementation used NL80211_ATTR_MAC to set the specific |
7226 | + * BSSID to scan for. This was problematic because that same attribute |
7227 | + * was already used for another purpose (local random MAC address). The |
7228 | + * NL80211_ATTR_BSSID attribute was added to fix this. For backwards |
7229 | + * compatibility with older userspace components, also use the |
7230 | + * NL80211_ATTR_MAC value here if it can be determined to be used for |
7231 | + * the specific BSSID use case instead of the random MAC address |
7232 | + * (NL80211_ATTR_SCAN_FLAGS is used to enable random MAC address use). |
7233 | + */ |
7234 | + if (info->attrs[NL80211_ATTR_BSSID]) |
7235 | + memcpy(request->bssid, |
7236 | + nla_data(info->attrs[NL80211_ATTR_BSSID]), ETH_ALEN); |
7237 | + else if (!(request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) && |
7238 | + info->attrs[NL80211_ATTR_MAC]) |
7239 | memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_MAC]), |
7240 | ETH_ALEN); |
7241 | else |
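The nl80211.c hunk above gives NL80211_ATTR_BSSID priority and only falls back to NL80211_ATTR_MAC when random-MAC scanning was not requested, otherwise leaving the wildcard BSSID described in the nl80211.h documentation change earlier in this patch. A compact sketch of that precedence; pick_scan_bssid and its parameters are invented, only ETH_ALEN and the attribute semantics come from the patch:

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

static void pick_scan_bssid(unsigned char bssid[ETH_ALEN],
                            const unsigned char *attr_bssid,
                            const unsigned char *attr_mac,
                            bool random_addr_requested)
{
        static const unsigned char wildcard[ETH_ALEN] =
                { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

        if (attr_bssid)                              /* unambiguous attribute */
                memcpy(bssid, attr_bssid, ETH_ALEN);
        else if (attr_mac && !random_addr_requested)
                memcpy(bssid, attr_mac, ETH_ALEN);   /* legacy fallback */
        else
                memcpy(bssid, wildcard, ETH_ALEN);   /* scan any BSSID */
}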
7242 | diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h |
7243 | index 950fd2e64bb7..12262c0cc691 100644 |
7244 | --- a/scripts/gcc-plugins/gcc-common.h |
7245 | +++ b/scripts/gcc-plugins/gcc-common.h |
7246 | @@ -39,6 +39,9 @@ |
7247 | #include "hash-map.h" |
7248 | #endif |
7249 | |
7250 | +#if BUILDING_GCC_VERSION >= 7000 |
7251 | +#include "memmodel.h" |
7252 | +#endif |
7253 | #include "emit-rtl.h" |
7254 | #include "debug.h" |
7255 | #include "target.h" |
7256 | @@ -91,6 +94,9 @@ |
7257 | #include "tree-ssa-alias.h" |
7258 | #include "tree-ssa.h" |
7259 | #include "stringpool.h" |
7260 | +#if BUILDING_GCC_VERSION >= 7000 |
7261 | +#include "tree-vrp.h" |
7262 | +#endif |
7263 | #include "tree-ssanames.h" |
7264 | #include "print-tree.h" |
7265 | #include "tree-eh.h" |
7266 | @@ -287,6 +293,22 @@ static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct c |
7267 | return NULL; |
7268 | } |
7269 | |
7270 | +static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable) |
7271 | +{ |
7272 | + cgraph_node_ptr alias; |
7273 | + |
7274 | + if (callback(node, data)) |
7275 | + return true; |
7276 | + |
7277 | + for (alias = node->same_body; alias; alias = alias->next) { |
7278 | + if (include_overwritable || cgraph_function_body_availability(alias) > AVAIL_OVERWRITABLE) |
7279 | + if (cgraph_for_node_and_aliases(alias, callback, data, include_overwritable)) |
7280 | + return true; |
7281 | + } |
7282 | + |
7283 | + return false; |
7284 | +} |
7285 | + |
7286 | #define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \ |
7287 | for ((node) = cgraph_first_function_with_gimple_body(); (node); \ |
7288 | (node) = cgraph_next_function_with_gimple_body(node)) |
7289 | @@ -399,6 +421,7 @@ typedef union gimple_statement_d gassign; |
7290 | typedef union gimple_statement_d gcall; |
7291 | typedef union gimple_statement_d gcond; |
7292 | typedef union gimple_statement_d gdebug; |
7293 | +typedef union gimple_statement_d ggoto; |
7294 | typedef union gimple_statement_d gphi; |
7295 | typedef union gimple_statement_d greturn; |
7296 | |
7297 | @@ -452,6 +475,16 @@ static inline const gdebug *as_a_const_gdebug(const_gimple stmt) |
7298 | return stmt; |
7299 | } |
7300 | |
7301 | +static inline ggoto *as_a_ggoto(gimple stmt) |
7302 | +{ |
7303 | + return stmt; |
7304 | +} |
7305 | + |
7306 | +static inline const ggoto *as_a_const_ggoto(const_gimple stmt) |
7307 | +{ |
7308 | + return stmt; |
7309 | +} |
7310 | + |
7311 | static inline gphi *as_a_gphi(gimple stmt) |
7312 | { |
7313 | return stmt; |
7314 | @@ -496,6 +529,14 @@ static inline const greturn *as_a_const_greturn(const_gimple stmt) |
7315 | |
7316 | typedef struct rtx_def rtx_insn; |
7317 | |
7318 | +static inline const char *get_decl_section_name(const_tree decl) |
7319 | +{ |
7320 | + if (DECL_SECTION_NAME(decl) == NULL_TREE) |
7321 | + return NULL; |
7322 | + |
7323 | + return TREE_STRING_POINTER(DECL_SECTION_NAME(decl)); |
7324 | +} |
7325 | + |
7326 | static inline void set_decl_section_name(tree node, const char *value) |
7327 | { |
7328 | if (value) |
7329 | @@ -511,6 +552,7 @@ typedef struct gimple_statement_base gassign; |
7330 | typedef struct gimple_statement_call gcall; |
7331 | typedef struct gimple_statement_base gcond; |
7332 | typedef struct gimple_statement_base gdebug; |
7333 | +typedef struct gimple_statement_base ggoto; |
7334 | typedef struct gimple_statement_phi gphi; |
7335 | typedef struct gimple_statement_base greturn; |
7336 | |
7337 | @@ -564,6 +606,16 @@ static inline const gdebug *as_a_const_gdebug(const_gimple stmt) |
7338 | return stmt; |
7339 | } |
7340 | |
7341 | +static inline ggoto *as_a_ggoto(gimple stmt) |
7342 | +{ |
7343 | + return stmt; |
7344 | +} |
7345 | + |
7346 | +static inline const ggoto *as_a_const_ggoto(const_gimple stmt) |
7347 | +{ |
7348 | + return stmt; |
7349 | +} |
7350 | + |
7351 | static inline gphi *as_a_gphi(gimple stmt) |
7352 | { |
7353 | return as_a<gphi>(stmt); |
7354 | @@ -611,6 +663,11 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs) |
7355 | |
7356 | #define INSN_DELETED_P(insn) (insn)->deleted() |
7357 | |
7358 | +static inline const char *get_decl_section_name(const_tree decl) |
7359 | +{ |
7360 | + return DECL_SECTION_NAME(decl); |
7361 | +} |
7362 | + |
7363 | /* symtab/cgraph related */ |
7364 | #define debug_cgraph_node(node) (node)->debug() |
7365 | #define cgraph_get_node(decl) cgraph_node::get(decl) |
7366 | @@ -619,6 +676,7 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs) |
7367 | #define cgraph_n_nodes symtab->cgraph_count |
7368 | #define cgraph_max_uid symtab->cgraph_max_uid |
7369 | #define varpool_get_node(decl) varpool_node::get(decl) |
7370 | +#define dump_varpool_node(file, node) (node)->dump(file) |
7371 | |
7372 | #define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \ |
7373 | (caller)->create_edge((callee), (call_stmt), (count), (freq)) |
7374 | @@ -674,6 +732,11 @@ static inline cgraph_node_ptr cgraph_alias_target(cgraph_node_ptr node) |
7375 | return node->get_alias_target(); |
7376 | } |
7377 | |
7378 | +static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable) |
7379 | +{ |
7380 | + return node->call_for_symbol_thunks_and_aliases(callback, data, include_overwritable); |
7381 | +} |
7382 | + |
7383 | static inline struct cgraph_node_hook_list *cgraph_add_function_insertion_hook(cgraph_node_hook hook, void *data) |
7384 | { |
7385 | return symtab->add_cgraph_insertion_hook(hook, data); |
7386 | @@ -731,6 +794,13 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l |
7387 | |
7388 | template <> |
7389 | template <> |
7390 | +inline bool is_a_helper<const ggoto *>::test(const_gimple gs) |
7391 | +{ |
7392 | + return gs->code == GIMPLE_GOTO; |
7393 | +} |
7394 | + |
7395 | +template <> |
7396 | +template <> |
7397 | inline bool is_a_helper<const greturn *>::test(const_gimple gs) |
7398 | { |
7399 | return gs->code == GIMPLE_RETURN; |
7400 | @@ -766,6 +836,16 @@ static inline const gcall *as_a_const_gcall(const_gimple stmt) |
7401 | return as_a<const gcall *>(stmt); |
7402 | } |
7403 | |
7404 | +static inline ggoto *as_a_ggoto(gimple stmt) |
7405 | +{ |
7406 | + return as_a<ggoto *>(stmt); |
7407 | +} |
7408 | + |
7409 | +static inline const ggoto *as_a_const_ggoto(const_gimple stmt) |
7410 | +{ |
7411 | + return as_a<const ggoto *>(stmt); |
7412 | +} |
7413 | + |
7414 | static inline gphi *as_a_gphi(gimple stmt) |
7415 | { |
7416 | return as_a<gphi *>(stmt); |
7417 | @@ -828,4 +908,9 @@ static inline void debug_gimple_stmt(const_gimple s) |
7418 | #define debug_gimple_stmt(s) debug_gimple_stmt(CONST_CAST_GIMPLE(s)) |
7419 | #endif |
7420 | |
7421 | +#if BUILDING_GCC_VERSION >= 7000 |
7422 | +#define get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep, keep_aligning) \ |
7423 | + get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep) |
7424 | +#endif |
7425 | + |
7426 | #endif |
7427 | diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c |
7428 | index c07a3844ea0a..3df46906492d 100644 |
7429 | --- a/security/integrity/ima/ima_fs.c |
7430 | +++ b/security/integrity/ima/ima_fs.c |
7431 | @@ -401,7 +401,7 @@ static int ima_release_policy(struct inode *inode, struct file *file) |
7432 | const char *cause = valid_policy ? "completed" : "failed"; |
7433 | |
7434 | if ((file->f_flags & O_ACCMODE) == O_RDONLY) |
7435 | - return 0; |
7436 | + return seq_release(inode, file); |
7437 | |
7438 | if (valid_policy && ima_check_policy() < 0) { |
7439 | cause = "failed"; |
7440 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
7441 | index 3f75d1b83bf2..758ac86a1d3a 100644 |
7442 | --- a/sound/pci/hda/patch_realtek.c |
7443 | +++ b/sound/pci/hda/patch_realtek.c |
7444 | @@ -2230,6 +2230,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { |
7445 | SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC), |
7446 | SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), |
7447 | SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS), |
7448 | + SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), |
7449 | SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), |
7450 | SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), |
7451 | SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), |
7452 | @@ -6943,6 +6944,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { |
7453 | SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16), |
7454 | SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51), |
7455 | SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51), |
7456 | + SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8), |
7457 | SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16), |
7458 | SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP), |
7459 | SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT), |
7460 | diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c |
7461 | index 56056ed7fcfd..16c94c45ce50 100644 |
7462 | --- a/sound/soc/intel/boards/cht_bsw_rt5645.c |
7463 | +++ b/sound/soc/intel/boards/cht_bsw_rt5645.c |
7464 | @@ -44,6 +44,7 @@ struct cht_acpi_card { |
7465 | struct cht_mc_private { |
7466 | struct snd_soc_jack jack; |
7467 | struct cht_acpi_card *acpi_card; |
7468 | + char codec_name[16]; |
7469 | }; |
7470 | |
7471 | static inline struct snd_soc_dai *cht_get_codec_dai(struct snd_soc_card *card) |
7472 | @@ -354,7 +355,6 @@ static int snd_cht_mc_probe(struct platform_device *pdev) |
7473 | int i; |
7474 | struct cht_mc_private *drv; |
7475 | struct snd_soc_card *card = snd_soc_cards[0].soc_card; |
7476 | - char codec_name[16]; |
7477 | struct sst_acpi_mach *mach; |
7478 | const char *i2c_name = NULL; |
7479 | int dai_index = 0; |
7480 | @@ -374,12 +374,12 @@ static int snd_cht_mc_probe(struct platform_device *pdev) |
7481 | } |
7482 | card->dev = &pdev->dev; |
7483 | mach = card->dev->platform_data; |
7484 | - sprintf(codec_name, "i2c-%s:00", drv->acpi_card->codec_id); |
7485 | + sprintf(drv->codec_name, "i2c-%s:00", drv->acpi_card->codec_id); |
7486 | |
7487 | /* set correct codec name */ |
7488 | for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) |
7489 | if (!strcmp(card->dai_link[i].codec_name, "i2c-10EC5645:00")) { |
7490 | - card->dai_link[i].codec_name = kstrdup(codec_name, GFP_KERNEL); |
7491 | + card->dai_link[i].codec_name = drv->codec_name; |
7492 | dai_index = i; |
7493 | } |
7494 | |
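The cht_bsw_rt5645 hunk above formats the codec name into storage owned by the driver-private struct instead of duplicating a stack buffer with kstrdup(), so the dai_link keeps pointing at memory with the right lifetime and the unfreed per-probe copy goes away. A minimal sketch of the pattern with invented names (drv_priv, set_codec_name):

#include <stdio.h>

struct drv_priv {
        char codec_name[16];    /* lives as long as the driver instance */
};

static void set_codec_name(struct drv_priv *d, const char *acpi_id)
{
        /* format straight into driver-private storage; no separate
         * allocation and no pointer to a stack buffer escapes */
        snprintf(d->codec_name, sizeof(d->codec_name), "i2c-%s:00", acpi_id);
}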
7495 | diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c |
7496 | index 8dc03039b311..ea162fbf68e5 100644 |
7497 | --- a/sound/soc/intel/skylake/skl-sst-utils.c |
7498 | +++ b/sound/soc/intel/skylake/skl-sst-utils.c |
7499 | @@ -179,7 +179,7 @@ static inline int skl_getid_32(struct uuid_module *module, u64 *val, |
7500 | index = ffz(mask_val); |
7501 | pvt_id = index + word1_mask + word2_mask; |
7502 | if (pvt_id <= (max_inst - 1)) { |
7503 | - *val |= 1 << (index + word1_mask); |
7504 | + *val |= 1ULL << (index + word1_mask); |
7505 | return pvt_id; |
7506 | } |
7507 | } |
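The one-character skl-sst-utils change above matters because shifting the int constant 1 left by 32 or more is undefined behaviour and cannot reach the upper half of a 64-bit mask; the 1ULL form can. A short demonstration (the bit position is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mask = 0;
        int index = 40;                 /* any position >= 32 shows the issue */

        mask |= 1ULL << index;          /* well-defined: sets bit 40 */
        printf("mask = 0x%016llx\n", (unsigned long long)mask);

        return 0;
}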
7508 | diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c |
7509 | index b392e51de94d..420d200f9a05 100644 |
7510 | --- a/sound/soc/qcom/lpass-platform.c |
7511 | +++ b/sound/soc/qcom/lpass-platform.c |
7512 | @@ -78,6 +78,9 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream) |
7513 | dma_ch = 0; |
7514 | if (v->alloc_dma_channel) |
7515 | dma_ch = v->alloc_dma_channel(drvdata, dir); |
7516 | + else |
7517 | + dma_ch = 0; |
7518 | + |
7519 | if (dma_ch < 0) |
7520 | return dma_ch; |
7521 | |
7522 | diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c |
7523 | index 7825bff45ae3..85324e61cbd5 100644 |
7524 | --- a/sound/soc/samsung/i2s.c |
7525 | +++ b/sound/soc/samsung/i2s.c |
7526 | @@ -1029,12 +1029,13 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai) |
7527 | static int samsung_i2s_dai_remove(struct snd_soc_dai *dai) |
7528 | { |
7529 | struct i2s_dai *i2s = snd_soc_dai_get_drvdata(dai); |
7530 | + unsigned long flags; |
7531 | |
7532 | if (!is_secondary(i2s)) { |
7533 | if (i2s->quirks & QUIRK_NEED_RSTCLR) { |
7534 | - spin_lock(i2s->lock); |
7535 | + spin_lock_irqsave(i2s->lock, flags); |
7536 | writel(0, i2s->addr + I2SCON); |
7537 | - spin_unlock(i2s->lock); |
7538 | + spin_unlock_irqrestore(i2s->lock, flags); |
7539 | } |
7540 | } |
7541 | |
7542 | diff --git a/sound/usb/card.c b/sound/usb/card.c |
7543 | index 2ddc034673a8..f36cb068dad3 100644 |
7544 | --- a/sound/usb/card.c |
7545 | +++ b/sound/usb/card.c |
7546 | @@ -206,7 +206,6 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int |
7547 | if (! snd_usb_parse_audio_interface(chip, interface)) { |
7548 | usb_set_interface(dev, interface, 0); /* reset the current interface */ |
7549 | usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); |
7550 | - return -EINVAL; |
7551 | } |
7552 | |
7553 | return 0; |
7554 | diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c |
7555 | index c470251cea4b..c5251aaad844 100644 |
7556 | --- a/sound/usb/endpoint.c |
7557 | +++ b/sound/usb/endpoint.c |
7558 | @@ -534,6 +534,11 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep) |
7559 | alive, ep->ep_num); |
7560 | clear_bit(EP_FLAG_STOPPING, &ep->flags); |
7561 | |
7562 | + ep->data_subs = NULL; |
7563 | + ep->sync_slave = NULL; |
7564 | + ep->retire_data_urb = NULL; |
7565 | + ep->prepare_data_urb = NULL; |
7566 | + |
7567 | return 0; |
7568 | } |
7569 | |
7570 | @@ -898,9 +903,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, |
7571 | /** |
7572 | * snd_usb_endpoint_start: start an snd_usb_endpoint |
7573 | * |
7574 | - * @ep: the endpoint to start |
7575 | - * @can_sleep: flag indicating whether the operation is executed in |
7576 | - * non-atomic context |
7577 | + * @ep: the endpoint to start |
7578 | * |
7579 | * A call to this function will increment the use count of the endpoint. |
7580 | * In case it is not already running, the URBs for this endpoint will be |
7581 | @@ -910,7 +913,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, |
7582 | * |
7583 | * Returns an error if the URB submission failed, 0 in all other cases. |
7584 | */ |
7585 | -int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep) |
7586 | +int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) |
7587 | { |
7588 | int err; |
7589 | unsigned int i; |
7590 | @@ -924,8 +927,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep) |
7591 | |
7592 | /* just to be sure */ |
7593 | deactivate_urbs(ep, false); |
7594 | - if (can_sleep) |
7595 | - wait_clear_urbs(ep); |
7596 | |
7597 | ep->active_mask = 0; |
7598 | ep->unlink_mask = 0; |
7599 | @@ -1006,10 +1007,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep) |
7600 | |
7601 | if (--ep->use_count == 0) { |
7602 | deactivate_urbs(ep, false); |
7603 | - ep->data_subs = NULL; |
7604 | - ep->sync_slave = NULL; |
7605 | - ep->retire_data_urb = NULL; |
7606 | - ep->prepare_data_urb = NULL; |
7607 | set_bit(EP_FLAG_STOPPING, &ep->flags); |
7608 | } |
7609 | } |
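The endpoint.c hunks above move the clearing of the data/sync stream pointers and URB callbacks from snd_usb_endpoint_stop() into wait_clear_urbs(), i.e. to the point where all in-flight URBs are known to have drained. A rough pthread sketch of that ordering; struct ep and ep_drain_and_clear are invented stand-ins, with a worker thread playing the role of outstanding URB completions:

#include <pthread.h>
#include <stddef.h>

struct ep {
        pthread_t worker;       /* stands in for in-flight completions */
        void (*retire)(void *);
        void *retire_arg;
};

static void ep_drain_and_clear(struct ep *e)
{
        pthread_join(e->worker, NULL);  /* wait until no completion can run */

        e->retire = NULL;               /* only now is clearing race-free */
        e->retire_arg = NULL;
}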
7610 | diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h |
7611 | index 6428392d8f62..584f295d7c77 100644 |
7612 | --- a/sound/usb/endpoint.h |
7613 | +++ b/sound/usb/endpoint.h |
7614 | @@ -18,7 +18,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, |
7615 | struct audioformat *fmt, |
7616 | struct snd_usb_endpoint *sync_ep); |
7617 | |
7618 | -int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep); |
7619 | +int snd_usb_endpoint_start(struct snd_usb_endpoint *ep); |
7620 | void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep); |
7621 | void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep); |
7622 | int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep); |
7623 | diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c |
7624 | index 44d178ee9177..48afae053c56 100644 |
7625 | --- a/sound/usb/pcm.c |
7626 | +++ b/sound/usb/pcm.c |
7627 | @@ -218,7 +218,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, |
7628 | } |
7629 | } |
7630 | |
7631 | -static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep) |
7632 | +static int start_endpoints(struct snd_usb_substream *subs) |
7633 | { |
7634 | int err; |
7635 | |
7636 | @@ -231,7 +231,7 @@ static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep) |
7637 | dev_dbg(&subs->dev->dev, "Starting data EP @%p\n", ep); |
7638 | |
7639 | ep->data_subs = subs; |
7640 | - err = snd_usb_endpoint_start(ep, can_sleep); |
7641 | + err = snd_usb_endpoint_start(ep); |
7642 | if (err < 0) { |
7643 | clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags); |
7644 | return err; |
7645 | @@ -260,7 +260,7 @@ static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep) |
7646 | dev_dbg(&subs->dev->dev, "Starting sync EP @%p\n", ep); |
7647 | |
7648 | ep->sync_slave = subs->data_endpoint; |
7649 | - err = snd_usb_endpoint_start(ep, can_sleep); |
7650 | + err = snd_usb_endpoint_start(ep); |
7651 | if (err < 0) { |
7652 | clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags); |
7653 | return err; |
7654 | @@ -839,7 +839,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream) |
7655 | /* for playback, submit the URBs now; otherwise, the first hwptr_done |
7656 | * updates for all URBs would happen at the same time when starting */ |
7657 | if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) |
7658 | - ret = start_endpoints(subs, true); |
7659 | + ret = start_endpoints(subs); |
7660 | |
7661 | unlock: |
7662 | snd_usb_unlock_shutdown(subs->stream->chip); |
7663 | @@ -1655,7 +1655,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream |
7664 | |
7665 | switch (cmd) { |
7666 | case SNDRV_PCM_TRIGGER_START: |
7667 | - err = start_endpoints(subs, false); |
7668 | + err = start_endpoints(subs); |
7669 | if (err < 0) |
7670 | return err; |
7671 |