Contents of /trunk/kernel-magellan/patches-4.13/0103-4.13.4-all-fixes.patch
Parent Directory | Revision Log
Revision 2996 -
(show annotations)
(download)
Mon Oct 9 08:50:13 2017 UTC (6 years, 11 months ago) by niro
File size: 166001 byte(s)
-linux-4.13.4
1 | diff --git a/Documentation/dev-tools/gdb-kernel-debugging.rst b/Documentation/dev-tools/gdb-kernel-debugging.rst |
2 | index 5e93c9bc6619..19df79286f00 100644 |
3 | --- a/Documentation/dev-tools/gdb-kernel-debugging.rst |
4 | +++ b/Documentation/dev-tools/gdb-kernel-debugging.rst |
5 | @@ -31,11 +31,13 @@ Setup |
6 | CONFIG_DEBUG_INFO_REDUCED off. If your architecture supports |
7 | CONFIG_FRAME_POINTER, keep it enabled. |
8 | |
9 | -- Install that kernel on the guest. |
10 | +- Install that kernel on the guest, turn off KASLR if necessary by adding |
11 | + "nokaslr" to the kernel command line. |
12 | Alternatively, QEMU allows to boot the kernel directly using -kernel, |
13 | -append, -initrd command line switches. This is generally only useful if |
14 | you do not depend on modules. See QEMU documentation for more details on |
15 | - this mode. |
16 | + this mode. In this case, you should build the kernel with |
17 | + CONFIG_RANDOMIZE_BASE disabled if the architecture supports KASLR. |
18 | |
19 | - Enable the gdb stub of QEMU/KVM, either |
20 | |
21 | diff --git a/Makefile b/Makefile |
22 | index 0f31ef4aea7b..159901979dec 100644 |
23 | --- a/Makefile |
24 | +++ b/Makefile |
25 | @@ -1,6 +1,6 @@ |
26 | VERSION = 4 |
27 | PATCHLEVEL = 13 |
28 | -SUBLEVEL = 3 |
29 | +SUBLEVEL = 4 |
30 | EXTRAVERSION = |
31 | NAME = Fearless Coyote |
32 | |
33 | diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S |
34 | index 1eea99beecc3..85d9ea4a0acc 100644 |
35 | --- a/arch/arc/kernel/entry.S |
36 | +++ b/arch/arc/kernel/entry.S |
37 | @@ -92,6 +92,12 @@ ENTRY(EV_MachineCheck) |
38 | lr r0, [efa] |
39 | mov r1, sp |
40 | |
41 | + ; hardware auto-disables MMU, re-enable it to allow kernel vaddr |
42 | + ; access for say stack unwinding of modules for crash dumps |
43 | + lr r3, [ARC_REG_PID] |
44 | + or r3, r3, MMU_ENABLE |
45 | + sr r3, [ARC_REG_PID] |
46 | + |
47 | lsr r3, r2, 8 |
48 | bmsk r3, r3, 7 |
49 | brne r3, ECR_C_MCHK_DUP_TLB, 1f |
50 | diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c |
51 | index b181f3ee38aa..ac81502055f8 100644 |
52 | --- a/arch/arc/mm/tlb.c |
53 | +++ b/arch/arc/mm/tlb.c |
54 | @@ -908,9 +908,6 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, |
55 | |
56 | local_irq_save(flags); |
57 | |
58 | - /* re-enable the MMU */ |
59 | - write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID)); |
60 | - |
61 | /* loop thru all sets of TLB */ |
62 | for (set = 0; set < mmu->sets; set++) { |
63 | |
64 | diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c |
65 | index fd71b8daaaf2..5bec64f2884e 100644 |
66 | --- a/arch/mips/math-emu/dp_fmax.c |
67 | +++ b/arch/mips/math-emu/dp_fmax.c |
68 | @@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y) |
69 | case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): |
70 | return ieee754dp_nanxcpt(x); |
71 | |
72 | - /* numbers are preferred to NaNs */ |
73 | + /* |
74 | + * Quiet NaN handling |
75 | + */ |
76 | + |
77 | + /* |
78 | + * The case of both inputs quiet NaNs |
79 | + */ |
80 | + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
81 | + return x; |
82 | + |
83 | + /* |
84 | + * The cases of exactly one input quiet NaN (numbers |
85 | + * are here preferred as returned values to NaNs) |
86 | + */ |
87 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): |
88 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): |
89 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): |
90 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): |
91 | return x; |
92 | |
93 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
94 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): |
95 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): |
96 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): |
97 | @@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y) |
98 | return ys ? x : y; |
99 | |
100 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): |
101 | - if (xs == ys) |
102 | - return x; |
103 | - return ieee754dp_zero(1); |
104 | + return ieee754dp_zero(xs & ys); |
105 | |
106 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): |
107 | DPDNORMX; |
108 | @@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y) |
109 | else if (xs < ys) |
110 | return x; |
111 | |
112 | - /* Compare exponent */ |
113 | - if (xe > ye) |
114 | - return x; |
115 | - else if (xe < ye) |
116 | - return y; |
117 | + /* Signs of inputs are equal, let's compare exponents */ |
118 | + if (xs == 0) { |
119 | + /* Inputs are both positive */ |
120 | + if (xe > ye) |
121 | + return x; |
122 | + else if (xe < ye) |
123 | + return y; |
124 | + } else { |
125 | + /* Inputs are both negative */ |
126 | + if (xe > ye) |
127 | + return y; |
128 | + else if (xe < ye) |
129 | + return x; |
130 | + } |
131 | |
132 | - /* Compare mantissa */ |
133 | + /* Signs and exponents of inputs are equal, let's compare mantissas */ |
134 | + if (xs == 0) { |
135 | + /* Inputs are both positive, with equal signs and exponents */ |
136 | + if (xm <= ym) |
137 | + return y; |
138 | + return x; |
139 | + } |
140 | + /* Inputs are both negative, with equal signs and exponents */ |
141 | if (xm <= ym) |
142 | - return y; |
143 | - return x; |
144 | + return x; |
145 | + return y; |
146 | } |
147 | |
148 | union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) |
149 | @@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) |
150 | case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): |
151 | return ieee754dp_nanxcpt(x); |
152 | |
153 | - /* numbers are preferred to NaNs */ |
154 | + /* |
155 | + * Quiet NaN handling |
156 | + */ |
157 | + |
158 | + /* |
159 | + * The case of both inputs quiet NaNs |
160 | + */ |
161 | + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
162 | + return x; |
163 | + |
164 | + /* |
165 | + * The cases of exactly one input quiet NaN (numbers |
166 | + * are here preferred as returned values to NaNs) |
167 | + */ |
168 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): |
169 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): |
170 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): |
171 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): |
172 | return x; |
173 | |
174 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
175 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): |
176 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): |
177 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): |
178 | @@ -164,6 +202,9 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) |
179 | /* |
180 | * Infinity and zero handling |
181 | */ |
182 | + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): |
183 | + return ieee754dp_inf(xs & ys); |
184 | + |
185 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): |
186 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): |
187 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): |
188 | @@ -171,7 +212,6 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) |
189 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): |
190 | return x; |
191 | |
192 | - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): |
193 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): |
194 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): |
195 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): |
196 | @@ -180,9 +220,7 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) |
197 | return y; |
198 | |
199 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): |
200 | - if (xs == ys) |
201 | - return x; |
202 | - return ieee754dp_zero(1); |
203 | + return ieee754dp_zero(xs & ys); |
204 | |
205 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): |
206 | DPDNORMX; |
207 | @@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) |
208 | return y; |
209 | |
210 | /* Compare mantissa */ |
211 | - if (xm <= ym) |
212 | + if (xm < ym) |
213 | return y; |
214 | - return x; |
215 | + else if (xm > ym) |
216 | + return x; |
217 | + else if (xs == 0) |
218 | + return x; |
219 | + return y; |
220 | } |
221 | diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c |
222 | index c1072b0dfb95..a287b23818d8 100644 |
223 | --- a/arch/mips/math-emu/dp_fmin.c |
224 | +++ b/arch/mips/math-emu/dp_fmin.c |
225 | @@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y) |
226 | case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): |
227 | return ieee754dp_nanxcpt(x); |
228 | |
229 | - /* numbers are preferred to NaNs */ |
230 | + /* |
231 | + * Quiet NaN handling |
232 | + */ |
233 | + |
234 | + /* |
235 | + * The case of both inputs quiet NaNs |
236 | + */ |
237 | + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
238 | + return x; |
239 | + |
240 | + /* |
241 | + * The cases of exactly one input quiet NaN (numbers |
242 | + * are here preferred as returned values to NaNs) |
243 | + */ |
244 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): |
245 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): |
246 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): |
247 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): |
248 | return x; |
249 | |
250 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
251 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): |
252 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): |
253 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): |
254 | @@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y) |
255 | return ys ? y : x; |
256 | |
257 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): |
258 | - if (xs == ys) |
259 | - return x; |
260 | - return ieee754dp_zero(1); |
261 | + return ieee754dp_zero(xs | ys); |
262 | |
263 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): |
264 | DPDNORMX; |
265 | @@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y) |
266 | else if (xs < ys) |
267 | return y; |
268 | |
269 | - /* Compare exponent */ |
270 | - if (xe > ye) |
271 | - return y; |
272 | - else if (xe < ye) |
273 | - return x; |
274 | + /* Signs of inputs are the same, let's compare exponents */ |
275 | + if (xs == 0) { |
276 | + /* Inputs are both positive */ |
277 | + if (xe > ye) |
278 | + return y; |
279 | + else if (xe < ye) |
280 | + return x; |
281 | + } else { |
282 | + /* Inputs are both negative */ |
283 | + if (xe > ye) |
284 | + return x; |
285 | + else if (xe < ye) |
286 | + return y; |
287 | + } |
288 | |
289 | - /* Compare mantissa */ |
290 | + /* Signs and exponents of inputs are equal, let's compare mantissas */ |
291 | + if (xs == 0) { |
292 | + /* Inputs are both positive, with equal signs and exponents */ |
293 | + if (xm <= ym) |
294 | + return x; |
295 | + return y; |
296 | + } |
297 | + /* Inputs are both negative, with equal signs and exponents */ |
298 | if (xm <= ym) |
299 | - return x; |
300 | - return y; |
301 | + return y; |
302 | + return x; |
303 | } |
304 | |
305 | union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y) |
306 | @@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y) |
307 | case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): |
308 | return ieee754dp_nanxcpt(x); |
309 | |
310 | - /* numbers are preferred to NaNs */ |
311 | + /* |
312 | + * Quiet NaN handling |
313 | + */ |
314 | + |
315 | + /* |
316 | + * The case of both inputs quiet NaNs |
317 | + */ |
318 | + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
319 | + return x; |
320 | + |
321 | + /* |
322 | + * The cases of exactly one input quiet NaN (numbers |
323 | + * are here preferred as returned values to NaNs) |
324 | + */ |
325 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): |
326 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): |
327 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): |
328 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): |
329 | return x; |
330 | |
331 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
332 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): |
333 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): |
334 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): |
335 | @@ -164,25 +202,25 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y) |
336 | /* |
337 | * Infinity and zero handling |
338 | */ |
339 | + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): |
340 | + return ieee754dp_inf(xs | ys); |
341 | + |
342 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): |
343 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): |
344 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): |
345 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): |
346 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): |
347 | - return x; |
348 | + return y; |
349 | |
350 | - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): |
351 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): |
352 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): |
353 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): |
354 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): |
355 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): |
356 | - return y; |
357 | + return x; |
358 | |
359 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): |
360 | - if (xs == ys) |
361 | - return x; |
362 | - return ieee754dp_zero(1); |
363 | + return ieee754dp_zero(xs | ys); |
364 | |
365 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): |
366 | DPDNORMX; |
367 | @@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y) |
368 | return x; |
369 | |
370 | /* Compare mantissa */ |
371 | - if (xm <= ym) |
372 | + if (xm < ym) |
373 | + return x; |
374 | + else if (xm > ym) |
375 | + return y; |
376 | + else if (xs == 1) |
377 | return x; |
378 | return y; |
379 | } |
380 | diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c |
381 | index caa62f20a888..e0d9be5fbf4c 100644 |
382 | --- a/arch/mips/math-emu/dp_maddf.c |
383 | +++ b/arch/mips/math-emu/dp_maddf.c |
384 | @@ -14,22 +14,45 @@ |
385 | |
386 | #include "ieee754dp.h" |
387 | |
388 | -enum maddf_flags { |
389 | - maddf_negate_product = 1 << 0, |
390 | -}; |
391 | + |
392 | +/* 128 bits shift right logical with rounding. */ |
393 | +void srl128(u64 *hptr, u64 *lptr, int count) |
394 | +{ |
395 | + u64 low; |
396 | + |
397 | + if (count >= 128) { |
398 | + *lptr = *hptr != 0 || *lptr != 0; |
399 | + *hptr = 0; |
400 | + } else if (count >= 64) { |
401 | + if (count == 64) { |
402 | + *lptr = *hptr | (*lptr != 0); |
403 | + } else { |
404 | + low = *lptr; |
405 | + *lptr = *hptr >> (count - 64); |
406 | + *lptr |= (*hptr << (128 - count)) != 0 || low != 0; |
407 | + } |
408 | + *hptr = 0; |
409 | + } else { |
410 | + low = *lptr; |
411 | + *lptr = low >> count | *hptr << (64 - count); |
412 | + *lptr |= (low << (64 - count)) != 0; |
413 | + *hptr = *hptr >> count; |
414 | + } |
415 | +} |
416 | |
417 | static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, |
418 | union ieee754dp y, enum maddf_flags flags) |
419 | { |
420 | int re; |
421 | int rs; |
422 | - u64 rm; |
423 | unsigned lxm; |
424 | unsigned hxm; |
425 | unsigned lym; |
426 | unsigned hym; |
427 | u64 lrm; |
428 | u64 hrm; |
429 | + u64 lzm; |
430 | + u64 hzm; |
431 | u64 t; |
432 | u64 at; |
433 | int s; |
434 | @@ -48,52 +71,34 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, |
435 | |
436 | ieee754_clearcx(); |
437 | |
438 | - switch (zc) { |
439 | - case IEEE754_CLASS_SNAN: |
440 | - ieee754_setcx(IEEE754_INVALID_OPERATION); |
441 | + /* |
442 | + * Handle the cases when at least one of x, y or z is a NaN. |
443 | + * Order of precedence is sNaN, qNaN and z, x, y. |
444 | + */ |
445 | + if (zc == IEEE754_CLASS_SNAN) |
446 | return ieee754dp_nanxcpt(z); |
447 | - case IEEE754_CLASS_DNORM: |
448 | - DPDNORMZ; |
449 | - /* QNAN and ZERO cases are handled separately below */ |
450 | - } |
451 | - |
452 | - switch (CLPAIR(xc, yc)) { |
453 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): |
454 | - case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): |
455 | - case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): |
456 | - case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): |
457 | - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): |
458 | - return ieee754dp_nanxcpt(y); |
459 | - |
460 | - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): |
461 | - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): |
462 | - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): |
463 | - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): |
464 | - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): |
465 | - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): |
466 | + if (xc == IEEE754_CLASS_SNAN) |
467 | return ieee754dp_nanxcpt(x); |
468 | - |
469 | - case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): |
470 | - case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): |
471 | - case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): |
472 | - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): |
473 | + if (yc == IEEE754_CLASS_SNAN) |
474 | + return ieee754dp_nanxcpt(y); |
475 | + if (zc == IEEE754_CLASS_QNAN) |
476 | + return z; |
477 | + if (xc == IEEE754_CLASS_QNAN) |
478 | + return x; |
479 | + if (yc == IEEE754_CLASS_QNAN) |
480 | return y; |
481 | |
482 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
483 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): |
484 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): |
485 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): |
486 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): |
487 | - return x; |
488 | + if (zc == IEEE754_CLASS_DNORM) |
489 | + DPDNORMZ; |
490 | + /* ZERO z cases are handled separately below */ |
491 | |
492 | + switch (CLPAIR(xc, yc)) { |
493 | |
494 | /* |
495 | * Infinity handling |
496 | */ |
497 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): |
498 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): |
499 | - if (zc == IEEE754_CLASS_QNAN) |
500 | - return z; |
501 | ieee754_setcx(IEEE754_INVALID_OPERATION); |
502 | return ieee754dp_indef(); |
503 | |
504 | @@ -102,9 +107,27 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, |
505 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): |
506 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): |
507 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): |
508 | - if (zc == IEEE754_CLASS_QNAN) |
509 | - return z; |
510 | - return ieee754dp_inf(xs ^ ys); |
511 | + if ((zc == IEEE754_CLASS_INF) && |
512 | + ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) || |
513 | + ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) { |
514 | + /* |
515 | + * Cases of addition of infinities with opposite signs |
516 | + * or subtraction of infinities with same signs. |
517 | + */ |
518 | + ieee754_setcx(IEEE754_INVALID_OPERATION); |
519 | + return ieee754dp_indef(); |
520 | + } |
521 | + /* |
522 | + * z is here either not an infinity, or an infinity having the |
523 | + * same sign as product (x*y) (in case of MADDF.D instruction) |
524 | + * or product -(x*y) (in MSUBF.D case). The result must be an |
525 | + * infinity, and its sign is determined only by the value of |
526 | + * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y. |
527 | + */ |
528 | + if (flags & MADDF_NEGATE_PRODUCT) |
529 | + return ieee754dp_inf(1 ^ (xs ^ ys)); |
530 | + else |
531 | + return ieee754dp_inf(xs ^ ys); |
532 | |
533 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): |
534 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): |
535 | @@ -113,32 +136,42 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, |
536 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): |
537 | if (zc == IEEE754_CLASS_INF) |
538 | return ieee754dp_inf(zs); |
539 | - /* Multiplication is 0 so just return z */ |
540 | + if (zc == IEEE754_CLASS_ZERO) { |
541 | + /* Handle cases +0 + (-0) and similar ones. */ |
542 | + if ((!(flags & MADDF_NEGATE_PRODUCT) |
543 | + && (zs == (xs ^ ys))) || |
544 | + ((flags & MADDF_NEGATE_PRODUCT) |
545 | + && (zs != (xs ^ ys)))) |
546 | + /* |
547 | + * Cases of addition of zeros of equal signs |
548 | + * or subtraction of zeroes of opposite signs. |
549 | + * The sign of the resulting zero is in any |
550 | + * such case determined only by the sign of z. |
551 | + */ |
552 | + return z; |
553 | + |
554 | + return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD); |
555 | + } |
556 | + /* x*y is here 0, and z is not 0, so just return z */ |
557 | return z; |
558 | |
559 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): |
560 | DPDNORMX; |
561 | |
562 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): |
563 | - if (zc == IEEE754_CLASS_QNAN) |
564 | - return z; |
565 | - else if (zc == IEEE754_CLASS_INF) |
566 | + if (zc == IEEE754_CLASS_INF) |
567 | return ieee754dp_inf(zs); |
568 | DPDNORMY; |
569 | break; |
570 | |
571 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): |
572 | - if (zc == IEEE754_CLASS_QNAN) |
573 | - return z; |
574 | - else if (zc == IEEE754_CLASS_INF) |
575 | + if (zc == IEEE754_CLASS_INF) |
576 | return ieee754dp_inf(zs); |
577 | DPDNORMX; |
578 | break; |
579 | |
580 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): |
581 | - if (zc == IEEE754_CLASS_QNAN) |
582 | - return z; |
583 | - else if (zc == IEEE754_CLASS_INF) |
584 | + if (zc == IEEE754_CLASS_INF) |
585 | return ieee754dp_inf(zs); |
586 | /* fall through to real computations */ |
587 | } |
588 | @@ -157,7 +190,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, |
589 | |
590 | re = xe + ye; |
591 | rs = xs ^ ys; |
592 | - if (flags & maddf_negate_product) |
593 | + if (flags & MADDF_NEGATE_PRODUCT) |
594 | rs ^= 1; |
595 | |
596 | /* shunt to top of word */ |
597 | @@ -165,7 +198,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, |
598 | ym <<= 64 - (DP_FBITS + 1); |
599 | |
600 | /* |
601 | - * Multiply 64 bits xm, ym to give high 64 bits rm with stickness. |
602 | + * Multiply 64 bits xm and ym to give 128 bits result in hrm:lrm. |
603 | */ |
604 | |
605 | /* 32 * 32 => 64 */ |
606 | @@ -195,81 +228,110 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, |
607 | |
608 | hrm = hrm + (t >> 32); |
609 | |
610 | - rm = hrm | (lrm != 0); |
611 | - |
612 | - /* |
613 | - * Sticky shift down to normal rounding precision. |
614 | - */ |
615 | - if ((s64) rm < 0) { |
616 | - rm = (rm >> (64 - (DP_FBITS + 1 + 3))) | |
617 | - ((rm << (DP_FBITS + 1 + 3)) != 0); |
618 | + /* Put explicit bit at bit 126 if necessary */ |
619 | + if ((int64_t)hrm < 0) { |
620 | + lrm = (hrm << 63) | (lrm >> 1); |
621 | + hrm = hrm >> 1; |
622 | re++; |
623 | - } else { |
624 | - rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) | |
625 | - ((rm << (DP_FBITS + 1 + 3 + 1)) != 0); |
626 | } |
627 | - assert(rm & (DP_HIDDEN_BIT << 3)); |
628 | |
629 | - if (zc == IEEE754_CLASS_ZERO) |
630 | - return ieee754dp_format(rs, re, rm); |
631 | + assert(hrm & (1 << 62)); |
632 | |
633 | - /* And now the addition */ |
634 | - assert(zm & DP_HIDDEN_BIT); |
635 | + if (zc == IEEE754_CLASS_ZERO) { |
636 | + /* |
637 | + * Move explicit bit from bit 126 to bit 55 since the |
638 | + * ieee754dp_format code expects the mantissa to be |
639 | + * 56 bits wide (53 + 3 rounding bits). |
640 | + */ |
641 | + srl128(&hrm, &lrm, (126 - 55)); |
642 | + return ieee754dp_format(rs, re, lrm); |
643 | + } |
644 | |
645 | - /* |
646 | - * Provide guard,round and stick bit space. |
647 | - */ |
648 | - zm <<= 3; |
649 | + /* Move explicit bit from bit 52 to bit 126 */ |
650 | + lzm = 0; |
651 | + hzm = zm << 10; |
652 | + assert(hzm & (1 << 62)); |
653 | |
654 | + /* Make the exponents the same */ |
655 | if (ze > re) { |
656 | /* |
657 | * Have to shift y fraction right to align. |
658 | */ |
659 | s = ze - re; |
660 | - rm = XDPSRS(rm, s); |
661 | + srl128(&hrm, &lrm, s); |
662 | re += s; |
663 | } else if (re > ze) { |
664 | /* |
665 | * Have to shift x fraction right to align. |
666 | */ |
667 | s = re - ze; |
668 | - zm = XDPSRS(zm, s); |
669 | + srl128(&hzm, &lzm, s); |
670 | ze += s; |
671 | } |
672 | assert(ze == re); |
673 | assert(ze <= DP_EMAX); |
674 | |
675 | + /* Do the addition */ |
676 | if (zs == rs) { |
677 | /* |
678 | - * Generate 28 bit result of adding two 27 bit numbers |
679 | - * leaving result in xm, xs and xe. |
680 | + * Generate 128 bit result by adding two 127 bit numbers |
681 | + * leaving result in hzm:lzm, zs and ze. |
682 | */ |
683 | - zm = zm + rm; |
684 | - |
685 | - if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */ |
686 | - zm = XDPSRS1(zm); |
687 | + hzm = hzm + hrm + (lzm > (lzm + lrm)); |
688 | + lzm = lzm + lrm; |
689 | + if ((int64_t)hzm < 0) { /* carry out */ |
690 | + srl128(&hzm, &lzm, 1); |
691 | ze++; |
692 | } |
693 | } else { |
694 | - if (zm >= rm) { |
695 | - zm = zm - rm; |
696 | + if (hzm > hrm || (hzm == hrm && lzm >= lrm)) { |
697 | + hzm = hzm - hrm - (lzm < lrm); |
698 | + lzm = lzm - lrm; |
699 | } else { |
700 | - zm = rm - zm; |
701 | + hzm = hrm - hzm - (lrm < lzm); |
702 | + lzm = lrm - lzm; |
703 | zs = rs; |
704 | } |
705 | - if (zm == 0) |
706 | + if (lzm == 0 && hzm == 0) |
707 | return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD); |
708 | |
709 | /* |
710 | - * Normalize to rounding precision. |
711 | + * Put explicit bit at bit 126 if necessary. |
712 | */ |
713 | - while ((zm >> (DP_FBITS + 3)) == 0) { |
714 | - zm <<= 1; |
715 | - ze--; |
716 | + if (hzm == 0) { |
717 | + /* left shift by 63 or 64 bits */ |
718 | + if ((int64_t)lzm < 0) { |
719 | + /* MSB of lzm is the explicit bit */ |
720 | + hzm = lzm >> 1; |
721 | + lzm = lzm << 63; |
722 | + ze -= 63; |
723 | + } else { |
724 | + hzm = lzm; |
725 | + lzm = 0; |
726 | + ze -= 64; |
727 | + } |
728 | + } |
729 | + |
730 | + t = 0; |
731 | + while ((hzm >> (62 - t)) == 0) |
732 | + t++; |
733 | + |
734 | + assert(t <= 62); |
735 | + if (t) { |
736 | + hzm = hzm << t | lzm >> (64 - t); |
737 | + lzm = lzm << t; |
738 | + ze -= t; |
739 | } |
740 | } |
741 | |
742 | - return ieee754dp_format(zs, ze, zm); |
743 | + /* |
744 | + * Move explicit bit from bit 126 to bit 55 since the |
745 | + * ieee754dp_format code expects the mantissa to be |
746 | + * 56 bits wide (53 + 3 rounding bits). |
747 | + */ |
748 | + srl128(&hzm, &lzm, (126 - 55)); |
749 | + |
750 | + return ieee754dp_format(zs, ze, lzm); |
751 | } |
752 | |
753 | union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x, |
754 | @@ -281,5 +343,5 @@ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x, |
755 | union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x, |
756 | union ieee754dp y) |
757 | { |
758 | - return _dp_maddf(z, x, y, maddf_negate_product); |
759 | + return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT); |
760 | } |
761 | diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h |
762 | index 8bc2f6963324..dd2071f430e0 100644 |
763 | --- a/arch/mips/math-emu/ieee754int.h |
764 | +++ b/arch/mips/math-emu/ieee754int.h |
765 | @@ -26,6 +26,10 @@ |
766 | |
767 | #define CLPAIR(x, y) ((x)*6+(y)) |
768 | |
769 | +enum maddf_flags { |
770 | + MADDF_NEGATE_PRODUCT = 1 << 0, |
771 | +}; |
772 | + |
773 | static inline void ieee754_clearcx(void) |
774 | { |
775 | ieee754_csr.cx = 0; |
776 | diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h |
777 | index 8476067075fe..0f63e4202cff 100644 |
778 | --- a/arch/mips/math-emu/ieee754sp.h |
779 | +++ b/arch/mips/math-emu/ieee754sp.h |
780 | @@ -45,6 +45,10 @@ static inline int ieee754sp_finite(union ieee754sp x) |
781 | return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS; |
782 | } |
783 | |
784 | +/* 64 bit right shift with rounding */ |
785 | +#define XSPSRS64(v, rs) \ |
786 | + (((rs) >= 64) ? ((v) != 0) : ((v) >> (rs)) | ((v) << (64-(rs)) != 0)) |
787 | + |
788 | /* 3bit extended single precision sticky right shift */ |
789 | #define XSPSRS(v, rs) \ |
790 | ((rs > (SP_FBITS+3))?1:((v) >> (rs)) | ((v) << (32-(rs)) != 0)) |
791 | diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c |
792 | index 4d000844e48e..74a5a00d2f22 100644 |
793 | --- a/arch/mips/math-emu/sp_fmax.c |
794 | +++ b/arch/mips/math-emu/sp_fmax.c |
795 | @@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y) |
796 | case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): |
797 | return ieee754sp_nanxcpt(x); |
798 | |
799 | - /* numbers are preferred to NaNs */ |
800 | + /* |
801 | + * Quiet NaN handling |
802 | + */ |
803 | + |
804 | + /* |
805 | + * The case of both inputs quiet NaNs |
806 | + */ |
807 | + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
808 | + return x; |
809 | + |
810 | + /* |
811 | + * The cases of exactly one input quiet NaN (numbers |
812 | + * are here preferred as returned values to NaNs) |
813 | + */ |
814 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): |
815 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): |
816 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): |
817 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): |
818 | return x; |
819 | |
820 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
821 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): |
822 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): |
823 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): |
824 | @@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y) |
825 | return ys ? x : y; |
826 | |
827 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): |
828 | - if (xs == ys) |
829 | - return x; |
830 | - return ieee754sp_zero(1); |
831 | + return ieee754sp_zero(xs & ys); |
832 | |
833 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): |
834 | SPDNORMX; |
835 | @@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y) |
836 | else if (xs < ys) |
837 | return x; |
838 | |
839 | - /* Compare exponent */ |
840 | - if (xe > ye) |
841 | - return x; |
842 | - else if (xe < ye) |
843 | - return y; |
844 | + /* Signs of inputs are equal, let's compare exponents */ |
845 | + if (xs == 0) { |
846 | + /* Inputs are both positive */ |
847 | + if (xe > ye) |
848 | + return x; |
849 | + else if (xe < ye) |
850 | + return y; |
851 | + } else { |
852 | + /* Inputs are both negative */ |
853 | + if (xe > ye) |
854 | + return y; |
855 | + else if (xe < ye) |
856 | + return x; |
857 | + } |
858 | |
859 | - /* Compare mantissa */ |
860 | + /* Signs and exponents of inputs are equal, let's compare mantissas */ |
861 | + if (xs == 0) { |
862 | + /* Inputs are both positive, with equal signs and exponents */ |
863 | + if (xm <= ym) |
864 | + return y; |
865 | + return x; |
866 | + } |
867 | + /* Inputs are both negative, with equal signs and exponents */ |
868 | if (xm <= ym) |
869 | - return y; |
870 | - return x; |
871 | + return x; |
872 | + return y; |
873 | } |
874 | |
875 | union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) |
876 | @@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) |
877 | case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): |
878 | return ieee754sp_nanxcpt(x); |
879 | |
880 | - /* numbers are preferred to NaNs */ |
881 | + /* |
882 | + * Quiet NaN handling |
883 | + */ |
884 | + |
885 | + /* |
886 | + * The case of both inputs quiet NaNs |
887 | + */ |
888 | + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
889 | + return x; |
890 | + |
891 | + /* |
892 | + * The cases of exactly one input quiet NaN (numbers |
893 | + * are here preferred as returned values to NaNs) |
894 | + */ |
895 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): |
896 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): |
897 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): |
898 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): |
899 | return x; |
900 | |
901 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
902 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): |
903 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): |
904 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): |
905 | @@ -164,6 +202,9 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) |
906 | /* |
907 | * Infinity and zero handling |
908 | */ |
909 | + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): |
910 | + return ieee754sp_inf(xs & ys); |
911 | + |
912 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): |
913 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): |
914 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): |
915 | @@ -171,7 +212,6 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) |
916 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): |
917 | return x; |
918 | |
919 | - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): |
920 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): |
921 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): |
922 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): |
923 | @@ -180,9 +220,7 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) |
924 | return y; |
925 | |
926 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): |
927 | - if (xs == ys) |
928 | - return x; |
929 | - return ieee754sp_zero(1); |
930 | + return ieee754sp_zero(xs & ys); |
931 | |
932 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): |
933 | SPDNORMX; |
934 | @@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) |
935 | return y; |
936 | |
937 | /* Compare mantissa */ |
938 | - if (xm <= ym) |
939 | + if (xm < ym) |
940 | return y; |
941 | - return x; |
942 | + else if (xm > ym) |
943 | + return x; |
944 | + else if (xs == 0) |
945 | + return x; |
946 | + return y; |
947 | } |
948 | diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c |
949 | index 4eb1bb9e9dec..c51385f46b09 100644 |
950 | --- a/arch/mips/math-emu/sp_fmin.c |
951 | +++ b/arch/mips/math-emu/sp_fmin.c |
952 | @@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y) |
953 | case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): |
954 | return ieee754sp_nanxcpt(x); |
955 | |
956 | - /* numbers are preferred to NaNs */ |
957 | + /* |
958 | + * Quiet NaN handling |
959 | + */ |
960 | + |
961 | + /* |
962 | + * The case of both inputs quiet NaNs |
963 | + */ |
964 | + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
965 | + return x; |
966 | + |
967 | + /* |
968 | + * The cases of exactly one input quiet NaN (numbers |
969 | + * are here preferred as returned values to NaNs) |
970 | + */ |
971 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): |
972 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): |
973 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): |
974 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): |
975 | return x; |
976 | |
977 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
978 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): |
979 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): |
980 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): |
981 | @@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y) |
982 | return ys ? y : x; |
983 | |
984 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): |
985 | - if (xs == ys) |
986 | - return x; |
987 | - return ieee754sp_zero(1); |
988 | + return ieee754sp_zero(xs | ys); |
989 | |
990 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): |
991 | SPDNORMX; |
992 | @@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y) |
993 | else if (xs < ys) |
994 | return y; |
995 | |
996 | - /* Compare exponent */ |
997 | - if (xe > ye) |
998 | - return y; |
999 | - else if (xe < ye) |
1000 | - return x; |
1001 | + /* Signs of inputs are the same, let's compare exponents */ |
1002 | + if (xs == 0) { |
1003 | + /* Inputs are both positive */ |
1004 | + if (xe > ye) |
1005 | + return y; |
1006 | + else if (xe < ye) |
1007 | + return x; |
1008 | + } else { |
1009 | + /* Inputs are both negative */ |
1010 | + if (xe > ye) |
1011 | + return x; |
1012 | + else if (xe < ye) |
1013 | + return y; |
1014 | + } |
1015 | |
1016 | - /* Compare mantissa */ |
1017 | + /* Signs and exponents of inputs are equal, let's compare mantissas */ |
1018 | + if (xs == 0) { |
1019 | + /* Inputs are both positive, with equal signs and exponents */ |
1020 | + if (xm <= ym) |
1021 | + return x; |
1022 | + return y; |
1023 | + } |
1024 | + /* Inputs are both negative, with equal signs and exponents */ |
1025 | if (xm <= ym) |
1026 | - return x; |
1027 | - return y; |
1028 | + return y; |
1029 | + return x; |
1030 | } |
1031 | |
1032 | union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y) |
1033 | @@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y) |
1034 | case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): |
1035 | return ieee754sp_nanxcpt(x); |
1036 | |
1037 | - /* numbers are preferred to NaNs */ |
1038 | + /* |
1039 | + * Quiet NaN handling |
1040 | + */ |
1041 | + |
1042 | + /* |
1043 | + * The case of both inputs quiet NaNs |
1044 | + */ |
1045 | + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
1046 | + return x; |
1047 | + |
1048 | + /* |
1049 | + * The cases of exactly one input quiet NaN (numbers |
1050 | + * are here preferred as returned values to NaNs) |
1051 | + */ |
1052 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): |
1053 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): |
1054 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): |
1055 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): |
1056 | return x; |
1057 | |
1058 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
1059 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): |
1060 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): |
1061 | case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): |
1062 | @@ -164,25 +202,25 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y) |
1063 | /* |
1064 | * Infinity and zero handling |
1065 | */ |
1066 | + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): |
1067 | + return ieee754sp_inf(xs | ys); |
1068 | + |
1069 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): |
1070 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): |
1071 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): |
1072 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): |
1073 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): |
1074 | - return x; |
1075 | + return y; |
1076 | |
1077 | - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): |
1078 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): |
1079 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): |
1080 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): |
1081 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): |
1082 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): |
1083 | - return y; |
1084 | + return x; |
1085 | |
1086 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): |
1087 | - if (xs == ys) |
1088 | - return x; |
1089 | - return ieee754sp_zero(1); |
1090 | + return ieee754sp_zero(xs | ys); |
1091 | |
1092 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): |
1093 | SPDNORMX; |
1094 | @@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y) |
1095 | return x; |
1096 | |
1097 | /* Compare mantissa */ |
1098 | - if (xm <= ym) |
1099 | + if (xm < ym) |
1100 | + return x; |
1101 | + else if (xm > ym) |
1102 | + return y; |
1103 | + else if (xs == 1) |
1104 | return x; |
1105 | return y; |
1106 | } |
1107 | diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c |
1108 | index c91d5e5d9b5f..7195fe785d81 100644 |
1109 | --- a/arch/mips/math-emu/sp_maddf.c |
1110 | +++ b/arch/mips/math-emu/sp_maddf.c |
1111 | @@ -14,9 +14,6 @@ |
1112 | |
1113 | #include "ieee754sp.h" |
1114 | |
1115 | -enum maddf_flags { |
1116 | - maddf_negate_product = 1 << 0, |
1117 | -}; |
1118 | |
1119 | static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, |
1120 | union ieee754sp y, enum maddf_flags flags) |
1121 | @@ -24,14 +21,8 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, |
1122 | int re; |
1123 | int rs; |
1124 | unsigned rm; |
1125 | - unsigned short lxm; |
1126 | - unsigned short hxm; |
1127 | - unsigned short lym; |
1128 | - unsigned short hym; |
1129 | - unsigned lrm; |
1130 | - unsigned hrm; |
1131 | - unsigned t; |
1132 | - unsigned at; |
1133 | + uint64_t rm64; |
1134 | + uint64_t zm64; |
1135 | int s; |
1136 | |
1137 | COMPXSP; |
1138 | @@ -48,51 +39,35 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, |
1139 | |
1140 | ieee754_clearcx(); |
1141 | |
1142 | - switch (zc) { |
1143 | - case IEEE754_CLASS_SNAN: |
1144 | - ieee754_setcx(IEEE754_INVALID_OPERATION); |
1145 | + /* |
1146 | + * Handle the cases when at least one of x, y or z is a NaN. |
1147 | + * Order of precedence is sNaN, qNaN and z, x, y. |
1148 | + */ |
1149 | + if (zc == IEEE754_CLASS_SNAN) |
1150 | return ieee754sp_nanxcpt(z); |
1151 | - case IEEE754_CLASS_DNORM: |
1152 | - SPDNORMZ; |
1153 | - /* QNAN and ZERO cases are handled separately below */ |
1154 | - } |
1155 | - |
1156 | - switch (CLPAIR(xc, yc)) { |
1157 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): |
1158 | - case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): |
1159 | - case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): |
1160 | - case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): |
1161 | - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): |
1162 | + if (xc == IEEE754_CLASS_SNAN) |
1163 | + return ieee754sp_nanxcpt(x); |
1164 | + if (yc == IEEE754_CLASS_SNAN) |
1165 | return ieee754sp_nanxcpt(y); |
1166 | + if (zc == IEEE754_CLASS_QNAN) |
1167 | + return z; |
1168 | + if (xc == IEEE754_CLASS_QNAN) |
1169 | + return x; |
1170 | + if (yc == IEEE754_CLASS_QNAN) |
1171 | + return y; |
1172 | |
1173 | - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): |
1174 | - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): |
1175 | - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): |
1176 | - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): |
1177 | - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): |
1178 | - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): |
1179 | - return ieee754sp_nanxcpt(x); |
1180 | + if (zc == IEEE754_CLASS_DNORM) |
1181 | + SPDNORMZ; |
1182 | + /* ZERO z cases are handled separately below */ |
1183 | |
1184 | - case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): |
1185 | - case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): |
1186 | - case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): |
1187 | - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): |
1188 | - return y; |
1189 | + switch (CLPAIR(xc, yc)) { |
1190 | |
1191 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): |
1192 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): |
1193 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): |
1194 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): |
1195 | - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): |
1196 | - return x; |
1197 | |
1198 | /* |
1199 | * Infinity handling |
1200 | */ |
1201 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): |
1202 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): |
1203 | - if (zc == IEEE754_CLASS_QNAN) |
1204 | - return z; |
1205 | ieee754_setcx(IEEE754_INVALID_OPERATION); |
1206 | return ieee754sp_indef(); |
1207 | |
1208 | @@ -101,9 +76,27 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, |
1209 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): |
1210 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): |
1211 | case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): |
1212 | - if (zc == IEEE754_CLASS_QNAN) |
1213 | - return z; |
1214 | - return ieee754sp_inf(xs ^ ys); |
1215 | + if ((zc == IEEE754_CLASS_INF) && |
1216 | + ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) || |
1217 | + ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) { |
1218 | + /* |
1219 | + * Cases of addition of infinities with opposite signs |
1220 | + * or subtraction of infinities with same signs. |
1221 | + */ |
1222 | + ieee754_setcx(IEEE754_INVALID_OPERATION); |
1223 | + return ieee754sp_indef(); |
1224 | + } |
1225 | + /* |
1226 | + * z is here either not an infinity, or an infinity having the |
1227 | + * same sign as product (x*y) (in case of MADDF.D instruction) |
1228 | + * or product -(x*y) (in MSUBF.D case). The result must be an |
1229 | + * infinity, and its sign is determined only by the value of |
1230 | + * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y. |
1231 | + */ |
1232 | + if (flags & MADDF_NEGATE_PRODUCT) |
1233 | + return ieee754sp_inf(1 ^ (xs ^ ys)); |
1234 | + else |
1235 | + return ieee754sp_inf(xs ^ ys); |
1236 | |
1237 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): |
1238 | case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): |
1239 | @@ -112,32 +105,42 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, |
1240 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): |
1241 | if (zc == IEEE754_CLASS_INF) |
1242 | return ieee754sp_inf(zs); |
1243 | - /* Multiplication is 0 so just return z */ |
1244 | + if (zc == IEEE754_CLASS_ZERO) { |
1245 | + /* Handle cases +0 + (-0) and similar ones. */ |
1246 | + if ((!(flags & MADDF_NEGATE_PRODUCT) |
1247 | + && (zs == (xs ^ ys))) || |
1248 | + ((flags & MADDF_NEGATE_PRODUCT) |
1249 | + && (zs != (xs ^ ys)))) |
1250 | + /* |
1251 | + * Cases of addition of zeros of equal signs |
1252 | + * or subtraction of zeroes of opposite signs. |
1253 | + * The sign of the resulting zero is in any |
1254 | + * such case determined only by the sign of z. |
1255 | + */ |
1256 | + return z; |
1257 | + |
1258 | + return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD); |
1259 | + } |
1260 | + /* x*y is here 0, and z is not 0, so just return z */ |
1261 | return z; |
1262 | |
1263 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): |
1264 | SPDNORMX; |
1265 | |
1266 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): |
1267 | - if (zc == IEEE754_CLASS_QNAN) |
1268 | - return z; |
1269 | - else if (zc == IEEE754_CLASS_INF) |
1270 | + if (zc == IEEE754_CLASS_INF) |
1271 | return ieee754sp_inf(zs); |
1272 | SPDNORMY; |
1273 | break; |
1274 | |
1275 | case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): |
1276 | - if (zc == IEEE754_CLASS_QNAN) |
1277 | - return z; |
1278 | - else if (zc == IEEE754_CLASS_INF) |
1279 | + if (zc == IEEE754_CLASS_INF) |
1280 | return ieee754sp_inf(zs); |
1281 | SPDNORMX; |
1282 | break; |
1283 | |
1284 | case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): |
1285 | - if (zc == IEEE754_CLASS_QNAN) |
1286 | - return z; |
1287 | - else if (zc == IEEE754_CLASS_INF) |
1288 | + if (zc == IEEE754_CLASS_INF) |
1289 | return ieee754sp_inf(zs); |
1290 | /* fall through to real computations */ |
1291 | } |
1292 | @@ -158,111 +161,93 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, |
1293 | |
1294 | re = xe + ye; |
1295 | rs = xs ^ ys; |
1296 | - if (flags & maddf_negate_product) |
1297 | + if (flags & MADDF_NEGATE_PRODUCT) |
1298 | rs ^= 1; |
1299 | |
1300 | - /* shunt to top of word */ |
1301 | - xm <<= 32 - (SP_FBITS + 1); |
1302 | - ym <<= 32 - (SP_FBITS + 1); |
1303 | - |
1304 | - /* |
1305 | - * Multiply 32 bits xm, ym to give high 32 bits rm with stickness. |
1306 | - */ |
1307 | - lxm = xm & 0xffff; |
1308 | - hxm = xm >> 16; |
1309 | - lym = ym & 0xffff; |
1310 | - hym = ym >> 16; |
1311 | - |
1312 | - lrm = lxm * lym; /* 16 * 16 => 32 */ |
1313 | - hrm = hxm * hym; /* 16 * 16 => 32 */ |
1314 | - |
1315 | - t = lxm * hym; /* 16 * 16 => 32 */ |
1316 | - at = lrm + (t << 16); |
1317 | - hrm += at < lrm; |
1318 | - lrm = at; |
1319 | - hrm = hrm + (t >> 16); |
1320 | + /* Multiple 24 bit xm and ym to give 48 bit results */ |
1321 | + rm64 = (uint64_t)xm * ym; |
1322 | |
1323 | - t = hxm * lym; /* 16 * 16 => 32 */ |
1324 | - at = lrm + (t << 16); |
1325 | - hrm += at < lrm; |
1326 | - lrm = at; |
1327 | - hrm = hrm + (t >> 16); |
1328 | + /* Shunt to top of word */ |
1329 | + rm64 = rm64 << 16; |
1330 | |
1331 | - rm = hrm | (lrm != 0); |
1332 | - |
1333 | - /* |
1334 | - * Sticky shift down to normal rounding precision. |
1335 | - */ |
1336 | - if ((int) rm < 0) { |
1337 | - rm = (rm >> (32 - (SP_FBITS + 1 + 3))) | |
1338 | - ((rm << (SP_FBITS + 1 + 3)) != 0); |
1339 | + /* Put explicit bit at bit 62 if necessary */ |
1340 | + if ((int64_t) rm64 < 0) { |
1341 | + rm64 = rm64 >> 1; |
1342 | re++; |
1343 | - } else { |
1344 | - rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) | |
1345 | - ((rm << (SP_FBITS + 1 + 3 + 1)) != 0); |
1346 | } |
1347 | - assert(rm & (SP_HIDDEN_BIT << 3)); |
1348 | - |
1349 | - if (zc == IEEE754_CLASS_ZERO) |
1350 | - return ieee754sp_format(rs, re, rm); |
1351 | |
1352 | - /* And now the addition */ |
1353 | + assert(rm64 & (1 << 62)); |
1354 | |
1355 | - assert(zm & SP_HIDDEN_BIT); |
1356 | + if (zc == IEEE754_CLASS_ZERO) { |
1357 | + /* |
1358 | + * Move explicit bit from bit 62 to bit 26 since the |
1359 | + * ieee754sp_format code expects the mantissa to be |
1360 | + * 27 bits wide (24 + 3 rounding bits). |
1361 | + */ |
1362 | + rm = XSPSRS64(rm64, (62 - 26)); |
1363 | + return ieee754sp_format(rs, re, rm); |
1364 | + } |
1365 | |
1366 | - /* |
1367 | - * Provide guard,round and stick bit space. |
1368 | - */ |
1369 | - zm <<= 3; |
1370 | + /* Move explicit bit from bit 23 to bit 62 */ |
1371 | + zm64 = (uint64_t)zm << (62 - 23); |
1372 | + assert(zm64 & (1 << 62)); |
1373 | |
1374 | + /* Make the exponents the same */ |
1375 | if (ze > re) { |
1376 | /* |
1377 | * Have to shift r fraction right to align. |
1378 | */ |
1379 | s = ze - re; |
1380 | - rm = XSPSRS(rm, s); |
1381 | + rm64 = XSPSRS64(rm64, s); |
1382 | re += s; |
1383 | } else if (re > ze) { |
1384 | /* |
1385 | * Have to shift z fraction right to align. |
1386 | */ |
1387 | s = re - ze; |
1388 | - zm = XSPSRS(zm, s); |
1389 | + zm64 = XSPSRS64(zm64, s); |
1390 | ze += s; |
1391 | } |
1392 | assert(ze == re); |
1393 | assert(ze <= SP_EMAX); |
1394 | |
1395 | + /* Do the addition */ |
1396 | if (zs == rs) { |
1397 | /* |
1398 | - * Generate 28 bit result of adding two 27 bit numbers |
1399 | - * leaving result in zm, zs and ze. |
1400 | + * Generate 64 bit result by adding two 63 bit numbers |
1401 | + * leaving result in zm64, zs and ze. |
1402 | */ |
1403 | - zm = zm + rm; |
1404 | - |
1405 | - if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */ |
1406 | - zm = XSPSRS1(zm); |
1407 | + zm64 = zm64 + rm64; |
1408 | + if ((int64_t)zm64 < 0) { /* carry out */ |
1409 | + zm64 = XSPSRS1(zm64); |
1410 | ze++; |
1411 | } |
1412 | } else { |
1413 | - if (zm >= rm) { |
1414 | - zm = zm - rm; |
1415 | + if (zm64 >= rm64) { |
1416 | + zm64 = zm64 - rm64; |
1417 | } else { |
1418 | - zm = rm - zm; |
1419 | + zm64 = rm64 - zm64; |
1420 | zs = rs; |
1421 | } |
1422 | - if (zm == 0) |
1423 | + if (zm64 == 0) |
1424 | return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD); |
1425 | |
1426 | /* |
1427 | - * Normalize in extended single precision |
1428 | + * Put explicit bit at bit 62 if necessary. |
1429 | */ |
1430 | - while ((zm >> (SP_MBITS + 3)) == 0) { |
1431 | - zm <<= 1; |
1432 | + while ((zm64 >> 62) == 0) { |
1433 | + zm64 <<= 1; |
1434 | ze--; |
1435 | } |
1436 | - |
1437 | } |
1438 | + |
1439 | + /* |
1440 | + * Move explicit bit from bit 62 to bit 26 since the |
1441 | + * ieee754sp_format code expects the mantissa to be |
1442 | + * 27 bits wide (24 + 3 rounding bits). |
1443 | + */ |
1444 | + zm = XSPSRS64(zm64, (62 - 26)); |
1445 | + |
1446 | return ieee754sp_format(zs, ze, zm); |
1447 | } |
1448 | |
1449 | @@ -275,5 +260,5 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x, |
1450 | union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x, |
1451 | union ieee754sp y) |
1452 | { |
1453 | - return _sp_maddf(z, x, y, maddf_negate_product); |
1454 | + return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT); |
1455 | } |
1456 | diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c |
1457 | index ec7a8b099dd9..fd3c1fcc73eb 100644 |
1458 | --- a/arch/powerpc/kernel/align.c |
1459 | +++ b/arch/powerpc/kernel/align.c |
1460 | @@ -235,6 +235,28 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr) |
1461 | |
1462 | #define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz)) |
1463 | |
1464 | +#define __get_user_or_set_dar(_regs, _dest, _addr) \ |
1465 | + ({ \ |
1466 | + int rc = 0; \ |
1467 | + typeof(_addr) __addr = (_addr); \ |
1468 | + if (__get_user_inatomic(_dest, __addr)) { \ |
1469 | + _regs->dar = (unsigned long)__addr; \ |
1470 | + rc = -EFAULT; \ |
1471 | + } \ |
1472 | + rc; \ |
1473 | + }) |
1474 | + |
1475 | +#define __put_user_or_set_dar(_regs, _src, _addr) \ |
1476 | + ({ \ |
1477 | + int rc = 0; \ |
1478 | + typeof(_addr) __addr = (_addr); \ |
1479 | + if (__put_user_inatomic(_src, __addr)) { \ |
1480 | + _regs->dar = (unsigned long)__addr; \ |
1481 | + rc = -EFAULT; \ |
1482 | + } \ |
1483 | + rc; \ |
1484 | + }) |
1485 | + |
1486 | static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, |
1487 | unsigned int reg, unsigned int nb, |
1488 | unsigned int flags, unsigned int instr, |
1489 | @@ -263,9 +285,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, |
1490 | } else { |
1491 | unsigned long pc = regs->nip ^ (swiz & 4); |
1492 | |
1493 | - if (__get_user_inatomic(instr, |
1494 | - (unsigned int __user *)pc)) |
1495 | + if (__get_user_or_set_dar(regs, instr, |
1496 | + (unsigned int __user *)pc)) |
1497 | return -EFAULT; |
1498 | + |
1499 | if (swiz == 0 && (flags & SW)) |
1500 | instr = cpu_to_le32(instr); |
1501 | nb = (instr >> 11) & 0x1f; |
1502 | @@ -309,31 +332,31 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, |
1503 | ((nb0 + 3) / 4) * sizeof(unsigned long)); |
1504 | |
1505 | for (i = 0; i < nb; ++i, ++p) |
1506 | - if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz), |
1507 | - SWIZ_PTR(p))) |
1508 | + if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz), |
1509 | + SWIZ_PTR(p))) |
1510 | return -EFAULT; |
1511 | if (nb0 > 0) { |
1512 | rptr = ®s->gpr[0]; |
1513 | addr += nb; |
1514 | for (i = 0; i < nb0; ++i, ++p) |
1515 | - if (__get_user_inatomic(REG_BYTE(rptr, |
1516 | - i ^ bswiz), |
1517 | - SWIZ_PTR(p))) |
1518 | + if (__get_user_or_set_dar(regs, |
1519 | + REG_BYTE(rptr, i ^ bswiz), |
1520 | + SWIZ_PTR(p))) |
1521 | return -EFAULT; |
1522 | } |
1523 | |
1524 | } else { |
1525 | for (i = 0; i < nb; ++i, ++p) |
1526 | - if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz), |
1527 | - SWIZ_PTR(p))) |
1528 | + if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz), |
1529 | + SWIZ_PTR(p))) |
1530 | return -EFAULT; |
1531 | if (nb0 > 0) { |
1532 | rptr = ®s->gpr[0]; |
1533 | addr += nb; |
1534 | for (i = 0; i < nb0; ++i, ++p) |
1535 | - if (__put_user_inatomic(REG_BYTE(rptr, |
1536 | - i ^ bswiz), |
1537 | - SWIZ_PTR(p))) |
1538 | + if (__put_user_or_set_dar(regs, |
1539 | + REG_BYTE(rptr, i ^ bswiz), |
1540 | + SWIZ_PTR(p))) |
1541 | return -EFAULT; |
1542 | } |
1543 | } |
1544 | @@ -345,29 +368,32 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, |
1545 | * Only POWER6 has these instructions, and it does true little-endian, |
1546 | * so we don't need the address swizzling. |
1547 | */ |
1548 | -static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg, |
1549 | - unsigned int flags) |
1550 | +static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr, |
1551 | + unsigned int reg, unsigned int flags) |
1552 | { |
1553 | char *ptr0 = (char *) ¤t->thread.TS_FPR(reg); |
1554 | char *ptr1 = (char *) ¤t->thread.TS_FPR(reg+1); |
1555 | - int i, ret, sw = 0; |
1556 | + int i, sw = 0; |
1557 | |
1558 | if (reg & 1) |
1559 | return 0; /* invalid form: FRS/FRT must be even */ |
1560 | if (flags & SW) |
1561 | sw = 7; |
1562 | - ret = 0; |
1563 | + |
1564 | for (i = 0; i < 8; ++i) { |
1565 | if (!(flags & ST)) { |
1566 | - ret |= __get_user(ptr0[i^sw], addr + i); |
1567 | - ret |= __get_user(ptr1[i^sw], addr + i + 8); |
1568 | + if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i)) |
1569 | + return -EFAULT; |
1570 | + if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8)) |
1571 | + return -EFAULT; |
1572 | } else { |
1573 | - ret |= __put_user(ptr0[i^sw], addr + i); |
1574 | - ret |= __put_user(ptr1[i^sw], addr + i + 8); |
1575 | + if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i)) |
1576 | + return -EFAULT; |
1577 | + if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8)) |
1578 | + return -EFAULT; |
1579 | } |
1580 | } |
1581 | - if (ret) |
1582 | - return -EFAULT; |
1583 | + |
1584 | return 1; /* exception handled and fixed up */ |
1585 | } |
1586 | |
1587 | @@ -377,24 +403,27 @@ static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr, |
1588 | { |
1589 | char *ptr0 = (char *)®s->gpr[reg]; |
1590 | char *ptr1 = (char *)®s->gpr[reg+1]; |
1591 | - int i, ret, sw = 0; |
1592 | + int i, sw = 0; |
1593 | |
1594 | if (reg & 1) |
1595 | return 0; /* invalid form: GPR must be even */ |
1596 | if (flags & SW) |
1597 | sw = 7; |
1598 | - ret = 0; |
1599 | + |
1600 | for (i = 0; i < 8; ++i) { |
1601 | if (!(flags & ST)) { |
1602 | - ret |= __get_user(ptr0[i^sw], addr + i); |
1603 | - ret |= __get_user(ptr1[i^sw], addr + i + 8); |
1604 | + if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i)) |
1605 | + return -EFAULT; |
1606 | + if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8)) |
1607 | + return -EFAULT; |
1608 | } else { |
1609 | - ret |= __put_user(ptr0[i^sw], addr + i); |
1610 | - ret |= __put_user(ptr1[i^sw], addr + i + 8); |
1611 | + if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i)) |
1612 | + return -EFAULT; |
1613 | + if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8)) |
1614 | + return -EFAULT; |
1615 | } |
1616 | } |
1617 | - if (ret) |
1618 | - return -EFAULT; |
1619 | + |
1620 | return 1; /* exception handled and fixed up */ |
1621 | } |
1622 | #endif /* CONFIG_PPC64 */ |
1623 | @@ -687,9 +716,14 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg, |
1624 | for (j = 0; j < length; j += elsize) { |
1625 | for (i = 0; i < elsize; ++i) { |
1626 | if (flags & ST) |
1627 | - ret |= __put_user(ptr[i^sw], addr + i); |
1628 | + ret = __put_user_or_set_dar(regs, ptr[i^sw], |
1629 | + addr + i); |
1630 | else |
1631 | - ret |= __get_user(ptr[i^sw], addr + i); |
1632 | + ret = __get_user_or_set_dar(regs, ptr[i^sw], |
1633 | + addr + i); |
1634 | + |
1635 | + if (ret) |
1636 | + return ret; |
1637 | } |
1638 | ptr += elsize; |
1639 | #ifdef __LITTLE_ENDIAN__ |
1640 | @@ -739,7 +773,7 @@ int fix_alignment(struct pt_regs *regs) |
1641 | unsigned int dsisr; |
1642 | unsigned char __user *addr; |
1643 | unsigned long p, swiz; |
1644 | - int ret, i; |
1645 | + int i; |
1646 | union data { |
1647 | u64 ll; |
1648 | double dd; |
1649 | @@ -936,7 +970,7 @@ int fix_alignment(struct pt_regs *regs) |
1650 | if (flags & F) { |
1651 | /* Special case for 16-byte FP loads and stores */ |
1652 | PPC_WARN_ALIGNMENT(fp_pair, regs); |
1653 | - return emulate_fp_pair(addr, reg, flags); |
1654 | + return emulate_fp_pair(regs, addr, reg, flags); |
1655 | } else { |
1656 | #ifdef CONFIG_PPC64 |
1657 | /* Special case for 16-byte loads and stores */ |
1658 | @@ -966,15 +1000,12 @@ int fix_alignment(struct pt_regs *regs) |
1659 | } |
1660 | |
1661 | data.ll = 0; |
1662 | - ret = 0; |
1663 | p = (unsigned long)addr; |
1664 | |
1665 | for (i = 0; i < nb; i++) |
1666 | - ret |= __get_user_inatomic(data.v[start + i], |
1667 | - SWIZ_PTR(p++)); |
1668 | - |
1669 | - if (unlikely(ret)) |
1670 | - return -EFAULT; |
1671 | + if (__get_user_or_set_dar(regs, data.v[start + i], |
1672 | + SWIZ_PTR(p++))) |
1673 | + return -EFAULT; |
1674 | |
1675 | } else if (flags & F) { |
1676 | data.ll = current->thread.TS_FPR(reg); |
1677 | @@ -1046,15 +1077,13 @@ int fix_alignment(struct pt_regs *regs) |
1678 | break; |
1679 | } |
1680 | |
1681 | - ret = 0; |
1682 | p = (unsigned long)addr; |
1683 | |
1684 | for (i = 0; i < nb; i++) |
1685 | - ret |= __put_user_inatomic(data.v[start + i], |
1686 | - SWIZ_PTR(p++)); |
1687 | + if (__put_user_or_set_dar(regs, data.v[start + i], |
1688 | + SWIZ_PTR(p++))) |
1689 | + return -EFAULT; |
1690 | |
1691 | - if (unlikely(ret)) |
1692 | - return -EFAULT; |
1693 | } else if (flags & F) |
1694 | current->thread.TS_FPR(reg) = data.ll; |
1695 | else |
1696 | diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c |
1697 | index 4c7b8591f737..2cb6cbea4b3b 100644 |
1698 | --- a/arch/powerpc/platforms/powernv/npu-dma.c |
1699 | +++ b/arch/powerpc/platforms/powernv/npu-dma.c |
1700 | @@ -545,6 +545,12 @@ static void mmio_invalidate(struct npu_context *npu_context, int va, |
1701 | struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]; |
1702 | unsigned long pid = npu_context->mm->context.id; |
1703 | |
1704 | + /* |
1705 | + * Unfortunately the nest mmu does not support flushing specific |
1706 | + * addresses so we have to flush the whole mm. |
1707 | + */ |
1708 | + flush_tlb_mm(npu_context->mm); |
1709 | + |
1710 | /* |
1711 | * Loop over all the NPUs this process is active on and launch |
1712 | * an invalidate. |
1713 | @@ -576,12 +582,6 @@ static void mmio_invalidate(struct npu_context *npu_context, int va, |
1714 | } |
1715 | } |
1716 | |
1717 | - /* |
1718 | - * Unfortunately the nest mmu does not support flushing specific |
1719 | - * addresses so we have to flush the whole mm. |
1720 | - */ |
1721 | - flush_tlb_mm(npu_context->mm); |
1722 | - |
1723 | mmio_invalidate_wait(mmio_atsd_reg, flush); |
1724 | if (flush) |
1725 | /* Wait for the flush to complete */ |
1726 | diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c |
1727 | index ca9b2f4aaa22..bf2f43f7ac6a 100644 |
1728 | --- a/arch/powerpc/platforms/pseries/hotplug-memory.c |
1729 | +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c |
1730 | @@ -817,6 +817,9 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop) |
1731 | return -EINVAL; |
1732 | |
1733 | for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) { |
1734 | + if (lmbs[i].flags & DRCONF_MEM_ASSIGNED) |
1735 | + continue; |
1736 | + |
1737 | rc = dlpar_acquire_drc(lmbs[i].drc_index); |
1738 | if (rc) |
1739 | continue; |
1740 | @@ -859,6 +862,7 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop) |
1741 | lmbs[i].base_addr, lmbs[i].drc_index); |
1742 | lmbs[i].reserved = 0; |
1743 | } |
1744 | + rc = 0; |
1745 | } |
1746 | |
1747 | return rc; |
1748 | diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h |
1749 | index bd6f30304518..3525fe6e7e4c 100644 |
1750 | --- a/arch/s390/include/asm/mmu.h |
1751 | +++ b/arch/s390/include/asm/mmu.h |
1752 | @@ -5,6 +5,7 @@ |
1753 | #include <linux/errno.h> |
1754 | |
1755 | typedef struct { |
1756 | + spinlock_t lock; |
1757 | cpumask_t cpu_attach_mask; |
1758 | atomic_t flush_count; |
1759 | unsigned int flush_mm; |
1760 | @@ -27,6 +28,7 @@ typedef struct { |
1761 | } mm_context_t; |
1762 | |
1763 | #define INIT_MM_CONTEXT(name) \ |
1764 | + .context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock), \ |
1765 | .context.pgtable_lock = \ |
1766 | __SPIN_LOCK_UNLOCKED(name.context.pgtable_lock), \ |
1767 | .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \ |
1768 | diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h |
1769 | index 24bc41622a98..ebfb2f248ae9 100644 |
1770 | --- a/arch/s390/include/asm/mmu_context.h |
1771 | +++ b/arch/s390/include/asm/mmu_context.h |
1772 | @@ -16,6 +16,7 @@ |
1773 | static inline int init_new_context(struct task_struct *tsk, |
1774 | struct mm_struct *mm) |
1775 | { |
1776 | + spin_lock_init(&mm->context.lock); |
1777 | spin_lock_init(&mm->context.pgtable_lock); |
1778 | INIT_LIST_HEAD(&mm->context.pgtable_list); |
1779 | spin_lock_init(&mm->context.gmap_lock); |
1780 | @@ -102,7 +103,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
1781 | if (prev == next) |
1782 | return; |
1783 | cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); |
1784 | - cpumask_set_cpu(cpu, mm_cpumask(next)); |
1785 | /* Clear old ASCE by loading the kernel ASCE. */ |
1786 | __ctl_load(S390_lowcore.kernel_asce, 1, 1); |
1787 | __ctl_load(S390_lowcore.kernel_asce, 7, 7); |
1788 | @@ -120,9 +120,8 @@ static inline void finish_arch_post_lock_switch(void) |
1789 | preempt_disable(); |
1790 | while (atomic_read(&mm->context.flush_count)) |
1791 | cpu_relax(); |
1792 | - |
1793 | - if (mm->context.flush_mm) |
1794 | - __tlb_flush_mm(mm); |
1795 | + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); |
1796 | + __tlb_flush_mm_lazy(mm); |
1797 | preempt_enable(); |
1798 | } |
1799 | set_fs(current->thread.mm_segment); |
1800 | @@ -135,6 +134,7 @@ static inline void activate_mm(struct mm_struct *prev, |
1801 | struct mm_struct *next) |
1802 | { |
1803 | switch_mm(prev, next, current); |
1804 | + cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); |
1805 | set_user_asce(next); |
1806 | } |
1807 | |
1808 | diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h |
1809 | index 39846100682a..eed927aeb08f 100644 |
1810 | --- a/arch/s390/include/asm/tlbflush.h |
1811 | +++ b/arch/s390/include/asm/tlbflush.h |
1812 | @@ -43,23 +43,6 @@ static inline void __tlb_flush_global(void) |
1813 | * Flush TLB entries for a specific mm on all CPUs (in case gmap is used |
1814 | * this implicates multiple ASCEs!). |
1815 | */ |
1816 | -static inline void __tlb_flush_full(struct mm_struct *mm) |
1817 | -{ |
1818 | - preempt_disable(); |
1819 | - atomic_inc(&mm->context.flush_count); |
1820 | - if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { |
1821 | - /* Local TLB flush */ |
1822 | - __tlb_flush_local(); |
1823 | - } else { |
1824 | - /* Global TLB flush */ |
1825 | - __tlb_flush_global(); |
1826 | - /* Reset TLB flush mask */ |
1827 | - cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask); |
1828 | - } |
1829 | - atomic_dec(&mm->context.flush_count); |
1830 | - preempt_enable(); |
1831 | -} |
1832 | - |
1833 | static inline void __tlb_flush_mm(struct mm_struct *mm) |
1834 | { |
1835 | unsigned long gmap_asce; |
1836 | @@ -71,16 +54,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm) |
1837 | */ |
1838 | preempt_disable(); |
1839 | atomic_inc(&mm->context.flush_count); |
1840 | + /* Reset TLB flush mask */ |
1841 | + cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask); |
1842 | + barrier(); |
1843 | gmap_asce = READ_ONCE(mm->context.gmap_asce); |
1844 | if (MACHINE_HAS_IDTE && gmap_asce != -1UL) { |
1845 | if (gmap_asce) |
1846 | __tlb_flush_idte(gmap_asce); |
1847 | __tlb_flush_idte(mm->context.asce); |
1848 | } else { |
1849 | - __tlb_flush_full(mm); |
1850 | + /* Global TLB flush */ |
1851 | + __tlb_flush_global(); |
1852 | } |
1853 | - /* Reset TLB flush mask */ |
1854 | - cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask); |
1855 | atomic_dec(&mm->context.flush_count); |
1856 | preempt_enable(); |
1857 | } |
1858 | @@ -94,7 +79,6 @@ static inline void __tlb_flush_kernel(void) |
1859 | } |
1860 | #else |
1861 | #define __tlb_flush_global() __tlb_flush_local() |
1862 | -#define __tlb_flush_full(mm) __tlb_flush_local() |
1863 | |
1864 | /* |
1865 | * Flush TLB entries for a specific ASCE on all CPUs. |
1866 | @@ -112,10 +96,12 @@ static inline void __tlb_flush_kernel(void) |
1867 | |
1868 | static inline void __tlb_flush_mm_lazy(struct mm_struct * mm) |
1869 | { |
1870 | + spin_lock(&mm->context.lock); |
1871 | if (mm->context.flush_mm) { |
1872 | - __tlb_flush_mm(mm); |
1873 | mm->context.flush_mm = 0; |
1874 | + __tlb_flush_mm(mm); |
1875 | } |
1876 | + spin_unlock(&mm->context.lock); |
1877 | } |
1878 | |
1879 | /* |
1880 | diff --git a/block/blk-core.c b/block/blk-core.c |
1881 | index dbecbf4a64e0..658f67309602 100644 |
1882 | --- a/block/blk-core.c |
1883 | +++ b/block/blk-core.c |
1884 | @@ -280,7 +280,7 @@ EXPORT_SYMBOL(blk_start_queue_async); |
1885 | void blk_start_queue(struct request_queue *q) |
1886 | { |
1887 | lockdep_assert_held(q->queue_lock); |
1888 | - WARN_ON(!irqs_disabled()); |
1889 | + WARN_ON(!in_interrupt() && !irqs_disabled()); |
1890 | WARN_ON_ONCE(q->mq_ops); |
1891 | |
1892 | queue_flag_clear(QUEUE_FLAG_STOPPED, q); |
1893 | @@ -2330,7 +2330,12 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * |
1894 | if (q->mq_ops) { |
1895 | if (blk_queue_io_stat(q)) |
1896 | blk_account_io_start(rq, true); |
1897 | - blk_mq_sched_insert_request(rq, false, true, false, false); |
1898 | + /* |
1899 | + * Since we have a scheduler attached on the top device, |
1900 | + * bypass a potential scheduler on the bottom device for |
1901 | + * insert. |
1902 | + */ |
1903 | + blk_mq_request_bypass_insert(rq); |
1904 | return BLK_STS_OK; |
1905 | } |
1906 | |
1907 | diff --git a/block/blk-mq.c b/block/blk-mq.c |
1908 | index 4603b115e234..e0523eb8eee1 100644 |
1909 | --- a/block/blk-mq.c |
1910 | +++ b/block/blk-mq.c |
1911 | @@ -1357,6 +1357,22 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, |
1912 | blk_mq_hctx_mark_pending(hctx, ctx); |
1913 | } |
1914 | |
1915 | +/* |
1916 | + * Should only be used carefully, when the caller knows we want to |
1917 | + * bypass a potential IO scheduler on the target device. |
1918 | + */ |
1919 | +void blk_mq_request_bypass_insert(struct request *rq) |
1920 | +{ |
1921 | + struct blk_mq_ctx *ctx = rq->mq_ctx; |
1922 | + struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu); |
1923 | + |
1924 | + spin_lock(&hctx->lock); |
1925 | + list_add_tail(&rq->queuelist, &hctx->dispatch); |
1926 | + spin_unlock(&hctx->lock); |
1927 | + |
1928 | + blk_mq_run_hw_queue(hctx, false); |
1929 | +} |
1930 | + |
1931 | void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, |
1932 | struct list_head *list) |
1933 | |
1934 | diff --git a/block/blk-mq.h b/block/blk-mq.h |
1935 | index 60b01c0309bc..f64747914560 100644 |
1936 | --- a/block/blk-mq.h |
1937 | +++ b/block/blk-mq.h |
1938 | @@ -54,6 +54,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, |
1939 | */ |
1940 | void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, |
1941 | bool at_head); |
1942 | +void blk_mq_request_bypass_insert(struct request *rq); |
1943 | void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, |
1944 | struct list_head *list); |
1945 | |
1946 | diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c |
1947 | index 903605dbc1a5..76b875c69a95 100644 |
1948 | --- a/crypto/algif_skcipher.c |
1949 | +++ b/crypto/algif_skcipher.c |
1950 | @@ -144,8 +144,10 @@ static int skcipher_alloc_sgl(struct sock *sk) |
1951 | sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); |
1952 | sgl->cur = 0; |
1953 | |
1954 | - if (sg) |
1955 | + if (sg) { |
1956 | sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); |
1957 | + sg_unmark_end(sg + (MAX_SGL_ENTS - 1)); |
1958 | + } |
1959 | |
1960 | list_add_tail(&sgl->list, &ctx->tsgl); |
1961 | } |
1962 | diff --git a/crypto/scompress.c b/crypto/scompress.c |
1963 | index ae1d3cf209e4..0b40d991d65f 100644 |
1964 | --- a/crypto/scompress.c |
1965 | +++ b/crypto/scompress.c |
1966 | @@ -211,9 +211,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) |
1967 | scratch_dst, &req->dlen, *ctx); |
1968 | if (!ret) { |
1969 | if (!req->dst) { |
1970 | - req->dst = crypto_scomp_sg_alloc(req->dlen, |
1971 | - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? |
1972 | - GFP_KERNEL : GFP_ATOMIC); |
1973 | + req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC); |
1974 | if (!req->dst) |
1975 | goto out; |
1976 | } |
1977 | diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c |
1978 | index d0368682bd43..153f20ce318b 100644 |
1979 | --- a/drivers/block/skd_main.c |
1980 | +++ b/drivers/block/skd_main.c |
1981 | @@ -2160,6 +2160,9 @@ static void skd_send_fitmsg(struct skd_device *skdev, |
1982 | */ |
1983 | qcmd |= FIT_QCMD_MSGSIZE_64; |
1984 | |
1985 | + /* Make sure skd_msg_buf is written before the doorbell is triggered. */ |
1986 | + smp_wmb(); |
1987 | + |
1988 | SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); |
1989 | } |
1990 | |
1991 | @@ -2202,6 +2205,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev, |
1992 | qcmd = skspcl->mb_dma_address; |
1993 | qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; |
1994 | |
1995 | + /* Make sure skd_msg_buf is written before the doorbell is triggered. */ |
1996 | + smp_wmb(); |
1997 | + |
1998 | SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); |
1999 | } |
2000 | |
2001 | @@ -4539,15 +4545,16 @@ static void skd_free_disk(struct skd_device *skdev) |
2002 | { |
2003 | struct gendisk *disk = skdev->disk; |
2004 | |
2005 | - if (disk != NULL) { |
2006 | - struct request_queue *q = disk->queue; |
2007 | + if (disk && (disk->flags & GENHD_FL_UP)) |
2008 | + del_gendisk(disk); |
2009 | |
2010 | - if (disk->flags & GENHD_FL_UP) |
2011 | - del_gendisk(disk); |
2012 | - if (q) |
2013 | - blk_cleanup_queue(q); |
2014 | - put_disk(disk); |
2015 | + if (skdev->queue) { |
2016 | + blk_cleanup_queue(skdev->queue); |
2017 | + skdev->queue = NULL; |
2018 | + disk->queue = NULL; |
2019 | } |
2020 | + |
2021 | + put_disk(disk); |
2022 | skdev->disk = NULL; |
2023 | } |
2024 | |
2025 | diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c |
2026 | index 3425f2d9a2a1..fe0185ceac16 100644 |
2027 | --- a/drivers/crypto/caam/caamalg_qi.c |
2028 | +++ b/drivers/crypto/caam/caamalg_qi.c |
2029 | @@ -776,9 +776,9 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) |
2030 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); |
2031 | struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher); |
2032 | struct device *qidev = caam_ctx->qidev; |
2033 | -#ifdef DEBUG |
2034 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); |
2035 | |
2036 | +#ifdef DEBUG |
2037 | dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); |
2038 | #endif |
2039 | |
2040 | @@ -799,6 +799,13 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) |
2041 | ablkcipher_unmap(qidev, edesc, req); |
2042 | qi_cache_free(edesc); |
2043 | |
2044 | + /* |
2045 | + * The crypto API expects us to set the IV (req->info) to the last |
2046 | + * ciphertext block. This is used e.g. by the CTS mode. |
2047 | + */ |
2048 | + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, |
2049 | + ivsize, 0); |
2050 | + |
2051 | ablkcipher_request_complete(req, status); |
2052 | } |
2053 | |
2054 | @@ -1968,7 +1975,7 @@ static struct caam_aead_alg driver_aeads[] = { |
2055 | .cra_name = "echainiv(authenc(hmac(sha256)," |
2056 | "cbc(des)))", |
2057 | .cra_driver_name = "echainiv-authenc-" |
2058 | - "hmac-sha256-cbc-desi-" |
2059 | + "hmac-sha256-cbc-des-" |
2060 | "caam-qi", |
2061 | .cra_blocksize = DES_BLOCK_SIZE, |
2062 | }, |
2063 | diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c |
2064 | index 58a4244b4752..3f26a415ef44 100644 |
2065 | --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c |
2066 | +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c |
2067 | @@ -1,8 +1,9 @@ |
2068 | /* |
2069 | * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support |
2070 | * |
2071 | - * Copyright (C) 2013 Advanced Micro Devices, Inc. |
2072 | + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. |
2073 | * |
2074 | + * Author: Gary R Hook <gary.hook@amd.com> |
2075 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
2076 | * |
2077 | * This program is free software; you can redistribute it and/or modify |
2078 | @@ -164,6 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, |
2079 | memset(&rctx->cmd, 0, sizeof(rctx->cmd)); |
2080 | INIT_LIST_HEAD(&rctx->cmd.entry); |
2081 | rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; |
2082 | + rctx->cmd.u.xts.type = CCP_AES_TYPE_128; |
2083 | rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT |
2084 | : CCP_AES_ACTION_DECRYPT; |
2085 | rctx->cmd.u.xts.unit_size = unit_size; |
2086 | diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c |
2087 | index b10d2d2075cb..9bc134a4ebf0 100644 |
2088 | --- a/drivers/crypto/ccp/ccp-dev-v5.c |
2089 | +++ b/drivers/crypto/ccp/ccp-dev-v5.c |
2090 | @@ -145,6 +145,7 @@ union ccp_function { |
2091 | #define CCP_AES_MODE(p) ((p)->aes.mode) |
2092 | #define CCP_AES_TYPE(p) ((p)->aes.type) |
2093 | #define CCP_XTS_SIZE(p) ((p)->aes_xts.size) |
2094 | +#define CCP_XTS_TYPE(p) ((p)->aes_xts.type) |
2095 | #define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt) |
2096 | #define CCP_DES3_SIZE(p) ((p)->des3.size) |
2097 | #define CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt) |
2098 | @@ -344,6 +345,7 @@ static int ccp5_perform_xts_aes(struct ccp_op *op) |
2099 | CCP5_CMD_PROT(&desc) = 0; |
2100 | |
2101 | function.raw = 0; |
2102 | + CCP_XTS_TYPE(&function) = op->u.xts.type; |
2103 | CCP_XTS_ENCRYPT(&function) = op->u.xts.action; |
2104 | CCP_XTS_SIZE(&function) = op->u.xts.unit_size; |
2105 | CCP5_CMD_FUNCTION(&desc) = function.raw; |
2106 | diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h |
2107 | index a70154ac7405..7b8370e9c42e 100644 |
2108 | --- a/drivers/crypto/ccp/ccp-dev.h |
2109 | +++ b/drivers/crypto/ccp/ccp-dev.h |
2110 | @@ -192,6 +192,7 @@ |
2111 | #define CCP_AES_CTX_SB_COUNT 1 |
2112 | |
2113 | #define CCP_XTS_AES_KEY_SB_COUNT 1 |
2114 | +#define CCP5_XTS_AES_KEY_SB_COUNT 2 |
2115 | #define CCP_XTS_AES_CTX_SB_COUNT 1 |
2116 | |
2117 | #define CCP_DES3_KEY_SB_COUNT 1 |
2118 | @@ -497,6 +498,7 @@ struct ccp_aes_op { |
2119 | }; |
2120 | |
2121 | struct ccp_xts_aes_op { |
2122 | + enum ccp_aes_type type; |
2123 | enum ccp_aes_action action; |
2124 | enum ccp_xts_aes_unit_size unit_size; |
2125 | }; |
2126 | diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c |
2127 | index c0dfdacbdff5..f3542aede519 100644 |
2128 | --- a/drivers/crypto/ccp/ccp-ops.c |
2129 | +++ b/drivers/crypto/ccp/ccp-ops.c |
2130 | @@ -1038,6 +1038,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, |
2131 | struct ccp_op op; |
2132 | unsigned int unit_size, dm_offset; |
2133 | bool in_place = false; |
2134 | + unsigned int sb_count; |
2135 | + enum ccp_aes_type aestype; |
2136 | int ret; |
2137 | |
2138 | switch (xts->unit_size) { |
2139 | @@ -1061,7 +1063,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, |
2140 | return -EINVAL; |
2141 | } |
2142 | |
2143 | - if (xts->key_len != AES_KEYSIZE_128) |
2144 | + if (xts->key_len == AES_KEYSIZE_128) |
2145 | + aestype = CCP_AES_TYPE_128; |
2146 | + else |
2147 | return -EINVAL; |
2148 | |
2149 | if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1))) |
2150 | @@ -1083,23 +1087,44 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, |
2151 | op.sb_key = cmd_q->sb_key; |
2152 | op.sb_ctx = cmd_q->sb_ctx; |
2153 | op.init = 1; |
2154 | + op.u.xts.type = aestype; |
2155 | op.u.xts.action = xts->action; |
2156 | op.u.xts.unit_size = xts->unit_size; |
2157 | |
2158 | - /* All supported key sizes fit in a single (32-byte) SB entry |
2159 | - * and must be in little endian format. Use the 256-bit byte |
2160 | - * swap passthru option to convert from big endian to little |
2161 | - * endian. |
2162 | + /* A version 3 device only supports 128-bit keys, which fits into a |
2163 | + * single SB entry. A version 5 device uses a 512-bit vector, so two |
2164 | + * SB entries. |
2165 | */ |
2166 | + if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) |
2167 | + sb_count = CCP_XTS_AES_KEY_SB_COUNT; |
2168 | + else |
2169 | + sb_count = CCP5_XTS_AES_KEY_SB_COUNT; |
2170 | ret = ccp_init_dm_workarea(&key, cmd_q, |
2171 | - CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES, |
2172 | + sb_count * CCP_SB_BYTES, |
2173 | DMA_TO_DEVICE); |
2174 | if (ret) |
2175 | return ret; |
2176 | |
2177 | - dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; |
2178 | - ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); |
2179 | - ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len); |
2180 | + if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { |
2181 | + /* All supported key sizes must be in little endian format. |
2182 | + * Use the 256-bit byte swap passthru option to convert from |
2183 | + * big endian to little endian. |
2184 | + */ |
2185 | + dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; |
2186 | + ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); |
2187 | + ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len); |
2188 | + } else { |
2189 | + /* Version 5 CCPs use a 512-bit space for the key: each portion |
2190 | + * occupies 256 bits, or one entire slot, and is zero-padded. |
2191 | + */ |
2192 | + unsigned int pad; |
2193 | + |
2194 | + dm_offset = CCP_SB_BYTES; |
2195 | + pad = dm_offset - xts->key_len; |
2196 | + ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len); |
2197 | + ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len, |
2198 | + xts->key_len); |
2199 | + } |
2200 | ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, |
2201 | CCP_PASSTHRU_BYTESWAP_256BIT); |
2202 | if (ret) { |
2203 | diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c |
2204 | index dea04871b50d..a1c4ee818614 100644 |
2205 | --- a/drivers/devfreq/devfreq.c |
2206 | +++ b/drivers/devfreq/devfreq.c |
2207 | @@ -564,7 +564,7 @@ struct devfreq *devfreq_add_device(struct device *dev, |
2208 | err = device_register(&devfreq->dev); |
2209 | if (err) { |
2210 | mutex_unlock(&devfreq->lock); |
2211 | - goto err_out; |
2212 | + goto err_dev; |
2213 | } |
2214 | |
2215 | devfreq->trans_table = devm_kzalloc(&devfreq->dev, |
2216 | @@ -610,6 +610,9 @@ struct devfreq *devfreq_add_device(struct device *dev, |
2217 | mutex_unlock(&devfreq_list_lock); |
2218 | |
2219 | device_unregister(&devfreq->dev); |
2220 | +err_dev: |
2221 | + if (devfreq) |
2222 | + kfree(devfreq); |
2223 | err_out: |
2224 | return ERR_PTR(err); |
2225 | } |
2226 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c |
2227 | index 4083be61b328..6417febe18b9 100644 |
2228 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c |
2229 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c |
2230 | @@ -95,9 +95,8 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index, |
2231 | int i; |
2232 | struct amdgpu_device *adev = psp->adev; |
2233 | |
2234 | - val = RREG32(reg_index); |
2235 | - |
2236 | for (i = 0; i < adev->usec_timeout; i++) { |
2237 | + val = RREG32(reg_index); |
2238 | if (check_changed) { |
2239 | if (val != reg_val) |
2240 | return 0; |
2241 | diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c |
2242 | index c98d77d0c8f8..6f80ad8f588b 100644 |
2243 | --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c |
2244 | +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c |
2245 | @@ -237,11 +237,9 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp) |
2246 | |
2247 | /* there might be handshake issue with hardware which needs delay */ |
2248 | mdelay(20); |
2249 | -#if 0 |
2250 | ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), |
2251 | RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), |
2252 | 0, true); |
2253 | -#endif |
2254 | |
2255 | return ret; |
2256 | } |
2257 | diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c |
2258 | index 4a11d4da4c92..ff30b34c8984 100644 |
2259 | --- a/drivers/infiniband/hw/hfi1/init.c |
2260 | +++ b/drivers/infiniband/hw/hfi1/init.c |
2261 | @@ -483,7 +483,6 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, |
2262 | |
2263 | ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY; |
2264 | ppd->part_enforce |= HFI1_PART_ENFORCE_IN; |
2265 | - ppd->part_enforce |= HFI1_PART_ENFORCE_OUT; |
2266 | |
2267 | if (loopback) { |
2268 | hfi1_early_err(&pdev->dev, |
2269 | diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c |
2270 | index 1080778a1f7c..0c73fb0c2c1b 100644 |
2271 | --- a/drivers/infiniband/hw/hfi1/rc.c |
2272 | +++ b/drivers/infiniband/hw/hfi1/rc.c |
2273 | @@ -425,7 +425,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) |
2274 | case IB_WR_RDMA_WRITE: |
2275 | if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) |
2276 | qp->s_lsn++; |
2277 | - /* FALLTHROUGH */ |
2278 | + goto no_flow_control; |
2279 | case IB_WR_RDMA_WRITE_WITH_IMM: |
2280 | /* If no credit, return. */ |
2281 | if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && |
2282 | @@ -433,6 +433,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) |
2283 | qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; |
2284 | goto bail; |
2285 | } |
2286 | +no_flow_control: |
2287 | put_ib_reth_vaddr( |
2288 | wqe->rdma_wr.remote_addr, |
2289 | &ohdr->u.rc.reth); |
2290 | diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c |
2291 | index 2c40a2e989d2..a0eb2f96179a 100644 |
2292 | --- a/drivers/infiniband/hw/mlx5/mr.c |
2293 | +++ b/drivers/infiniband/hw/mlx5/mr.c |
2294 | @@ -48,6 +48,7 @@ enum { |
2295 | #define MLX5_UMR_ALIGN 2048 |
2296 | |
2297 | static int clean_mr(struct mlx5_ib_mr *mr); |
2298 | +static int max_umr_order(struct mlx5_ib_dev *dev); |
2299 | static int use_umr(struct mlx5_ib_dev *dev, int order); |
2300 | static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); |
2301 | |
2302 | @@ -491,16 +492,18 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) |
2303 | struct mlx5_mr_cache *cache = &dev->cache; |
2304 | struct mlx5_ib_mr *mr = NULL; |
2305 | struct mlx5_cache_ent *ent; |
2306 | + int last_umr_cache_entry; |
2307 | int c; |
2308 | int i; |
2309 | |
2310 | c = order2idx(dev, order); |
2311 | - if (c < 0 || c > MAX_UMR_CACHE_ENTRY) { |
2312 | + last_umr_cache_entry = order2idx(dev, max_umr_order(dev)); |
2313 | + if (c < 0 || c > last_umr_cache_entry) { |
2314 | mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c); |
2315 | return NULL; |
2316 | } |
2317 | |
2318 | - for (i = c; i < MAX_UMR_CACHE_ENTRY; i++) { |
2319 | + for (i = c; i <= last_umr_cache_entry; i++) { |
2320 | ent = &cache->ent[i]; |
2321 | |
2322 | mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); |
2323 | @@ -816,11 +819,16 @@ static int get_octo_len(u64 addr, u64 len, int page_size) |
2324 | return (npages + 1) / 2; |
2325 | } |
2326 | |
2327 | -static int use_umr(struct mlx5_ib_dev *dev, int order) |
2328 | +static int max_umr_order(struct mlx5_ib_dev *dev) |
2329 | { |
2330 | if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) |
2331 | - return order <= MAX_UMR_CACHE_ENTRY + 2; |
2332 | - return order <= MLX5_MAX_UMR_SHIFT; |
2333 | + return MAX_UMR_CACHE_ENTRY + 2; |
2334 | + return MLX5_MAX_UMR_SHIFT; |
2335 | +} |
2336 | + |
2337 | +static int use_umr(struct mlx5_ib_dev *dev, int order) |
2338 | +{ |
2339 | + return order <= max_umr_order(dev); |
2340 | } |
2341 | |
2342 | static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, |
2343 | diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c |
2344 | index 4ddbcac5eabe..e9a91736b12d 100644 |
2345 | --- a/drivers/infiniband/hw/qib/qib_rc.c |
2346 | +++ b/drivers/infiniband/hw/qib/qib_rc.c |
2347 | @@ -348,7 +348,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) |
2348 | case IB_WR_RDMA_WRITE: |
2349 | if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) |
2350 | qp->s_lsn++; |
2351 | - /* FALLTHROUGH */ |
2352 | + goto no_flow_control; |
2353 | case IB_WR_RDMA_WRITE_WITH_IMM: |
2354 | /* If no credit, return. */ |
2355 | if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && |
2356 | @@ -356,7 +356,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) |
2357 | qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; |
2358 | goto bail; |
2359 | } |
2360 | - |
2361 | +no_flow_control: |
2362 | ohdr->u.rc.reth.vaddr = |
2363 | cpu_to_be64(wqe->rdma_wr.remote_addr); |
2364 | ohdr->u.rc.reth.rkey = |
2365 | diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c |
2366 | index ca0e19ae7a90..f6d0c8f51613 100644 |
2367 | --- a/drivers/input/joystick/xpad.c |
2368 | +++ b/drivers/input/joystick/xpad.c |
2369 | @@ -1764,10 +1764,12 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id |
2370 | struct usb_endpoint_descriptor *ep = |
2371 | &intf->cur_altsetting->endpoint[i].desc; |
2372 | |
2373 | - if (usb_endpoint_dir_in(ep)) |
2374 | - ep_irq_in = ep; |
2375 | - else |
2376 | - ep_irq_out = ep; |
2377 | + if (usb_endpoint_xfer_int(ep)) { |
2378 | + if (usb_endpoint_dir_in(ep)) |
2379 | + ep_irq_in = ep; |
2380 | + else |
2381 | + ep_irq_out = ep; |
2382 | + } |
2383 | } |
2384 | |
2385 | if (!ep_irq_in || !ep_irq_out) { |
2386 | diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h |
2387 | index f932a83b4990..9125ad017eda 100644 |
2388 | --- a/drivers/input/serio/i8042-x86ia64io.h |
2389 | +++ b/drivers/input/serio/i8042-x86ia64io.h |
2390 | @@ -839,6 +839,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = { |
2391 | DMI_MATCH(DMI_PRODUCT_NAME, "P34"), |
2392 | }, |
2393 | }, |
2394 | + { |
2395 | + /* Gigabyte P57 - Elantech touchpad */ |
2396 | + .matches = { |
2397 | + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), |
2398 | + DMI_MATCH(DMI_PRODUCT_NAME, "P57"), |
2399 | + }, |
2400 | + }, |
2401 | { |
2402 | /* Schenker XMG C504 - Elantech touchpad */ |
2403 | .matches = { |
2404 | diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c |
2405 | index da67882caa7b..0e298ed42ae0 100644 |
2406 | --- a/drivers/mailbox/bcm-flexrm-mailbox.c |
2407 | +++ b/drivers/mailbox/bcm-flexrm-mailbox.c |
2408 | @@ -95,7 +95,7 @@ |
2409 | |
2410 | /* Register RING_CMPL_START_ADDR fields */ |
2411 | #define CMPL_START_ADDR_VALUE(pa) \ |
2412 | - ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x03ffffff)) |
2413 | + ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff)) |
2414 | |
2415 | /* Register RING_CONTROL fields */ |
2416 | #define CONTROL_MASK_DISABLE_CONTROL 12 |
2417 | diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h |
2418 | index dee542fff68e..2ed9bd231d84 100644 |
2419 | --- a/drivers/md/bcache/bcache.h |
2420 | +++ b/drivers/md/bcache/bcache.h |
2421 | @@ -333,6 +333,7 @@ struct cached_dev { |
2422 | /* Limit number of writeback bios in flight */ |
2423 | struct semaphore in_flight; |
2424 | struct task_struct *writeback_thread; |
2425 | + struct workqueue_struct *writeback_write_wq; |
2426 | |
2427 | struct keybuf writeback_keys; |
2428 | |
2429 | diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c |
2430 | index 019b3df9f1c6..4b413db99276 100644 |
2431 | --- a/drivers/md/bcache/request.c |
2432 | +++ b/drivers/md/bcache/request.c |
2433 | @@ -196,12 +196,12 @@ static void bch_data_insert_start(struct closure *cl) |
2434 | struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); |
2435 | struct bio *bio = op->bio, *n; |
2436 | |
2437 | - if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) |
2438 | - wake_up_gc(op->c); |
2439 | - |
2440 | if (op->bypass) |
2441 | return bch_data_invalidate(cl); |
2442 | |
2443 | + if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) |
2444 | + wake_up_gc(op->c); |
2445 | + |
2446 | /* |
2447 | * Journal writes are marked REQ_PREFLUSH; if the original write was a |
2448 | * flush, it'll wait on the journal write. |
2449 | @@ -400,12 +400,6 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) |
2450 | if (!congested && !dc->sequential_cutoff) |
2451 | goto rescale; |
2452 | |
2453 | - if (!congested && |
2454 | - mode == CACHE_MODE_WRITEBACK && |
2455 | - op_is_write(bio->bi_opf) && |
2456 | - op_is_sync(bio->bi_opf)) |
2457 | - goto rescale; |
2458 | - |
2459 | spin_lock(&dc->io_lock); |
2460 | |
2461 | hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) |
2462 | diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c |
2463 | index 8352fad765f6..046fc5bddf54 100644 |
2464 | --- a/drivers/md/bcache/super.c |
2465 | +++ b/drivers/md/bcache/super.c |
2466 | @@ -1026,7 +1026,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) |
2467 | } |
2468 | |
2469 | if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { |
2470 | - bch_sectors_dirty_init(dc); |
2471 | + bch_sectors_dirty_init(&dc->disk); |
2472 | atomic_set(&dc->has_dirty, 1); |
2473 | atomic_inc(&dc->count); |
2474 | bch_writeback_queue(dc); |
2475 | @@ -1059,6 +1059,8 @@ static void cached_dev_free(struct closure *cl) |
2476 | cancel_delayed_work_sync(&dc->writeback_rate_update); |
2477 | if (!IS_ERR_OR_NULL(dc->writeback_thread)) |
2478 | kthread_stop(dc->writeback_thread); |
2479 | + if (dc->writeback_write_wq) |
2480 | + destroy_workqueue(dc->writeback_write_wq); |
2481 | |
2482 | mutex_lock(&bch_register_lock); |
2483 | |
2484 | @@ -1228,6 +1230,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) |
2485 | goto err; |
2486 | |
2487 | bcache_device_attach(d, c, u - c->uuids); |
2488 | + bch_sectors_dirty_init(d); |
2489 | bch_flash_dev_request_init(d); |
2490 | add_disk(d->disk); |
2491 | |
2492 | @@ -1964,6 +1967,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, |
2493 | else |
2494 | err = "device busy"; |
2495 | mutex_unlock(&bch_register_lock); |
2496 | + if (!IS_ERR(bdev)) |
2497 | + bdput(bdev); |
2498 | if (attr == &ksysfs_register_quiet) |
2499 | goto out; |
2500 | } |
2501 | diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c |
2502 | index f90f13616980..ab2f8ce1e3bc 100644 |
2503 | --- a/drivers/md/bcache/sysfs.c |
2504 | +++ b/drivers/md/bcache/sysfs.c |
2505 | @@ -192,7 +192,7 @@ STORE(__cached_dev) |
2506 | { |
2507 | struct cached_dev *dc = container_of(kobj, struct cached_dev, |
2508 | disk.kobj); |
2509 | - unsigned v = size; |
2510 | + ssize_t v = size; |
2511 | struct cache_set *c; |
2512 | struct kobj_uevent_env *env; |
2513 | |
2514 | @@ -227,7 +227,7 @@ STORE(__cached_dev) |
2515 | bch_cached_dev_run(dc); |
2516 | |
2517 | if (attr == &sysfs_cache_mode) { |
2518 | - ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1); |
2519 | + v = bch_read_string_list(buf, bch_cache_modes + 1); |
2520 | |
2521 | if (v < 0) |
2522 | return v; |
2523 | diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c |
2524 | index 8c3a938f4bf0..176d3c2ef5f5 100644 |
2525 | --- a/drivers/md/bcache/util.c |
2526 | +++ b/drivers/md/bcache/util.c |
2527 | @@ -74,24 +74,44 @@ STRTO_H(strtouint, unsigned int) |
2528 | STRTO_H(strtoll, long long) |
2529 | STRTO_H(strtoull, unsigned long long) |
2530 | |
2531 | +/** |
2532 | + * bch_hprint() - formats @v to human readable string for sysfs. |
2533 | + * |
2534 | + * @v - signed 64 bit integer |
2535 | + * @buf - the (at least 8 byte) buffer to format the result into. |
2536 | + * |
2537 | + * Returns the number of bytes used by format. |
2538 | + */ |
2539 | ssize_t bch_hprint(char *buf, int64_t v) |
2540 | { |
2541 | static const char units[] = "?kMGTPEZY"; |
2542 | - char dec[4] = ""; |
2543 | - int u, t = 0; |
2544 | - |
2545 | - for (u = 0; v >= 1024 || v <= -1024; u++) { |
2546 | - t = v & ~(~0 << 10); |
2547 | - v >>= 10; |
2548 | - } |
2549 | - |
2550 | - if (!u) |
2551 | - return sprintf(buf, "%llu", v); |
2552 | - |
2553 | - if (v < 100 && v > -100) |
2554 | - snprintf(dec, sizeof(dec), ".%i", t / 100); |
2555 | - |
2556 | - return sprintf(buf, "%lli%s%c", v, dec, units[u]); |
2557 | + int u = 0, t; |
2558 | + |
2559 | + uint64_t q; |
2560 | + |
2561 | + if (v < 0) |
2562 | + q = -v; |
2563 | + else |
2564 | + q = v; |
2565 | + |
2566 | + /* For as long as the number is more than 3 digits, but at least |
2567 | + * once, shift right / divide by 1024. Keep the remainder for |
2568 | + * a digit after the decimal point. |
2569 | + */ |
2570 | + do { |
2571 | + u++; |
2572 | + |
2573 | + t = q & ~(~0 << 10); |
2574 | + q >>= 10; |
2575 | + } while (q >= 1000); |
2576 | + |
2577 | + if (v < 0) |
2578 | + /* '-', up to 3 digits, '.', 1 digit, 1 character, null; |
2579 | + * yields 8 bytes. |
2580 | + */ |
2581 | + return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]); |
2582 | + else |
2583 | + return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]); |
2584 | } |
2585 | |
2586 | ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[], |
2587 | diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c |
2588 | index 42c66e76f05e..a635d6ac7fde 100644 |
2589 | --- a/drivers/md/bcache/writeback.c |
2590 | +++ b/drivers/md/bcache/writeback.c |
2591 | @@ -21,7 +21,8 @@ |
2592 | static void __update_writeback_rate(struct cached_dev *dc) |
2593 | { |
2594 | struct cache_set *c = dc->disk.c; |
2595 | - uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size; |
2596 | + uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size - |
2597 | + bcache_flash_devs_sectors_dirty(c); |
2598 | uint64_t cache_dirty_target = |
2599 | div_u64(cache_sectors * dc->writeback_percent, 100); |
2600 | |
2601 | @@ -186,7 +187,7 @@ static void write_dirty(struct closure *cl) |
2602 | |
2603 | closure_bio_submit(&io->bio, cl); |
2604 | |
2605 | - continue_at(cl, write_dirty_finish, system_wq); |
2606 | + continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq); |
2607 | } |
2608 | |
2609 | static void read_dirty_endio(struct bio *bio) |
2610 | @@ -206,7 +207,7 @@ static void read_dirty_submit(struct closure *cl) |
2611 | |
2612 | closure_bio_submit(&io->bio, cl); |
2613 | |
2614 | - continue_at(cl, write_dirty, system_wq); |
2615 | + continue_at(cl, write_dirty, io->dc->writeback_write_wq); |
2616 | } |
2617 | |
2618 | static void read_dirty(struct cached_dev *dc) |
2619 | @@ -482,17 +483,17 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, |
2620 | return MAP_CONTINUE; |
2621 | } |
2622 | |
2623 | -void bch_sectors_dirty_init(struct cached_dev *dc) |
2624 | +void bch_sectors_dirty_init(struct bcache_device *d) |
2625 | { |
2626 | struct sectors_dirty_init op; |
2627 | |
2628 | bch_btree_op_init(&op.op, -1); |
2629 | - op.inode = dc->disk.id; |
2630 | + op.inode = d->id; |
2631 | |
2632 | - bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), |
2633 | + bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0), |
2634 | sectors_dirty_init_fn, 0); |
2635 | |
2636 | - dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk); |
2637 | + d->sectors_dirty_last = bcache_dev_sectors_dirty(d); |
2638 | } |
2639 | |
2640 | void bch_cached_dev_writeback_init(struct cached_dev *dc) |
2641 | @@ -516,6 +517,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) |
2642 | |
2643 | int bch_cached_dev_writeback_start(struct cached_dev *dc) |
2644 | { |
2645 | + dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq", |
2646 | + WQ_MEM_RECLAIM, 0); |
2647 | + if (!dc->writeback_write_wq) |
2648 | + return -ENOMEM; |
2649 | + |
2650 | dc->writeback_thread = kthread_create(bch_writeback_thread, dc, |
2651 | "bcache_writeback"); |
2652 | if (IS_ERR(dc->writeback_thread)) |
2653 | diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h |
2654 | index 629bd1a502fd..e35421d20d2e 100644 |
2655 | --- a/drivers/md/bcache/writeback.h |
2656 | +++ b/drivers/md/bcache/writeback.h |
2657 | @@ -14,6 +14,25 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) |
2658 | return ret; |
2659 | } |
2660 | |
2661 | +static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c) |
2662 | +{ |
2663 | + uint64_t i, ret = 0; |
2664 | + |
2665 | + mutex_lock(&bch_register_lock); |
2666 | + |
2667 | + for (i = 0; i < c->nr_uuids; i++) { |
2668 | + struct bcache_device *d = c->devices[i]; |
2669 | + |
2670 | + if (!d || !UUID_FLASH_ONLY(&c->uuids[i])) |
2671 | + continue; |
2672 | + ret += bcache_dev_sectors_dirty(d); |
2673 | + } |
2674 | + |
2675 | + mutex_unlock(&bch_register_lock); |
2676 | + |
2677 | + return ret; |
2678 | +} |
2679 | + |
2680 | static inline unsigned offset_to_stripe(struct bcache_device *d, |
2681 | uint64_t offset) |
2682 | { |
2683 | @@ -84,7 +103,7 @@ static inline void bch_writeback_add(struct cached_dev *dc) |
2684 | |
2685 | void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int); |
2686 | |
2687 | -void bch_sectors_dirty_init(struct cached_dev *dc); |
2688 | +void bch_sectors_dirty_init(struct bcache_device *); |
2689 | void bch_cached_dev_writeback_init(struct cached_dev *); |
2690 | int bch_cached_dev_writeback_start(struct cached_dev *); |
2691 | |
2692 | diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c |
2693 | index 40f3cd7eab0f..d2121637b4ab 100644 |
2694 | --- a/drivers/md/bitmap.c |
2695 | +++ b/drivers/md/bitmap.c |
2696 | @@ -625,7 +625,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) |
2697 | err = read_sb_page(bitmap->mddev, |
2698 | offset, |
2699 | sb_page, |
2700 | - 0, sizeof(bitmap_super_t)); |
2701 | + 0, PAGE_SIZE); |
2702 | } |
2703 | if (err) |
2704 | return err; |
2705 | @@ -2058,6 +2058,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, |
2706 | long pages; |
2707 | struct bitmap_page *new_bp; |
2708 | |
2709 | + if (bitmap->storage.file && !init) { |
2710 | + pr_info("md: cannot resize file-based bitmap\n"); |
2711 | + return -EINVAL; |
2712 | + } |
2713 | + |
2714 | if (chunksize == 0) { |
2715 | /* If there is enough space, leave the chunk size unchanged, |
2716 | * else increase by factor of two until there is enough space. |
2717 | @@ -2118,7 +2123,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, |
2718 | if (store.sb_page && bitmap->storage.sb_page) |
2719 | memcpy(page_address(store.sb_page), |
2720 | page_address(bitmap->storage.sb_page), |
2721 | - sizeof(bitmap_super_t)); |
2722 | + PAGE_SIZE); |
2723 | bitmap_file_unmap(&bitmap->storage); |
2724 | bitmap->storage = store; |
2725 | |
2726 | diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c |
2727 | index 78de7ddf5081..3df28f2f9b38 100644 |
2728 | --- a/drivers/media/i2c/adv7180.c |
2729 | +++ b/drivers/media/i2c/adv7180.c |
2730 | @@ -1402,6 +1402,8 @@ static int adv7180_remove(struct i2c_client *client) |
2731 | |
2732 | static const struct i2c_device_id adv7180_id[] = { |
2733 | { "adv7180", (kernel_ulong_t)&adv7180_info }, |
2734 | + { "adv7180cp", (kernel_ulong_t)&adv7180_info }, |
2735 | + { "adv7180st", (kernel_ulong_t)&adv7180_info }, |
2736 | { "adv7182", (kernel_ulong_t)&adv7182_info }, |
2737 | { "adv7280", (kernel_ulong_t)&adv7280_info }, |
2738 | { "adv7280-m", (kernel_ulong_t)&adv7280_m_info }, |
2739 | diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c |
2740 | index 5f4434c0a8f1..2d6187904552 100644 |
2741 | --- a/drivers/media/platform/qcom/venus/helpers.c |
2742 | +++ b/drivers/media/platform/qcom/venus/helpers.c |
2743 | @@ -243,7 +243,7 @@ static void return_buf_error(struct venus_inst *inst, |
2744 | if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) |
2745 | v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf); |
2746 | else |
2747 | - v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf); |
2748 | + v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf); |
2749 | |
2750 | v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); |
2751 | } |
2752 | diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c |
2753 | index db1e7b70c998..9080e39ea391 100644 |
2754 | --- a/drivers/media/rc/lirc_dev.c |
2755 | +++ b/drivers/media/rc/lirc_dev.c |
2756 | @@ -59,6 +59,8 @@ static void lirc_release(struct device *ld) |
2757 | { |
2758 | struct irctl *ir = container_of(ld, struct irctl, dev); |
2759 | |
2760 | + put_device(ir->dev.parent); |
2761 | + |
2762 | if (ir->buf_internal) { |
2763 | lirc_buffer_free(ir->buf); |
2764 | kfree(ir->buf); |
2765 | @@ -218,6 +220,8 @@ int lirc_register_driver(struct lirc_driver *d) |
2766 | |
2767 | mutex_unlock(&lirc_dev_lock); |
2768 | |
2769 | + get_device(ir->dev.parent); |
2770 | + |
2771 | dev_info(ir->d.dev, "lirc_dev: driver %s registered at minor = %d\n", |
2772 | ir->d.name, ir->d.minor); |
2773 | |
2774 | diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c |
2775 | index c2ee6e39fd0c..20397aba6849 100644 |
2776 | --- a/drivers/media/usb/uvc/uvc_ctrl.c |
2777 | +++ b/drivers/media/usb/uvc/uvc_ctrl.c |
2778 | @@ -2002,6 +2002,13 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain, |
2779 | goto done; |
2780 | } |
2781 | |
2782 | + /* Validate the user-provided bit-size and offset */ |
2783 | + if (mapping->size > 32 || |
2784 | + mapping->offset + mapping->size > ctrl->info.size * 8) { |
2785 | + ret = -EINVAL; |
2786 | + goto done; |
2787 | + } |
2788 | + |
2789 | list_for_each_entry(map, &ctrl->info.mappings, list) { |
2790 | if (mapping->id == map->id) { |
2791 | uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', " |
2792 | diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c |
2793 | index 6f52970f8b54..0c14e995667c 100644 |
2794 | --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c |
2795 | +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c |
2796 | @@ -796,7 +796,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u |
2797 | copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || |
2798 | put_user(kp->pending, &up->pending) || |
2799 | put_user(kp->sequence, &up->sequence) || |
2800 | - compat_put_timespec(&kp->timestamp, &up->timestamp) || |
2801 | + put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || |
2802 | + put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) || |
2803 | put_user(kp->id, &up->id) || |
2804 | copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32))) |
2805 | return -EFAULT; |
2806 | diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c |
2807 | index 1a138c83f877..a0c44d16bf30 100644 |
2808 | --- a/drivers/misc/cxl/api.c |
2809 | +++ b/drivers/misc/cxl/api.c |
2810 | @@ -336,6 +336,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed, |
2811 | mmput(ctx->mm); |
2812 | } |
2813 | |
2814 | + /* |
2815 | + * Increment driver use count. Enables global TLBIs for hash |
2816 | + * and callbacks to handle the segment table |
2817 | + */ |
2818 | cxl_ctx_get(); |
2819 | |
2820 | if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) { |
2821 | diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c |
2822 | index 0761271d68c5..4bfad9f6dc9f 100644 |
2823 | --- a/drivers/misc/cxl/file.c |
2824 | +++ b/drivers/misc/cxl/file.c |
2825 | @@ -95,7 +95,6 @@ static int __afu_open(struct inode *inode, struct file *file, bool master) |
2826 | |
2827 | pr_devel("afu_open pe: %i\n", ctx->pe); |
2828 | file->private_data = ctx; |
2829 | - cxl_ctx_get(); |
2830 | |
2831 | /* indicate success */ |
2832 | rc = 0; |
2833 | @@ -225,6 +224,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, |
2834 | if (ctx->mm) |
2835 | mmput(ctx->mm); |
2836 | |
2837 | + /* |
2838 | + * Increment driver use count. Enables global TLBIs for hash |
2839 | + * and callbacks to handle the segment table |
2840 | + */ |
2841 | + cxl_ctx_get(); |
2842 | + |
2843 | trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); |
2844 | |
2845 | if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor, |
2846 | @@ -233,6 +238,7 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, |
2847 | cxl_adapter_context_put(ctx->afu->adapter); |
2848 | put_pid(ctx->pid); |
2849 | ctx->pid = NULL; |
2850 | + cxl_ctx_put(); |
2851 | cxl_context_mm_count_put(ctx); |
2852 | goto out; |
2853 | } |
2854 | diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c |
2855 | index 517a315e259b..35bd50bcbbd5 100644 |
2856 | --- a/drivers/net/wireless/ath/wcn36xx/main.c |
2857 | +++ b/drivers/net/wireless/ath/wcn36xx/main.c |
2858 | @@ -372,6 +372,8 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) |
2859 | |
2860 | wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed); |
2861 | |
2862 | + mutex_lock(&wcn->conf_mutex); |
2863 | + |
2864 | if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { |
2865 | int ch = WCN36XX_HW_CHANNEL(wcn); |
2866 | wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n", |
2867 | @@ -382,6 +384,8 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) |
2868 | } |
2869 | } |
2870 | |
2871 | + mutex_unlock(&wcn->conf_mutex); |
2872 | + |
2873 | return 0; |
2874 | } |
2875 | |
2876 | @@ -396,6 +400,8 @@ static void wcn36xx_configure_filter(struct ieee80211_hw *hw, |
2877 | |
2878 | wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n"); |
2879 | |
2880 | + mutex_lock(&wcn->conf_mutex); |
2881 | + |
2882 | *total &= FIF_ALLMULTI; |
2883 | |
2884 | fp = (void *)(unsigned long)multicast; |
2885 | @@ -408,6 +414,8 @@ static void wcn36xx_configure_filter(struct ieee80211_hw *hw, |
2886 | else if (NL80211_IFTYPE_STATION == vif->type && tmp->sta_assoc) |
2887 | wcn36xx_smd_set_mc_list(wcn, vif, fp); |
2888 | } |
2889 | + |
2890 | + mutex_unlock(&wcn->conf_mutex); |
2891 | kfree(fp); |
2892 | } |
2893 | |
2894 | @@ -471,6 +479,8 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, |
2895 | key_conf->key, |
2896 | key_conf->keylen); |
2897 | |
2898 | + mutex_lock(&wcn->conf_mutex); |
2899 | + |
2900 | switch (key_conf->cipher) { |
2901 | case WLAN_CIPHER_SUITE_WEP40: |
2902 | vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40; |
2903 | @@ -565,6 +575,8 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, |
2904 | } |
2905 | |
2906 | out: |
2907 | + mutex_unlock(&wcn->conf_mutex); |
2908 | + |
2909 | return ret; |
2910 | } |
2911 | |
2912 | @@ -725,6 +737,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, |
2913 | wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n", |
2914 | vif, changed); |
2915 | |
2916 | + mutex_lock(&wcn->conf_mutex); |
2917 | + |
2918 | if (changed & BSS_CHANGED_BEACON_INFO) { |
2919 | wcn36xx_dbg(WCN36XX_DBG_MAC, |
2920 | "mac bss changed dtim period %d\n", |
2921 | @@ -787,7 +801,13 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, |
2922 | bss_conf->aid); |
2923 | |
2924 | vif_priv->sta_assoc = true; |
2925 | - rcu_read_lock(); |
2926 | + |
2927 | + /* |
2928 | + * Holding conf_mutex ensures mutal exclusion with |
2929 | + * wcn36xx_sta_remove() and as such ensures that sta |
2930 | + * won't be freed while we're operating on it. As such |
2931 | + * we do not need to hold the rcu_read_lock(). |
2932 | + */ |
2933 | sta = ieee80211_find_sta(vif, bss_conf->bssid); |
2934 | if (!sta) { |
2935 | wcn36xx_err("sta %pM is not found\n", |
2936 | @@ -811,7 +831,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, |
2937 | * place where AID is available. |
2938 | */ |
2939 | wcn36xx_smd_config_sta(wcn, vif, sta); |
2940 | - rcu_read_unlock(); |
2941 | } else { |
2942 | wcn36xx_dbg(WCN36XX_DBG_MAC, |
2943 | "disassociated bss %pM vif %pM AID=%d\n", |
2944 | @@ -873,6 +892,9 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, |
2945 | } |
2946 | } |
2947 | out: |
2948 | + |
2949 | + mutex_unlock(&wcn->conf_mutex); |
2950 | + |
2951 | return; |
2952 | } |
2953 | |
2954 | @@ -882,7 +904,10 @@ static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value) |
2955 | struct wcn36xx *wcn = hw->priv; |
2956 | wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value); |
2957 | |
2958 | + mutex_lock(&wcn->conf_mutex); |
2959 | wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value); |
2960 | + mutex_unlock(&wcn->conf_mutex); |
2961 | + |
2962 | return 0; |
2963 | } |
2964 | |
2965 | @@ -893,8 +918,12 @@ static void wcn36xx_remove_interface(struct ieee80211_hw *hw, |
2966 | struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); |
2967 | wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif); |
2968 | |
2969 | + mutex_lock(&wcn->conf_mutex); |
2970 | + |
2971 | list_del(&vif_priv->list); |
2972 | wcn36xx_smd_delete_sta_self(wcn, vif->addr); |
2973 | + |
2974 | + mutex_unlock(&wcn->conf_mutex); |
2975 | } |
2976 | |
2977 | static int wcn36xx_add_interface(struct ieee80211_hw *hw, |
2978 | @@ -915,9 +944,13 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw, |
2979 | return -EOPNOTSUPP; |
2980 | } |
2981 | |
2982 | + mutex_lock(&wcn->conf_mutex); |
2983 | + |
2984 | list_add(&vif_priv->list, &wcn->vif_list); |
2985 | wcn36xx_smd_add_sta_self(wcn, vif); |
2986 | |
2987 | + mutex_unlock(&wcn->conf_mutex); |
2988 | + |
2989 | return 0; |
2990 | } |
2991 | |
2992 | @@ -930,6 +963,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
2993 | wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n", |
2994 | vif, sta->addr); |
2995 | |
2996 | + mutex_lock(&wcn->conf_mutex); |
2997 | + |
2998 | spin_lock_init(&sta_priv->ampdu_lock); |
2999 | sta_priv->vif = vif_priv; |
3000 | /* |
3001 | @@ -941,6 +976,9 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
3002 | sta_priv->aid = sta->aid; |
3003 | wcn36xx_smd_config_sta(wcn, vif, sta); |
3004 | } |
3005 | + |
3006 | + mutex_unlock(&wcn->conf_mutex); |
3007 | + |
3008 | return 0; |
3009 | } |
3010 | |
3011 | @@ -954,8 +992,13 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw, |
3012 | wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n", |
3013 | vif, sta->addr, sta_priv->sta_index); |
3014 | |
3015 | + mutex_lock(&wcn->conf_mutex); |
3016 | + |
3017 | wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index); |
3018 | sta_priv->vif = NULL; |
3019 | + |
3020 | + mutex_unlock(&wcn->conf_mutex); |
3021 | + |
3022 | return 0; |
3023 | } |
3024 | |
3025 | @@ -999,6 +1042,8 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, |
3026 | wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n", |
3027 | action, tid); |
3028 | |
3029 | + mutex_lock(&wcn->conf_mutex); |
3030 | + |
3031 | switch (action) { |
3032 | case IEEE80211_AMPDU_RX_START: |
3033 | sta_priv->tid = tid; |
3034 | @@ -1038,6 +1083,8 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, |
3035 | wcn36xx_err("Unknown AMPDU action\n"); |
3036 | } |
3037 | |
3038 | + mutex_unlock(&wcn->conf_mutex); |
3039 | + |
3040 | return 0; |
3041 | } |
3042 | |
3043 | @@ -1216,6 +1263,7 @@ static int wcn36xx_probe(struct platform_device *pdev) |
3044 | wcn = hw->priv; |
3045 | wcn->hw = hw; |
3046 | wcn->dev = &pdev->dev; |
3047 | + mutex_init(&wcn->conf_mutex); |
3048 | mutex_init(&wcn->hal_mutex); |
3049 | mutex_init(&wcn->scan_lock); |
3050 | |
3051 | diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h |
3052 | index b52b4da9a967..6aefba4c0cda 100644 |
3053 | --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h |
3054 | +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h |
3055 | @@ -202,6 +202,9 @@ struct wcn36xx { |
3056 | struct qcom_smem_state *tx_rings_empty_state; |
3057 | unsigned tx_rings_empty_state_bit; |
3058 | |
3059 | + /* prevents concurrent FW reconfiguration */ |
3060 | + struct mutex conf_mutex; |
3061 | + |
3062 | /* |
3063 | * smd_buf must be protected with smd_mutex to garantee |
3064 | * that all messages are sent one after another |
3065 | diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c |
3066 | index 3ee6767392b6..d25bad052d78 100644 |
3067 | --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c |
3068 | +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c |
3069 | @@ -79,6 +79,7 @@ |
3070 | /* NVM offsets (in words) definitions */ |
3071 | enum wkp_nvm_offsets { |
3072 | /* NVM HW-Section offset (in words) definitions */ |
3073 | + SUBSYSTEM_ID = 0x0A, |
3074 | HW_ADDR = 0x15, |
3075 | |
3076 | /* NVM SW-Section offset (in words) definitions */ |
3077 | @@ -254,13 +255,12 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, |
3078 | static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, |
3079 | struct iwl_nvm_data *data, |
3080 | const __le16 * const nvm_ch_flags, |
3081 | - bool lar_supported) |
3082 | + bool lar_supported, bool no_wide_in_5ghz) |
3083 | { |
3084 | int ch_idx; |
3085 | int n_channels = 0; |
3086 | struct ieee80211_channel *channel; |
3087 | u16 ch_flags; |
3088 | - bool is_5ghz; |
3089 | int num_of_ch, num_2ghz_channels; |
3090 | const u8 *nvm_chan; |
3091 | |
3092 | @@ -275,12 +275,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, |
3093 | } |
3094 | |
3095 | for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { |
3096 | + bool is_5ghz = (ch_idx >= num_2ghz_channels); |
3097 | + |
3098 | ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); |
3099 | |
3100 | - if (ch_idx >= num_2ghz_channels && |
3101 | - !data->sku_cap_band_52GHz_enable) |
3102 | + if (is_5ghz && !data->sku_cap_band_52GHz_enable) |
3103 | continue; |
3104 | |
3105 | + /* workaround to disable wide channels in 5GHz */ |
3106 | + if (no_wide_in_5ghz && is_5ghz) { |
3107 | + ch_flags &= ~(NVM_CHANNEL_40MHZ | |
3108 | + NVM_CHANNEL_80MHZ | |
3109 | + NVM_CHANNEL_160MHZ); |
3110 | + } |
3111 | + |
3112 | if (ch_flags & NVM_CHANNEL_160MHZ) |
3113 | data->vht160_supported = true; |
3114 | |
3115 | @@ -303,8 +311,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, |
3116 | n_channels++; |
3117 | |
3118 | channel->hw_value = nvm_chan[ch_idx]; |
3119 | - channel->band = (ch_idx < num_2ghz_channels) ? |
3120 | - NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; |
3121 | + channel->band = is_5ghz ? |
3122 | + NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; |
3123 | channel->center_freq = |
3124 | ieee80211_channel_to_frequency( |
3125 | channel->hw_value, channel->band); |
3126 | @@ -316,7 +324,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, |
3127 | * is not used in mvm, and is used for backwards compatibility |
3128 | */ |
3129 | channel->max_power = IWL_DEFAULT_MAX_TX_POWER; |
3130 | - is_5ghz = channel->band == NL80211_BAND_5GHZ; |
3131 | |
3132 | /* don't put limitations in case we're using LAR */ |
3133 | if (!lar_supported) |
3134 | @@ -432,14 +439,15 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, |
3135 | |
3136 | void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, |
3137 | struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, |
3138 | - u8 tx_chains, u8 rx_chains, bool lar_supported) |
3139 | + u8 tx_chains, u8 rx_chains, bool lar_supported, |
3140 | + bool no_wide_in_5ghz) |
3141 | { |
3142 | int n_channels; |
3143 | int n_used = 0; |
3144 | struct ieee80211_supported_band *sband; |
3145 | |
3146 | n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, |
3147 | - lar_supported); |
3148 | + lar_supported, no_wide_in_5ghz); |
3149 | sband = &data->bands[NL80211_BAND_2GHZ]; |
3150 | sband->band = NL80211_BAND_2GHZ; |
3151 | sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; |
3152 | @@ -645,6 +653,39 @@ static int iwl_set_hw_address(struct iwl_trans *trans, |
3153 | return 0; |
3154 | } |
3155 | |
3156 | +static bool |
3157 | +iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg, |
3158 | + const __le16 *nvm_hw) |
3159 | +{ |
3160 | + /* |
3161 | + * Workaround a bug in Indonesia SKUs where the regulatory in |
3162 | + * some 7000-family OTPs erroneously allow wide channels in |
3163 | + * 5GHz. To check for Indonesia, we take the SKU value from |
3164 | + * bits 1-4 in the subsystem ID and check if it is either 5 or |
3165 | + * 9. In those cases, we need to force-disable wide channels |
3166 | + * in 5GHz otherwise the FW will throw a sysassert when we try |
3167 | + * to use them. |
3168 | + */ |
3169 | + if (cfg->device_family == IWL_DEVICE_FAMILY_7000) { |
3170 | + /* |
3171 | + * Unlike the other sections in the NVM, the hw |
3172 | + * section uses big-endian. |
3173 | + */ |
3174 | + u16 subsystem_id = be16_to_cpup((const __be16 *)nvm_hw |
3175 | + + SUBSYSTEM_ID); |
3176 | + u8 sku = (subsystem_id & 0x1e) >> 1; |
3177 | + |
3178 | + if (sku == 5 || sku == 9) { |
3179 | + IWL_DEBUG_EEPROM(dev, |
3180 | + "disabling wide channels in 5GHz (0x%0x %d)\n", |
3181 | + subsystem_id, sku); |
3182 | + return true; |
3183 | + } |
3184 | + } |
3185 | + |
3186 | + return false; |
3187 | +} |
3188 | + |
3189 | struct iwl_nvm_data * |
3190 | iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, |
3191 | const __le16 *nvm_hw, const __le16 *nvm_sw, |
3192 | @@ -655,6 +696,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, |
3193 | struct device *dev = trans->dev; |
3194 | struct iwl_nvm_data *data; |
3195 | bool lar_enabled; |
3196 | + bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw); |
3197 | u32 sku, radio_cfg; |
3198 | u16 lar_config; |
3199 | const __le16 *ch_section; |
3200 | @@ -725,7 +767,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, |
3201 | } |
3202 | |
3203 | iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains, |
3204 | - lar_fw_supported && lar_enabled); |
3205 | + lar_fw_supported && lar_enabled, no_wide_in_5ghz); |
3206 | data->calib_version = 255; |
3207 | |
3208 | return data; |
3209 | diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h |
3210 | index 3fd6506a02ab..50d9b3eaa4f8 100644 |
3211 | --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h |
3212 | +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h |
3213 | @@ -93,7 +93,8 @@ void iwl_set_hw_address_from_csr(struct iwl_trans *trans, |
3214 | */ |
3215 | void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, |
3216 | struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, |
3217 | - u8 tx_chains, u8 rx_chains, bool lar_supported); |
3218 | + u8 tx_chains, u8 rx_chains, bool lar_supported, |
3219 | + bool no_wide_in_5ghz); |
3220 | |
3221 | /** |
3222 | * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW |
3223 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c |
3224 | index dac7e542a190..4de565cec747 100644 |
3225 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c |
3226 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c |
3227 | @@ -628,7 +628,8 @@ int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm) |
3228 | rsp->regulatory.channel_profile, |
3229 | mvm->nvm_data->valid_tx_ant & mvm->fw->valid_tx_ant, |
3230 | mvm->nvm_data->valid_rx_ant & mvm->fw->valid_rx_ant, |
3231 | - rsp->regulatory.lar_enabled && lar_fw_supported); |
3232 | + rsp->regulatory.lar_enabled && lar_fw_supported, |
3233 | + false); |
3234 | |
3235 | iwl_free_resp(&hcmd); |
3236 | return 0; |
3237 | diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c |
3238 | index 026830a138ae..e5d5ce9e3010 100644 |
3239 | --- a/drivers/pci/hotplug/pciehp_hpc.c |
3240 | +++ b/drivers/pci/hotplug/pciehp_hpc.c |
3241 | @@ -586,6 +586,14 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) |
3242 | events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | |
3243 | PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | |
3244 | PCI_EXP_SLTSTA_DLLSC); |
3245 | + |
3246 | + /* |
3247 | + * If we've already reported a power fault, don't report it again |
3248 | + * until we've done something to handle it. |
3249 | + */ |
3250 | + if (ctrl->power_fault_detected) |
3251 | + events &= ~PCI_EXP_SLTSTA_PFD; |
3252 | + |
3253 | if (!events) |
3254 | return IRQ_NONE; |
3255 | |
3256 | diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c |
3257 | index de0ea474fb73..e5824c7b7b6b 100644 |
3258 | --- a/drivers/pci/hotplug/shpchp_hpc.c |
3259 | +++ b/drivers/pci/hotplug/shpchp_hpc.c |
3260 | @@ -1062,6 +1062,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) |
3261 | if (rc) { |
3262 | ctrl_info(ctrl, "Can't get msi for the hotplug controller\n"); |
3263 | ctrl_info(ctrl, "Use INTx for the hotplug controller\n"); |
3264 | + } else { |
3265 | + pci_set_master(pdev); |
3266 | } |
3267 | |
3268 | rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, |
3269 | diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c |
3270 | index e6779d4352a2..7c30fd986560 100644 |
3271 | --- a/drivers/pinctrl/pinctrl-amd.c |
3272 | +++ b/drivers/pinctrl/pinctrl-amd.c |
3273 | @@ -36,6 +36,7 @@ |
3274 | #include <linux/pinctrl/pinconf.h> |
3275 | #include <linux/pinctrl/pinconf-generic.h> |
3276 | |
3277 | +#include "core.h" |
3278 | #include "pinctrl-utils.h" |
3279 | #include "pinctrl-amd.h" |
3280 | |
3281 | @@ -725,6 +726,69 @@ static const struct pinconf_ops amd_pinconf_ops = { |
3282 | .pin_config_group_set = amd_pinconf_group_set, |
3283 | }; |
3284 | |
3285 | +#ifdef CONFIG_PM_SLEEP |
3286 | +static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin) |
3287 | +{ |
3288 | + const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin); |
3289 | + |
3290 | + if (!pd) |
3291 | + return false; |
3292 | + |
3293 | + /* |
3294 | + * Only restore the pin if it is actually in use by the kernel (or |
3295 | + * by userspace). |
3296 | + */ |
3297 | + if (pd->mux_owner || pd->gpio_owner || |
3298 | + gpiochip_line_is_irq(&gpio_dev->gc, pin)) |
3299 | + return true; |
3300 | + |
3301 | + return false; |
3302 | +} |
3303 | + |
3304 | +int amd_gpio_suspend(struct device *dev) |
3305 | +{ |
3306 | + struct platform_device *pdev = to_platform_device(dev); |
3307 | + struct amd_gpio *gpio_dev = platform_get_drvdata(pdev); |
3308 | + struct pinctrl_desc *desc = gpio_dev->pctrl->desc; |
3309 | + int i; |
3310 | + |
3311 | + for (i = 0; i < desc->npins; i++) { |
3312 | + int pin = desc->pins[i].number; |
3313 | + |
3314 | + if (!amd_gpio_should_save(gpio_dev, pin)) |
3315 | + continue; |
3316 | + |
3317 | + gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4); |
3318 | + } |
3319 | + |
3320 | + return 0; |
3321 | +} |
3322 | + |
3323 | +int amd_gpio_resume(struct device *dev) |
3324 | +{ |
3325 | + struct platform_device *pdev = to_platform_device(dev); |
3326 | + struct amd_gpio *gpio_dev = platform_get_drvdata(pdev); |
3327 | + struct pinctrl_desc *desc = gpio_dev->pctrl->desc; |
3328 | + int i; |
3329 | + |
3330 | + for (i = 0; i < desc->npins; i++) { |
3331 | + int pin = desc->pins[i].number; |
3332 | + |
3333 | + if (!amd_gpio_should_save(gpio_dev, pin)) |
3334 | + continue; |
3335 | + |
3336 | + writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4); |
3337 | + } |
3338 | + |
3339 | + return 0; |
3340 | +} |
3341 | + |
3342 | +static const struct dev_pm_ops amd_gpio_pm_ops = { |
3343 | + SET_LATE_SYSTEM_SLEEP_PM_OPS(amd_gpio_suspend, |
3344 | + amd_gpio_resume) |
3345 | +}; |
3346 | +#endif |
3347 | + |
3348 | static struct pinctrl_desc amd_pinctrl_desc = { |
3349 | .pins = kerncz_pins, |
3350 | .npins = ARRAY_SIZE(kerncz_pins), |
3351 | @@ -764,6 +828,14 @@ static int amd_gpio_probe(struct platform_device *pdev) |
3352 | return -EINVAL; |
3353 | } |
3354 | |
3355 | +#ifdef CONFIG_PM_SLEEP |
3356 | + gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins, |
3357 | + sizeof(*gpio_dev->saved_regs), |
3358 | + GFP_KERNEL); |
3359 | + if (!gpio_dev->saved_regs) |
3360 | + return -ENOMEM; |
3361 | +#endif |
3362 | + |
3363 | gpio_dev->pdev = pdev; |
3364 | gpio_dev->gc.direction_input = amd_gpio_direction_input; |
3365 | gpio_dev->gc.direction_output = amd_gpio_direction_output; |
3366 | @@ -853,6 +925,9 @@ static struct platform_driver amd_gpio_driver = { |
3367 | .driver = { |
3368 | .name = "amd_gpio", |
3369 | .acpi_match_table = ACPI_PTR(amd_gpio_acpi_match), |
3370 | +#ifdef CONFIG_PM_SLEEP |
3371 | + .pm = &amd_gpio_pm_ops, |
3372 | +#endif |
3373 | }, |
3374 | .probe = amd_gpio_probe, |
3375 | .remove = amd_gpio_remove, |
3376 | diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h |
3377 | index 5b1cb965c767..8fa453a59da5 100644 |
3378 | --- a/drivers/pinctrl/pinctrl-amd.h |
3379 | +++ b/drivers/pinctrl/pinctrl-amd.h |
3380 | @@ -97,6 +97,7 @@ struct amd_gpio { |
3381 | unsigned int hwbank_num; |
3382 | struct resource *res; |
3383 | struct platform_device *pdev; |
3384 | + u32 *saved_regs; |
3385 | }; |
3386 | |
3387 | /* KERNCZ configuration*/ |
3388 | diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c |
3389 | index 731530a9ce38..9ab8faf528a6 100644 |
3390 | --- a/drivers/pinctrl/samsung/pinctrl-exynos.c |
3391 | +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c |
3392 | @@ -174,10 +174,10 @@ static int exynos_irq_request_resources(struct irq_data *irqd) |
3393 | |
3394 | spin_lock_irqsave(&bank->slock, flags); |
3395 | |
3396 | - con = readl(bank->eint_base + reg_con); |
3397 | + con = readl(bank->pctl_base + reg_con); |
3398 | con &= ~(mask << shift); |
3399 | con |= EXYNOS_EINT_FUNC << shift; |
3400 | - writel(con, bank->eint_base + reg_con); |
3401 | + writel(con, bank->pctl_base + reg_con); |
3402 | |
3403 | spin_unlock_irqrestore(&bank->slock, flags); |
3404 | |
3405 | @@ -202,10 +202,10 @@ static void exynos_irq_release_resources(struct irq_data *irqd) |
3406 | |
3407 | spin_lock_irqsave(&bank->slock, flags); |
3408 | |
3409 | - con = readl(bank->eint_base + reg_con); |
3410 | + con = readl(bank->pctl_base + reg_con); |
3411 | con &= ~(mask << shift); |
3412 | con |= FUNC_INPUT << shift; |
3413 | - writel(con, bank->eint_base + reg_con); |
3414 | + writel(con, bank->pctl_base + reg_con); |
3415 | |
3416 | spin_unlock_irqrestore(&bank->slock, flags); |
3417 | |
3418 | diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c |
3419 | index 49774851e84a..edf27264b603 100644 |
3420 | --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c |
3421 | +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c |
3422 | @@ -151,7 +151,7 @@ static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d, |
3423 | u32 val; |
3424 | |
3425 | /* Make sure that pin is configured as interrupt */ |
3426 | - reg = bank->pctl_base + bank->pctl_offset; |
3427 | + reg = d->virt_base + bank->pctl_offset; |
3428 | shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC]; |
3429 | mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; |
3430 | |
3431 | @@ -184,7 +184,7 @@ static int s3c24xx_eint_type(struct irq_data *data, unsigned int type) |
3432 | s3c24xx_eint_set_handler(data, type); |
3433 | |
3434 | /* Set up interrupt trigger */ |
3435 | - reg = bank->eint_base + EINT_REG(index); |
3436 | + reg = d->virt_base + EINT_REG(index); |
3437 | shift = EINT_OFFS(index); |
3438 | |
3439 | val = readl(reg); |
3440 | @@ -259,29 +259,32 @@ static void s3c2410_demux_eint0_3(struct irq_desc *desc) |
3441 | static void s3c2412_eint0_3_ack(struct irq_data *data) |
3442 | { |
3443 | struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); |
3444 | + struct samsung_pinctrl_drv_data *d = bank->drvdata; |
3445 | |
3446 | unsigned long bitval = 1UL << data->hwirq; |
3447 | - writel(bitval, bank->eint_base + EINTPEND_REG); |
3448 | + writel(bitval, d->virt_base + EINTPEND_REG); |
3449 | } |
3450 | |
3451 | static void s3c2412_eint0_3_mask(struct irq_data *data) |
3452 | { |
3453 | struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); |
3454 | + struct samsung_pinctrl_drv_data *d = bank->drvdata; |
3455 | unsigned long mask; |
3456 | |
3457 | - mask = readl(bank->eint_base + EINTMASK_REG); |
3458 | + mask = readl(d->virt_base + EINTMASK_REG); |
3459 | mask |= (1UL << data->hwirq); |
3460 | - writel(mask, bank->eint_base + EINTMASK_REG); |
3461 | + writel(mask, d->virt_base + EINTMASK_REG); |
3462 | } |
3463 | |
3464 | static void s3c2412_eint0_3_unmask(struct irq_data *data) |
3465 | { |
3466 | struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); |
3467 | + struct samsung_pinctrl_drv_data *d = bank->drvdata; |
3468 | unsigned long mask; |
3469 | |
3470 | - mask = readl(bank->eint_base + EINTMASK_REG); |
3471 | + mask = readl(d->virt_base + EINTMASK_REG); |
3472 | mask &= ~(1UL << data->hwirq); |
3473 | - writel(mask, bank->eint_base + EINTMASK_REG); |
3474 | + writel(mask, d->virt_base + EINTMASK_REG); |
3475 | } |
3476 | |
3477 | static struct irq_chip s3c2412_eint0_3_chip = { |
3478 | @@ -316,31 +319,34 @@ static void s3c2412_demux_eint0_3(struct irq_desc *desc) |
3479 | static void s3c24xx_eint_ack(struct irq_data *data) |
3480 | { |
3481 | struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); |
3482 | + struct samsung_pinctrl_drv_data *d = bank->drvdata; |
3483 | unsigned char index = bank->eint_offset + data->hwirq; |
3484 | |
3485 | - writel(1UL << index, bank->eint_base + EINTPEND_REG); |
3486 | + writel(1UL << index, d->virt_base + EINTPEND_REG); |
3487 | } |
3488 | |
3489 | static void s3c24xx_eint_mask(struct irq_data *data) |
3490 | { |
3491 | struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); |
3492 | + struct samsung_pinctrl_drv_data *d = bank->drvdata; |
3493 | unsigned char index = bank->eint_offset + data->hwirq; |
3494 | unsigned long mask; |
3495 | |
3496 | - mask = readl(bank->eint_base + EINTMASK_REG); |
3497 | + mask = readl(d->virt_base + EINTMASK_REG); |
3498 | mask |= (1UL << index); |
3499 | - writel(mask, bank->eint_base + EINTMASK_REG); |
3500 | + writel(mask, d->virt_base + EINTMASK_REG); |
3501 | } |
3502 | |
3503 | static void s3c24xx_eint_unmask(struct irq_data *data) |
3504 | { |
3505 | struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); |
3506 | + struct samsung_pinctrl_drv_data *d = bank->drvdata; |
3507 | unsigned char index = bank->eint_offset + data->hwirq; |
3508 | unsigned long mask; |
3509 | |
3510 | - mask = readl(bank->eint_base + EINTMASK_REG); |
3511 | + mask = readl(d->virt_base + EINTMASK_REG); |
3512 | mask &= ~(1UL << index); |
3513 | - writel(mask, bank->eint_base + EINTMASK_REG); |
3514 | + writel(mask, d->virt_base + EINTMASK_REG); |
3515 | } |
3516 | |
3517 | static struct irq_chip s3c24xx_eint_chip = { |
3518 | @@ -356,14 +362,13 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc, |
3519 | { |
3520 | struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc); |
3521 | struct irq_chip *chip = irq_desc_get_chip(desc); |
3522 | - struct irq_data *irqd = irq_desc_get_irq_data(desc); |
3523 | - struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); |
3524 | + struct samsung_pinctrl_drv_data *d = data->drvdata; |
3525 | unsigned int pend, mask; |
3526 | |
3527 | chained_irq_enter(chip, desc); |
3528 | |
3529 | - pend = readl(bank->eint_base + EINTPEND_REG); |
3530 | - mask = readl(bank->eint_base + EINTMASK_REG); |
3531 | + pend = readl(d->virt_base + EINTPEND_REG); |
3532 | + mask = readl(d->virt_base + EINTMASK_REG); |
3533 | |
3534 | pend &= ~mask; |
3535 | pend &= range; |
3536 | diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c |
3537 | index 4a88d7446e87..e63663b32907 100644 |
3538 | --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c |
3539 | +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c |
3540 | @@ -280,7 +280,7 @@ static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d, |
3541 | u32 val; |
3542 | |
3543 | /* Make sure that pin is configured as interrupt */ |
3544 | - reg = bank->pctl_base + bank->pctl_offset; |
3545 | + reg = d->virt_base + bank->pctl_offset; |
3546 | shift = pin; |
3547 | if (bank_type->fld_width[PINCFG_TYPE_FUNC] * shift >= 32) { |
3548 | /* 4-bit bank type with 2 con regs */ |
3549 | @@ -308,8 +308,9 @@ static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d, |
3550 | static inline void s3c64xx_gpio_irq_set_mask(struct irq_data *irqd, bool mask) |
3551 | { |
3552 | struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); |
3553 | + struct samsung_pinctrl_drv_data *d = bank->drvdata; |
3554 | unsigned char index = EINT_OFFS(bank->eint_offset) + irqd->hwirq; |
3555 | - void __iomem *reg = bank->eint_base + EINTMASK_REG(bank->eint_offset); |
3556 | + void __iomem *reg = d->virt_base + EINTMASK_REG(bank->eint_offset); |
3557 | u32 val; |
3558 | |
3559 | val = readl(reg); |
3560 | @@ -333,8 +334,9 @@ static void s3c64xx_gpio_irq_mask(struct irq_data *irqd) |
3561 | static void s3c64xx_gpio_irq_ack(struct irq_data *irqd) |
3562 | { |
3563 | struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); |
3564 | + struct samsung_pinctrl_drv_data *d = bank->drvdata; |
3565 | unsigned char index = EINT_OFFS(bank->eint_offset) + irqd->hwirq; |
3566 | - void __iomem *reg = bank->eint_base + EINTPEND_REG(bank->eint_offset); |
3567 | + void __iomem *reg = d->virt_base + EINTPEND_REG(bank->eint_offset); |
3568 | |
3569 | writel(1 << index, reg); |
3570 | } |
3571 | @@ -357,7 +359,7 @@ static int s3c64xx_gpio_irq_set_type(struct irq_data *irqd, unsigned int type) |
3572 | s3c64xx_irq_set_handler(irqd, type); |
3573 | |
3574 | /* Set up interrupt trigger */ |
3575 | - reg = bank->eint_base + EINTCON_REG(bank->eint_offset); |
3576 | + reg = d->virt_base + EINTCON_REG(bank->eint_offset); |
3577 | shift = EINT_OFFS(bank->eint_offset) + irqd->hwirq; |
3578 | shift = 4 * (shift / 4); /* 4 EINTs per trigger selector */ |
3579 | |
3580 | @@ -409,8 +411,7 @@ static void s3c64xx_eint_gpio_irq(struct irq_desc *desc) |
3581 | { |
3582 | struct irq_chip *chip = irq_desc_get_chip(desc); |
3583 | struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc); |
3584 | - struct irq_data *irqd = irq_desc_get_irq_data(desc); |
3585 | - struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); |
3586 | + struct samsung_pinctrl_drv_data *drvdata = data->drvdata; |
3587 | |
3588 | chained_irq_enter(chip, desc); |
3589 | |
3590 | @@ -420,7 +421,7 @@ static void s3c64xx_eint_gpio_irq(struct irq_desc *desc) |
3591 | unsigned int pin; |
3592 | unsigned int virq; |
3593 | |
3594 | - svc = readl(bank->eint_base + SERVICE_REG); |
3595 | + svc = readl(drvdata->virt_base + SERVICE_REG); |
3596 | group = SVC_GROUP(svc); |
3597 | pin = svc & SVC_NUM_MASK; |
3598 | |
3599 | @@ -515,15 +516,15 @@ static inline void s3c64xx_eint0_irq_set_mask(struct irq_data *irqd, bool mask) |
3600 | { |
3601 | struct s3c64xx_eint0_domain_data *ddata = |
3602 | irq_data_get_irq_chip_data(irqd); |
3603 | - struct samsung_pin_bank *bank = ddata->bank; |
3604 | + struct samsung_pinctrl_drv_data *d = ddata->bank->drvdata; |
3605 | u32 val; |
3606 | |
3607 | - val = readl(bank->eint_base + EINT0MASK_REG); |
3608 | + val = readl(d->virt_base + EINT0MASK_REG); |
3609 | if (mask) |
3610 | val |= 1 << ddata->eints[irqd->hwirq]; |
3611 | else |
3612 | val &= ~(1 << ddata->eints[irqd->hwirq]); |
3613 | - writel(val, bank->eint_base + EINT0MASK_REG); |
3614 | + writel(val, d->virt_base + EINT0MASK_REG); |
3615 | } |
3616 | |
3617 | static void s3c64xx_eint0_irq_unmask(struct irq_data *irqd) |
3618 | @@ -540,10 +541,10 @@ static void s3c64xx_eint0_irq_ack(struct irq_data *irqd) |
3619 | { |
3620 | struct s3c64xx_eint0_domain_data *ddata = |
3621 | irq_data_get_irq_chip_data(irqd); |
3622 | - struct samsung_pin_bank *bank = ddata->bank; |
3623 | + struct samsung_pinctrl_drv_data *d = ddata->bank->drvdata; |
3624 | |
3625 | writel(1 << ddata->eints[irqd->hwirq], |
3626 | - bank->eint_base + EINT0PEND_REG); |
3627 | + d->virt_base + EINT0PEND_REG); |
3628 | } |
3629 | |
3630 | static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type) |
3631 | @@ -551,7 +552,7 @@ static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type) |
3632 | struct s3c64xx_eint0_domain_data *ddata = |
3633 | irq_data_get_irq_chip_data(irqd); |
3634 | struct samsung_pin_bank *bank = ddata->bank; |
3635 | - struct samsung_pinctrl_drv_data *d = ddata->bank->drvdata; |
3636 | + struct samsung_pinctrl_drv_data *d = bank->drvdata; |
3637 | void __iomem *reg; |
3638 | int trigger; |
3639 | u8 shift; |
3640 | @@ -566,7 +567,7 @@ static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type) |
3641 | s3c64xx_irq_set_handler(irqd, type); |
3642 | |
3643 | /* Set up interrupt trigger */ |
3644 | - reg = bank->eint_base + EINT0CON0_REG; |
3645 | + reg = d->virt_base + EINT0CON0_REG; |
3646 | shift = ddata->eints[irqd->hwirq]; |
3647 | if (shift >= EINT_MAX_PER_REG) { |
3648 | reg += 4; |
3649 | @@ -598,19 +599,14 @@ static struct irq_chip s3c64xx_eint0_irq_chip = { |
3650 | static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range) |
3651 | { |
3652 | struct irq_chip *chip = irq_desc_get_chip(desc); |
3653 | - struct irq_data *irqd = irq_desc_get_irq_data(desc); |
3654 | - struct s3c64xx_eint0_domain_data *ddata = |
3655 | - irq_data_get_irq_chip_data(irqd); |
3656 | - struct samsung_pin_bank *bank = ddata->bank; |
3657 | - |
3658 | struct s3c64xx_eint0_data *data = irq_desc_get_handler_data(desc); |
3659 | - |
3660 | + struct samsung_pinctrl_drv_data *drvdata = data->drvdata; |
3661 | unsigned int pend, mask; |
3662 | |
3663 | chained_irq_enter(chip, desc); |
3664 | |
3665 | - pend = readl(bank->eint_base + EINT0PEND_REG); |
3666 | - mask = readl(bank->eint_base + EINT0MASK_REG); |
3667 | + pend = readl(drvdata->virt_base + EINT0PEND_REG); |
3668 | + mask = readl(drvdata->virt_base + EINT0MASK_REG); |
3669 | |
3670 | pend = pend & range & ~mask; |
3671 | pend &= range; |
3672 | diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c |
3673 | index f542642eed8d..61bbd54e35ba 100644 |
3674 | --- a/drivers/pinctrl/samsung/pinctrl-samsung.c |
3675 | +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c |
3676 | @@ -1013,6 +1013,12 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, |
3677 | bank->eint_base = virt_base[0]; |
3678 | bank->pctl_base = virt_base[bdata->pctl_res_idx]; |
3679 | } |
3680 | + /* |
3681 | + * Legacy platforms should provide only one resource with IO memory. |
3682 | + * Store it as virt_base because legacy driver needs to access it |
3683 | + * through samsung_pinctrl_drv_data. |
3684 | + */ |
3685 | + d->virt_base = virt_base[0]; |
3686 | |
3687 | for_each_child_of_node(node, np) { |
3688 | if (!of_find_property(np, "gpio-controller", NULL)) |
3689 | diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h |
3690 | index 515a61035e54..61c4cab0ad24 100644 |
3691 | --- a/drivers/pinctrl/samsung/pinctrl-samsung.h |
3692 | +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h |
3693 | @@ -247,6 +247,10 @@ struct samsung_pin_ctrl { |
3694 | /** |
3695 | * struct samsung_pinctrl_drv_data: wrapper for holding driver data together. |
3696 | * @node: global list node |
3697 | + * @virt_base: register base address of the controller; this will be equal |
3698 | + * to each bank samsung_pin_bank->pctl_base and used on legacy |
3699 | + * platforms (like S3C24XX or S3C64XX) which has to access the base |
3700 | + * through samsung_pinctrl_drv_data, not samsung_pin_bank). |
3701 | * @dev: device instance representing the controller. |
3702 | * @irq: interrpt number used by the controller to notify gpio interrupts. |
3703 | * @ctrl: pin controller instance managed by the driver. |
3704 | @@ -262,6 +266,7 @@ struct samsung_pin_ctrl { |
3705 | */ |
3706 | struct samsung_pinctrl_drv_data { |
3707 | struct list_head node; |
3708 | + void __iomem *virt_base; |
3709 | struct device *dev; |
3710 | int irq; |
3711 | |
3712 | diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c |
3713 | index cc98aceed1c1..ce1cab320f6f 100644 |
3714 | --- a/drivers/regulator/cpcap-regulator.c |
3715 | +++ b/drivers/regulator/cpcap-regulator.c |
3716 | @@ -77,6 +77,8 @@ |
3717 | #define CPCAP_BIT_VAUDIO_MODE0 BIT(1) |
3718 | #define CPCAP_BIT_V_AUDIO_EN BIT(0) |
3719 | |
3720 | +#define CPCAP_BIT_AUDIO_NORMAL_MODE 0x00 |
3721 | + |
3722 | /* |
3723 | * Off mode configuration bit. Used currently only by SW5 on omap4. There's |
3724 | * the following comment in Motorola Linux kernel tree for it: |
3725 | @@ -217,7 +219,7 @@ static unsigned int cpcap_regulator_get_mode(struct regulator_dev *rdev) |
3726 | |
3727 | regmap_read(rdev->regmap, rdev->desc->enable_reg, &value); |
3728 | |
3729 | - if (!(value & CPCAP_BIT_AUDIO_LOW_PWR)) |
3730 | + if (value & CPCAP_BIT_AUDIO_LOW_PWR) |
3731 | return REGULATOR_MODE_STANDBY; |
3732 | |
3733 | return REGULATOR_MODE_NORMAL; |
3734 | @@ -230,10 +232,10 @@ static int cpcap_regulator_set_mode(struct regulator_dev *rdev, |
3735 | |
3736 | switch (mode) { |
3737 | case REGULATOR_MODE_NORMAL: |
3738 | - value = CPCAP_BIT_AUDIO_LOW_PWR; |
3739 | + value = CPCAP_BIT_AUDIO_NORMAL_MODE; |
3740 | break; |
3741 | case REGULATOR_MODE_STANDBY: |
3742 | - value = 0; |
3743 | + value = CPCAP_BIT_AUDIO_LOW_PWR; |
3744 | break; |
3745 | default: |
3746 | return -EINVAL; |
3747 | diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c |
3748 | index d5bf36ec8a75..34367d172961 100644 |
3749 | --- a/drivers/s390/scsi/zfcp_dbf.c |
3750 | +++ b/drivers/s390/scsi/zfcp_dbf.c |
3751 | @@ -3,7 +3,7 @@ |
3752 | * |
3753 | * Debug traces for zfcp. |
3754 | * |
3755 | - * Copyright IBM Corp. 2002, 2016 |
3756 | + * Copyright IBM Corp. 2002, 2017 |
3757 | */ |
3758 | |
3759 | #define KMSG_COMPONENT "zfcp" |
3760 | @@ -447,6 +447,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag, |
3761 | struct fc_ct_hdr *reqh = sg_virt(ct_els->req); |
3762 | struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1); |
3763 | struct scatterlist *resp_entry = ct_els->resp; |
3764 | + struct fc_ct_hdr *resph; |
3765 | struct fc_gpn_ft_resp *acc; |
3766 | int max_entries, x, last = 0; |
3767 | |
3768 | @@ -473,6 +474,13 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag, |
3769 | return len; /* not GPN_FT response so do not cap */ |
3770 | |
3771 | acc = sg_virt(resp_entry); |
3772 | + |
3773 | + /* cap all but accept CT responses to at least the CT header */ |
3774 | + resph = (struct fc_ct_hdr *)acc; |
3775 | + if ((ct_els->status) || |
3776 | + (resph->ct_cmd != cpu_to_be16(FC_FS_ACC))) |
3777 | + return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD); |
3778 | + |
3779 | max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp)) |
3780 | + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one |
3781 | * to account for header as 1st pseudo "entry" */; |
3782 | @@ -555,8 +563,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, |
3783 | rec->scsi_retries = sc->retries; |
3784 | rec->scsi_allowed = sc->allowed; |
3785 | rec->scsi_id = sc->device->id; |
3786 | - /* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */ |
3787 | rec->scsi_lun = (u32)sc->device->lun; |
3788 | + rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32); |
3789 | rec->host_scribble = (unsigned long)sc->host_scribble; |
3790 | |
3791 | memcpy(rec->scsi_opcode, sc->cmnd, |
3792 | @@ -564,19 +572,32 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, |
3793 | |
3794 | if (fsf) { |
3795 | rec->fsf_req_id = fsf->req_id; |
3796 | + rec->pl_len = FCP_RESP_WITH_EXT; |
3797 | fcp_rsp = (struct fcp_resp_with_ext *) |
3798 | &(fsf->qtcb->bottom.io.fcp_rsp); |
3799 | + /* mandatory parts of FCP_RSP IU in this SCSI record */ |
3800 | memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT); |
3801 | if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) { |
3802 | fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; |
3803 | rec->fcp_rsp_info = fcp_rsp_info->rsp_code; |
3804 | + rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len); |
3805 | } |
3806 | if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) { |
3807 | - rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE, |
3808 | - (u16)ZFCP_DBF_PAY_MAX_REC); |
3809 | - zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len, |
3810 | - "fcp_sns", fsf->req_id); |
3811 | + rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len); |
3812 | } |
3813 | + /* complete FCP_RSP IU in associated PAYload record |
3814 | + * but only if there are optional parts |
3815 | + */ |
3816 | + if (fcp_rsp->resp.fr_flags != 0) |
3817 | + zfcp_dbf_pl_write( |
3818 | + dbf, fcp_rsp, |
3819 | + /* at least one full PAY record |
3820 | + * but not beyond hardware response field |
3821 | + */ |
3822 | + min_t(u16, max_t(u16, rec->pl_len, |
3823 | + ZFCP_DBF_PAY_MAX_REC), |
3824 | + FSF_FCP_RSP_SIZE), |
3825 | + "fcp_riu", fsf->req_id); |
3826 | } |
3827 | |
3828 | debug_event(dbf->scsi, level, rec, sizeof(*rec)); |
3829 | diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h |
3830 | index db186d44cfaf..b60667c145fd 100644 |
3831 | --- a/drivers/s390/scsi/zfcp_dbf.h |
3832 | +++ b/drivers/s390/scsi/zfcp_dbf.h |
3833 | @@ -2,7 +2,7 @@ |
3834 | * zfcp device driver |
3835 | * debug feature declarations |
3836 | * |
3837 | - * Copyright IBM Corp. 2008, 2016 |
3838 | + * Copyright IBM Corp. 2008, 2017 |
3839 | */ |
3840 | |
3841 | #ifndef ZFCP_DBF_H |
3842 | @@ -204,7 +204,7 @@ enum zfcp_dbf_scsi_id { |
3843 | * @id: unique number of recovery record type |
3844 | * @tag: identifier string specifying the location of initiation |
3845 | * @scsi_id: scsi device id |
3846 | - * @scsi_lun: scsi device logical unit number |
3847 | + * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit |
3848 | * @scsi_result: scsi result |
3849 | * @scsi_retries: current retry number of scsi request |
3850 | * @scsi_allowed: allowed retries |
3851 | @@ -214,6 +214,7 @@ enum zfcp_dbf_scsi_id { |
3852 | * @host_scribble: LLD specific data attached to SCSI request |
3853 | * @pl_len: length of paload stored as zfcp_dbf_pay |
3854 | * @fsf_rsp: response for fsf request |
3855 | + * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit |
3856 | */ |
3857 | struct zfcp_dbf_scsi { |
3858 | u8 id; |
3859 | @@ -230,6 +231,7 @@ struct zfcp_dbf_scsi { |
3860 | u64 host_scribble; |
3861 | u16 pl_len; |
3862 | struct fcp_resp_with_ext fcp_rsp; |
3863 | + u32 scsi_lun_64_hi; |
3864 | } __packed; |
3865 | |
3866 | /** |
3867 | @@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) |
3868 | { |
3869 | struct fsf_qtcb *qtcb = req->qtcb; |
3870 | |
3871 | - if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && |
3872 | + if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED | |
3873 | + ZFCP_STATUS_FSFREQ_ERROR))) { |
3874 | + zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req); |
3875 | + |
3876 | + } else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && |
3877 | (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { |
3878 | zfcp_dbf_hba_fsf_resp("fs_perr", 1, req); |
3879 | |
3880 | @@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd, |
3881 | * @flag: indicates type of reset (Target Reset, Logical Unit Reset) |
3882 | */ |
3883 | static inline |
3884 | -void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag) |
3885 | +void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag, |
3886 | + struct zfcp_fsf_req *fsf_req) |
3887 | { |
3888 | char tmp_tag[ZFCP_DBF_TAG_LEN]; |
3889 | |
3890 | @@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag) |
3891 | memcpy(tmp_tag, "lr_", 3); |
3892 | |
3893 | memcpy(&tmp_tag[3], tag, 4); |
3894 | - _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL); |
3895 | + _zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req); |
3896 | } |
3897 | |
3898 | /** |
3899 | diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h |
3900 | index df2b541c8287..a2275825186f 100644 |
3901 | --- a/drivers/s390/scsi/zfcp_fc.h |
3902 | +++ b/drivers/s390/scsi/zfcp_fc.h |
3903 | @@ -4,7 +4,7 @@ |
3904 | * Fibre Channel related definitions and inline functions for the zfcp |
3905 | * device driver |
3906 | * |
3907 | - * Copyright IBM Corp. 2009 |
3908 | + * Copyright IBM Corp. 2009, 2017 |
3909 | */ |
3910 | |
3911 | #ifndef ZFCP_FC_H |
3912 | @@ -279,6 +279,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp, |
3913 | !(rsp_flags & FCP_SNS_LEN_VAL) && |
3914 | fcp_rsp->resp.fr_status == SAM_STAT_GOOD) |
3915 | set_host_byte(scsi, DID_ERROR); |
3916 | + } else if (unlikely(rsp_flags & FCP_RESID_OVER)) { |
3917 | + /* FCP_DL was not sufficient for SCSI data length */ |
3918 | + if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD) |
3919 | + set_host_byte(scsi, DID_ERROR); |
3920 | } |
3921 | } |
3922 | |
3923 | diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c |
3924 | index 27ff38f839fc..1964391db904 100644 |
3925 | --- a/drivers/s390/scsi/zfcp_fsf.c |
3926 | +++ b/drivers/s390/scsi/zfcp_fsf.c |
3927 | @@ -928,8 +928,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) |
3928 | |
3929 | switch (header->fsf_status) { |
3930 | case FSF_GOOD: |
3931 | - zfcp_dbf_san_res("fsscth2", req); |
3932 | ct->status = 0; |
3933 | + zfcp_dbf_san_res("fsscth2", req); |
3934 | break; |
3935 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: |
3936 | zfcp_fsf_class_not_supp(req); |
3937 | @@ -1109,8 +1109,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) |
3938 | |
3939 | switch (header->fsf_status) { |
3940 | case FSF_GOOD: |
3941 | - zfcp_dbf_san_res("fsselh1", req); |
3942 | send_els->status = 0; |
3943 | + zfcp_dbf_san_res("fsselh1", req); |
3944 | break; |
3945 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: |
3946 | zfcp_fsf_class_not_supp(req); |
3947 | @@ -2258,7 +2258,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) |
3948 | fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; |
3949 | zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0); |
3950 | |
3951 | - if (scsi_prot_sg_count(scsi_cmnd)) { |
3952 | + if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) && |
3953 | + scsi_prot_sg_count(scsi_cmnd)) { |
3954 | zfcp_qdio_set_data_div(qdio, &req->qdio_req, |
3955 | scsi_prot_sg_count(scsi_cmnd)); |
3956 | retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, |
3957 | diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c |
3958 | index 0678cf714c0e..a1eeeaaa0fca 100644 |
3959 | --- a/drivers/s390/scsi/zfcp_scsi.c |
3960 | +++ b/drivers/s390/scsi/zfcp_scsi.c |
3961 | @@ -3,7 +3,7 @@ |
3962 | * |
3963 | * Interface to Linux SCSI midlayer. |
3964 | * |
3965 | - * Copyright IBM Corp. 2002, 2016 |
3966 | + * Copyright IBM Corp. 2002, 2017 |
3967 | */ |
3968 | |
3969 | #define KMSG_COMPONENT "zfcp" |
3970 | @@ -273,25 +273,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) |
3971 | |
3972 | zfcp_erp_wait(adapter); |
3973 | ret = fc_block_scsi_eh(scpnt); |
3974 | - if (ret) |
3975 | + if (ret) { |
3976 | + zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL); |
3977 | return ret; |
3978 | + } |
3979 | |
3980 | if (!(atomic_read(&adapter->status) & |
3981 | ZFCP_STATUS_COMMON_RUNNING)) { |
3982 | - zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags); |
3983 | + zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL); |
3984 | return SUCCESS; |
3985 | } |
3986 | } |
3987 | - if (!fsf_req) |
3988 | + if (!fsf_req) { |
3989 | + zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL); |
3990 | return FAILED; |
3991 | + } |
3992 | |
3993 | wait_for_completion(&fsf_req->completion); |
3994 | |
3995 | if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { |
3996 | - zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags); |
3997 | + zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req); |
3998 | retval = FAILED; |
3999 | } else { |
4000 | - zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags); |
4001 | + zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req); |
4002 | zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags); |
4003 | } |
4004 | |
4005 | diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c |
4006 | index a1a2c71e1626..b051d97af468 100644 |
4007 | --- a/drivers/scsi/aacraid/aachba.c |
4008 | +++ b/drivers/scsi/aacraid/aachba.c |
4009 | @@ -594,6 +594,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd) |
4010 | |
4011 | aac_fib_init(cmd_fibcontext); |
4012 | dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); |
4013 | + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4014 | |
4015 | dinfo->command = cpu_to_le32(VM_ContainerConfig); |
4016 | dinfo->type = cpu_to_le32(CT_READ_NAME); |
4017 | @@ -611,10 +612,8 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd) |
4018 | /* |
4019 | * Check that the command queued to the controller |
4020 | */ |
4021 | - if (status == -EINPROGRESS) { |
4022 | - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4023 | + if (status == -EINPROGRESS) |
4024 | return 0; |
4025 | - } |
4026 | |
4027 | printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status); |
4028 | aac_fib_complete(cmd_fibcontext); |
4029 | @@ -725,6 +724,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) |
4030 | |
4031 | dinfo->count = cpu_to_le32(scmd_id(scsicmd)); |
4032 | dinfo->type = cpu_to_le32(FT_FILESYS); |
4033 | + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4034 | |
4035 | status = aac_fib_send(ContainerCommand, |
4036 | fibptr, |
4037 | @@ -736,9 +736,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) |
4038 | /* |
4039 | * Check that the command queued to the controller |
4040 | */ |
4041 | - if (status == -EINPROGRESS) |
4042 | - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4043 | - else if (status < 0) { |
4044 | + if (status < 0 && status != -EINPROGRESS) { |
4045 | /* Inherit results from VM_NameServe, if any */ |
4046 | dresp->status = cpu_to_le32(ST_OK); |
4047 | _aac_probe_container2(context, fibptr); |
4048 | @@ -766,6 +764,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru |
4049 | dinfo->count = cpu_to_le32(scmd_id(scsicmd)); |
4050 | dinfo->type = cpu_to_le32(FT_FILESYS); |
4051 | scsicmd->SCp.ptr = (char *)callback; |
4052 | + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4053 | |
4054 | status = aac_fib_send(ContainerCommand, |
4055 | fibptr, |
4056 | @@ -777,10 +776,9 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru |
4057 | /* |
4058 | * Check that the command queued to the controller |
4059 | */ |
4060 | - if (status == -EINPROGRESS) { |
4061 | - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4062 | + if (status == -EINPROGRESS) |
4063 | return 0; |
4064 | - } |
4065 | + |
4066 | if (status < 0) { |
4067 | scsicmd->SCp.ptr = NULL; |
4068 | aac_fib_complete(fibptr); |
4069 | @@ -1126,6 +1124,7 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd) |
4070 | dinfo->command = cpu_to_le32(VM_ContainerConfig); |
4071 | dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID); |
4072 | dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); |
4073 | + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4074 | |
4075 | status = aac_fib_send(ContainerCommand, |
4076 | cmd_fibcontext, |
4077 | @@ -1138,10 +1137,8 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd) |
4078 | /* |
4079 | * Check that the command queued to the controller |
4080 | */ |
4081 | - if (status == -EINPROGRESS) { |
4082 | - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4083 | + if (status == -EINPROGRESS) |
4084 | return 0; |
4085 | - } |
4086 | |
4087 | printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status); |
4088 | aac_fib_complete(cmd_fibcontext); |
4089 | @@ -2335,16 +2332,14 @@ static int aac_read(struct scsi_cmnd * scsicmd) |
4090 | * Alocate and initialize a Fib |
4091 | */ |
4092 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); |
4093 | - |
4094 | + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4095 | status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count); |
4096 | |
4097 | /* |
4098 | * Check that the command queued to the controller |
4099 | */ |
4100 | - if (status == -EINPROGRESS) { |
4101 | - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4102 | + if (status == -EINPROGRESS) |
4103 | return 0; |
4104 | - } |
4105 | |
4106 | printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status); |
4107 | /* |
4108 | @@ -2429,16 +2424,14 @@ static int aac_write(struct scsi_cmnd * scsicmd) |
4109 | * Allocate and initialize a Fib then setup a BlockWrite command |
4110 | */ |
4111 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); |
4112 | - |
4113 | + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4114 | status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); |
4115 | |
4116 | /* |
4117 | * Check that the command queued to the controller |
4118 | */ |
4119 | - if (status == -EINPROGRESS) { |
4120 | - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4121 | + if (status == -EINPROGRESS) |
4122 | return 0; |
4123 | - } |
4124 | |
4125 | printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status); |
4126 | /* |
4127 | @@ -2588,6 +2581,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd) |
4128 | synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd)); |
4129 | synchronizecmd->count = |
4130 | cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data)); |
4131 | + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4132 | |
4133 | /* |
4134 | * Now send the Fib to the adapter |
4135 | @@ -2603,10 +2597,8 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd) |
4136 | /* |
4137 | * Check that the command queued to the controller |
4138 | */ |
4139 | - if (status == -EINPROGRESS) { |
4140 | - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4141 | + if (status == -EINPROGRESS) |
4142 | return 0; |
4143 | - } |
4144 | |
4145 | printk(KERN_WARNING |
4146 | "aac_synchronize: aac_fib_send failed with status: %d.\n", status); |
4147 | @@ -2666,6 +2658,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd) |
4148 | pmcmd->cid = cpu_to_le32(sdev_id(sdev)); |
4149 | pmcmd->parm = (scsicmd->cmnd[1] & 1) ? |
4150 | cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0; |
4151 | + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4152 | |
4153 | /* |
4154 | * Now send the Fib to the adapter |
4155 | @@ -2681,10 +2674,8 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd) |
4156 | /* |
4157 | * Check that the command queued to the controller |
4158 | */ |
4159 | - if (status == -EINPROGRESS) { |
4160 | - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4161 | + if (status == -EINPROGRESS) |
4162 | return 0; |
4163 | - } |
4164 | |
4165 | aac_fib_complete(cmd_fibcontext); |
4166 | aac_fib_free(cmd_fibcontext); |
4167 | @@ -3692,16 +3683,14 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) |
4168 | * Allocate and initialize a Fib then setup a BlockWrite command |
4169 | */ |
4170 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); |
4171 | - |
4172 | + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4173 | status = aac_adapter_scsi(cmd_fibcontext, scsicmd); |
4174 | |
4175 | /* |
4176 | * Check that the command queued to the controller |
4177 | */ |
4178 | - if (status == -EINPROGRESS) { |
4179 | - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4180 | + if (status == -EINPROGRESS) |
4181 | return 0; |
4182 | - } |
4183 | |
4184 | printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status); |
4185 | aac_fib_complete(cmd_fibcontext); |
4186 | @@ -3739,15 +3728,14 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd) |
4187 | if (!cmd_fibcontext) |
4188 | return -1; |
4189 | |
4190 | + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4191 | status = aac_adapter_hba(cmd_fibcontext, scsicmd); |
4192 | |
4193 | /* |
4194 | * Check that the command queued to the controller |
4195 | */ |
4196 | - if (status == -EINPROGRESS) { |
4197 | - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; |
4198 | + if (status == -EINPROGRESS) |
4199 | return 0; |
4200 | - } |
4201 | |
4202 | pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n", |
4203 | status); |
4204 | diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c |
4205 | index 71c4746341ea..3ee4ea79f81a 100644 |
4206 | --- a/drivers/scsi/megaraid/megaraid_sas_base.c |
4207 | +++ b/drivers/scsi/megaraid/megaraid_sas_base.c |
4208 | @@ -1995,9 +1995,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc |
4209 | if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { |
4210 | cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; |
4211 | if (cmd_mfi->sync_cmd && |
4212 | - cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) |
4213 | + (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { |
4214 | + cmd_mfi->frame->hdr.cmd_status = |
4215 | + MFI_STAT_WRONG_STATE; |
4216 | megasas_complete_cmd(instance, |
4217 | cmd_mfi, DID_OK); |
4218 | + } |
4219 | } |
4220 | } |
4221 | } else { |
4222 | @@ -5478,7 +5481,8 @@ static int megasas_init_fw(struct megasas_instance *instance) |
4223 | instance->throttlequeuedepth = |
4224 | MEGASAS_THROTTLE_QUEUE_DEPTH; |
4225 | |
4226 | - if (resetwaittime > MEGASAS_RESET_WAIT_TIME) |
4227 | + if ((resetwaittime < 1) || |
4228 | + (resetwaittime > MEGASAS_RESET_WAIT_TIME)) |
4229 | resetwaittime = MEGASAS_RESET_WAIT_TIME; |
4230 | |
4231 | if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) |
4232 | @@ -5649,6 +5653,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, |
4233 | prev_aen.word = |
4234 | le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); |
4235 | |
4236 | + if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) || |
4237 | + (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) { |
4238 | + dev_info(&instance->pdev->dev, |
4239 | + "%s %d out of range class %d send by application\n", |
4240 | + __func__, __LINE__, curr_aen.members.class); |
4241 | + return 0; |
4242 | + } |
4243 | + |
4244 | /* |
4245 | * A class whose enum value is smaller is inclusive of all |
4246 | * higher values. If a PROGRESS (= -1) was previously |
4247 | diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c |
4248 | index 985510628f56..8152962f152d 100644 |
4249 | --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c |
4250 | +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c |
4251 | @@ -3287,7 +3287,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, |
4252 | mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | |
4253 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; |
4254 | |
4255 | - mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz); |
4256 | + mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size); |
4257 | } |
4258 | |
4259 | /** |
4260 | diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c |
4261 | index 2c3783684815..85e7bae4a7ef 100644 |
4262 | --- a/drivers/scsi/qedi/qedi_main.c |
4263 | +++ b/drivers/scsi/qedi/qedi_main.c |
4264 | @@ -1575,7 +1575,7 @@ struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid) |
4265 | { |
4266 | struct qedi_cmd *cmd = NULL; |
4267 | |
4268 | - if (tid > MAX_ISCSI_TASK_ENTRIES) |
4269 | + if (tid >= MAX_ISCSI_TASK_ENTRIES) |
4270 | return NULL; |
4271 | |
4272 | cmd = qedi->itt_map[tid].p_cmd; |
4273 | diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c |
4274 | index 08a1feb3a195..8c6ff1682fb1 100644 |
4275 | --- a/drivers/scsi/qla2xxx/qla_attr.c |
4276 | +++ b/drivers/scsi/qla2xxx/qla_attr.c |
4277 | @@ -318,6 +318,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, |
4278 | return -EINVAL; |
4279 | if (start > ha->optrom_size) |
4280 | return -EINVAL; |
4281 | + if (size > ha->optrom_size - start) |
4282 | + size = ha->optrom_size - start; |
4283 | |
4284 | mutex_lock(&ha->optrom_mutex); |
4285 | switch (val) { |
4286 | @@ -343,8 +345,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, |
4287 | } |
4288 | |
4289 | ha->optrom_region_start = start; |
4290 | - ha->optrom_region_size = start + size > ha->optrom_size ? |
4291 | - ha->optrom_size - start : size; |
4292 | + ha->optrom_region_size = start + size; |
4293 | |
4294 | ha->optrom_state = QLA_SREADING; |
4295 | ha->optrom_buffer = vmalloc(ha->optrom_region_size); |
4296 | @@ -417,8 +418,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, |
4297 | } |
4298 | |
4299 | ha->optrom_region_start = start; |
4300 | - ha->optrom_region_size = start + size > ha->optrom_size ? |
4301 | - ha->optrom_size - start : size; |
4302 | + ha->optrom_region_size = start + size; |
4303 | |
4304 | ha->optrom_state = QLA_SWRITING; |
4305 | ha->optrom_buffer = vmalloc(ha->optrom_region_size); |
4306 | diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c |
4307 | index b323a7c71eda..0ec250993e93 100644 |
4308 | --- a/drivers/scsi/qla2xxx/qla_gs.c |
4309 | +++ b/drivers/scsi/qla2xxx/qla_gs.c |
4310 | @@ -3080,7 +3080,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) |
4311 | GPSC_RSP_SIZE); |
4312 | |
4313 | /* GPSC req */ |
4314 | - memcpy(ct_req->req.gpsc.port_name, fcport->port_name, |
4315 | + memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name, |
4316 | WWN_SIZE); |
4317 | |
4318 | sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; |
4319 | diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c |
4320 | index 072ad1aa5505..8f83571afc7b 100644 |
4321 | --- a/drivers/scsi/qla2xxx/qla_init.c |
4322 | +++ b/drivers/scsi/qla2xxx/qla_init.c |
4323 | @@ -7804,6 +7804,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, |
4324 | ha->queue_pair_map[qpair_id] = qpair; |
4325 | qpair->id = qpair_id; |
4326 | qpair->vp_idx = vp_idx; |
4327 | + qpair->fw_started = ha->flags.fw_started; |
4328 | INIT_LIST_HEAD(&qpair->hints_list); |
4329 | qpair->chip_reset = ha->base_qpair->chip_reset; |
4330 | qpair->enable_class_2 = ha->base_qpair->enable_class_2; |
4331 | diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c |
4332 | index 7b3b702ef622..ec2c398f5663 100644 |
4333 | --- a/drivers/scsi/qla2xxx/qla_isr.c |
4334 | +++ b/drivers/scsi/qla2xxx/qla_isr.c |
4335 | @@ -3429,7 +3429,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) |
4336 | } |
4337 | |
4338 | /* Enable MSI-X vector for response queue update for queue 0 */ |
4339 | - if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { |
4340 | + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { |
4341 | if (ha->msixbase && ha->mqiobase && |
4342 | (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || |
4343 | ql2xmqsupport)) |
4344 | diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c |
4345 | index 7c6d1a404011..1f1a81c6eaa9 100644 |
4346 | --- a/drivers/scsi/qla2xxx/qla_mbx.c |
4347 | +++ b/drivers/scsi/qla2xxx/qla_mbx.c |
4348 | @@ -54,6 +54,7 @@ static struct rom_cmd { |
4349 | { MBC_GET_MEM_OFFLOAD_CNTRL_STAT }, |
4350 | { MBC_GET_RETRY_COUNT }, |
4351 | { MBC_TRACE_CONTROL }, |
4352 | + { MBC_INITIALIZE_MULTIQ }, |
4353 | }; |
4354 | |
4355 | static int is_rom_cmd(uint16_t cmd) |
4356 | @@ -3689,7 +3690,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, |
4357 | if (qla_ini_mode_enabled(vha) && |
4358 | ha->flags.fawwpn_enabled && |
4359 | (rptid_entry->u.f1.flags & |
4360 | - VP_FLAGS_NAME_VALID)) { |
4361 | + BIT_6)) { |
4362 | memcpy(vha->port_name, |
4363 | rptid_entry->u.f1.port_name, |
4364 | WWN_SIZE); |
4365 | diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c |
4366 | index f0605cd196fb..3089094b48fa 100644 |
4367 | --- a/drivers/scsi/qla2xxx/qla_mid.c |
4368 | +++ b/drivers/scsi/qla2xxx/qla_mid.c |
4369 | @@ -74,7 +74,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) |
4370 | * ensures no active vp_list traversal while the vport is removed |
4371 | * from the queue) |
4372 | */ |
4373 | - wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count), |
4374 | + wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count), |
4375 | 10*HZ); |
4376 | |
4377 | spin_lock_irqsave(&ha->vport_slock, flags); |
4378 | diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c |
4379 | index e101cd3043b9..4e2a64773060 100644 |
4380 | --- a/drivers/scsi/qla2xxx/qla_target.c |
4381 | +++ b/drivers/scsi/qla2xxx/qla_target.c |
4382 | @@ -6796,7 +6796,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) |
4383 | if (!QLA_TGT_MODE_ENABLED()) |
4384 | return; |
4385 | |
4386 | - if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { |
4387 | + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { |
4388 | ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; |
4389 | ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; |
4390 | } else { |
4391 | diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c |
4392 | index aad6ebb51735..1a9de8419997 100644 |
4393 | --- a/drivers/scsi/sg.c |
4394 | +++ b/drivers/scsi/sg.c |
4395 | @@ -828,6 +828,39 @@ static int max_sectors_bytes(struct request_queue *q) |
4396 | return max_sectors << 9; |
4397 | } |
4398 | |
4399 | +static void |
4400 | +sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo) |
4401 | +{ |
4402 | + Sg_request *srp; |
4403 | + int val; |
4404 | + unsigned int ms; |
4405 | + |
4406 | + val = 0; |
4407 | + list_for_each_entry(srp, &sfp->rq_list, entry) { |
4408 | + if (val > SG_MAX_QUEUE) |
4409 | + break; |
4410 | + rinfo[val].req_state = srp->done + 1; |
4411 | + rinfo[val].problem = |
4412 | + srp->header.masked_status & |
4413 | + srp->header.host_status & |
4414 | + srp->header.driver_status; |
4415 | + if (srp->done) |
4416 | + rinfo[val].duration = |
4417 | + srp->header.duration; |
4418 | + else { |
4419 | + ms = jiffies_to_msecs(jiffies); |
4420 | + rinfo[val].duration = |
4421 | + (ms > srp->header.duration) ? |
4422 | + (ms - srp->header.duration) : 0; |
4423 | + } |
4424 | + rinfo[val].orphan = srp->orphan; |
4425 | + rinfo[val].sg_io_owned = srp->sg_io_owned; |
4426 | + rinfo[val].pack_id = srp->header.pack_id; |
4427 | + rinfo[val].usr_ptr = srp->header.usr_ptr; |
4428 | + val++; |
4429 | + } |
4430 | +} |
4431 | + |
4432 | static long |
4433 | sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) |
4434 | { |
4435 | @@ -1012,38 +1045,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) |
4436 | return -EFAULT; |
4437 | else { |
4438 | sg_req_info_t *rinfo; |
4439 | - unsigned int ms; |
4440 | |
4441 | - rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, |
4442 | - GFP_KERNEL); |
4443 | + rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, |
4444 | + GFP_KERNEL); |
4445 | if (!rinfo) |
4446 | return -ENOMEM; |
4447 | read_lock_irqsave(&sfp->rq_list_lock, iflags); |
4448 | - val = 0; |
4449 | - list_for_each_entry(srp, &sfp->rq_list, entry) { |
4450 | - if (val >= SG_MAX_QUEUE) |
4451 | - break; |
4452 | - memset(&rinfo[val], 0, SZ_SG_REQ_INFO); |
4453 | - rinfo[val].req_state = srp->done + 1; |
4454 | - rinfo[val].problem = |
4455 | - srp->header.masked_status & |
4456 | - srp->header.host_status & |
4457 | - srp->header.driver_status; |
4458 | - if (srp->done) |
4459 | - rinfo[val].duration = |
4460 | - srp->header.duration; |
4461 | - else { |
4462 | - ms = jiffies_to_msecs(jiffies); |
4463 | - rinfo[val].duration = |
4464 | - (ms > srp->header.duration) ? |
4465 | - (ms - srp->header.duration) : 0; |
4466 | - } |
4467 | - rinfo[val].orphan = srp->orphan; |
4468 | - rinfo[val].sg_io_owned = srp->sg_io_owned; |
4469 | - rinfo[val].pack_id = srp->header.pack_id; |
4470 | - rinfo[val].usr_ptr = srp->header.usr_ptr; |
4471 | - val++; |
4472 | - } |
4473 | + sg_fill_request_table(sfp, rinfo); |
4474 | read_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
4475 | result = __copy_to_user(p, rinfo, |
4476 | SZ_SG_REQ_INFO * SG_MAX_QUEUE); |
4477 | diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c |
4478 | index 3cc8d67783a1..5e7200f05873 100644 |
4479 | --- a/drivers/scsi/storvsc_drv.c |
4480 | +++ b/drivers/scsi/storvsc_drv.c |
4481 | @@ -1640,6 +1640,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) |
4482 | put_cpu(); |
4483 | |
4484 | if (ret == -EAGAIN) { |
4485 | + if (payload_sz > sizeof(cmd_request->mpb)) |
4486 | + kfree(payload); |
4487 | /* no more space */ |
4488 | return SCSI_MLQUEUE_DEVICE_BUSY; |
4489 | } |
4490 | diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c |
4491 | index 4e7a4e9dcf4d..f8eba1c5412f 100644 |
4492 | --- a/drivers/tty/tty_buffer.c |
4493 | +++ b/drivers/tty/tty_buffer.c |
4494 | @@ -361,6 +361,32 @@ int tty_insert_flip_string_flags(struct tty_port *port, |
4495 | } |
4496 | EXPORT_SYMBOL(tty_insert_flip_string_flags); |
4497 | |
4498 | +/** |
4499 | + * __tty_insert_flip_char - Add one character to the tty buffer |
4500 | + * @port: tty port |
4501 | + * @ch: character |
4502 | + * @flag: flag byte |
4503 | + * |
4504 | + * Queue a single byte to the tty buffering, with an optional flag. |
4505 | + * This is the slow path of tty_insert_flip_char. |
4506 | + */ |
4507 | +int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag) |
4508 | +{ |
4509 | + struct tty_buffer *tb; |
4510 | + int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0; |
4511 | + |
4512 | + if (!__tty_buffer_request_room(port, 1, flags)) |
4513 | + return 0; |
4514 | + |
4515 | + tb = port->buf.tail; |
4516 | + if (~tb->flags & TTYB_NORMAL) |
4517 | + *flag_buf_ptr(tb, tb->used) = flag; |
4518 | + *char_buf_ptr(tb, tb->used++) = ch; |
4519 | + |
4520 | + return 1; |
4521 | +} |
4522 | +EXPORT_SYMBOL(__tty_insert_flip_char); |
4523 | + |
4524 | /** |
4525 | * tty_schedule_flip - push characters to ldisc |
4526 | * @port: tty port to push from |
4527 | diff --git a/fs/ext4/file.c b/fs/ext4/file.c |
4528 | index 0d7cf0cc9b87..86ea1d92839a 100644 |
4529 | --- a/fs/ext4/file.c |
4530 | +++ b/fs/ext4/file.c |
4531 | @@ -595,7 +595,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) |
4532 | inode_lock(inode); |
4533 | |
4534 | isize = i_size_read(inode); |
4535 | - if (offset >= isize) { |
4536 | + if (offset < 0 || offset >= isize) { |
4537 | inode_unlock(inode); |
4538 | return -ENXIO; |
4539 | } |
4540 | @@ -658,7 +658,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) |
4541 | inode_lock(inode); |
4542 | |
4543 | isize = i_size_read(inode); |
4544 | - if (offset >= isize) { |
4545 | + if (offset < 0 || offset >= isize) { |
4546 | inode_unlock(inode); |
4547 | return -ENXIO; |
4548 | } |
4549 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
4550 | index d61a70e2193a..c9e7be58756b 100644 |
4551 | --- a/fs/ext4/super.c |
4552 | +++ b/fs/ext4/super.c |
4553 | @@ -2404,6 +2404,7 @@ static void ext4_orphan_cleanup(struct super_block *sb, |
4554 | unsigned int s_flags = sb->s_flags; |
4555 | int ret, nr_orphans = 0, nr_truncates = 0; |
4556 | #ifdef CONFIG_QUOTA |
4557 | + int quota_update = 0; |
4558 | int i; |
4559 | #endif |
4560 | if (!es->s_last_orphan) { |
4561 | @@ -2442,14 +2443,32 @@ static void ext4_orphan_cleanup(struct super_block *sb, |
4562 | #ifdef CONFIG_QUOTA |
4563 | /* Needed for iput() to work correctly and not trash data */ |
4564 | sb->s_flags |= MS_ACTIVE; |
4565 | - /* Turn on quotas so that they are updated correctly */ |
4566 | + |
4567 | + /* |
4568 | + * Turn on quotas which were not enabled for read-only mounts if |
4569 | + * filesystem has quota feature, so that they are updated correctly. |
4570 | + */ |
4571 | + if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) { |
4572 | + int ret = ext4_enable_quotas(sb); |
4573 | + |
4574 | + if (!ret) |
4575 | + quota_update = 1; |
4576 | + else |
4577 | + ext4_msg(sb, KERN_ERR, |
4578 | + "Cannot turn on quotas: error %d", ret); |
4579 | + } |
4580 | + |
4581 | + /* Turn on journaled quotas used for old sytle */ |
4582 | for (i = 0; i < EXT4_MAXQUOTAS; i++) { |
4583 | if (EXT4_SB(sb)->s_qf_names[i]) { |
4584 | int ret = ext4_quota_on_mount(sb, i); |
4585 | - if (ret < 0) |
4586 | + |
4587 | + if (!ret) |
4588 | + quota_update = 1; |
4589 | + else |
4590 | ext4_msg(sb, KERN_ERR, |
4591 | "Cannot turn on journaled " |
4592 | - "quota: error %d", ret); |
4593 | + "quota: type %d: error %d", i, ret); |
4594 | } |
4595 | } |
4596 | #endif |
4597 | @@ -2510,10 +2529,12 @@ static void ext4_orphan_cleanup(struct super_block *sb, |
4598 | ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up", |
4599 | PLURAL(nr_truncates)); |
4600 | #ifdef CONFIG_QUOTA |
4601 | - /* Turn quotas off */ |
4602 | - for (i = 0; i < EXT4_MAXQUOTAS; i++) { |
4603 | - if (sb_dqopt(sb)->files[i]) |
4604 | - dquot_quota_off(sb, i); |
4605 | + /* Turn off quotas if they were enabled for orphan cleanup */ |
4606 | + if (quota_update) { |
4607 | + for (i = 0; i < EXT4_MAXQUOTAS; i++) { |
4608 | + if (sb_dqopt(sb)->files[i]) |
4609 | + dquot_quota_off(sb, i); |
4610 | + } |
4611 | } |
4612 | #endif |
4613 | sb->s_flags = s_flags; /* Restore MS_RDONLY status */ |
4614 | @@ -5512,6 +5533,9 @@ static int ext4_enable_quotas(struct super_block *sb) |
4615 | DQUOT_USAGE_ENABLED | |
4616 | (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); |
4617 | if (err) { |
4618 | + for (type--; type >= 0; type--) |
4619 | + dquot_quota_off(sb, type); |
4620 | + |
4621 | ext4_warning(sb, |
4622 | "Failed to enable quota tracking " |
4623 | "(type=%d, err=%d). Please run " |
4624 | diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c |
4625 | index 7a3754488312..9409aac232f7 100644 |
4626 | --- a/fs/orangefs/acl.c |
4627 | +++ b/fs/orangefs/acl.c |
4628 | @@ -61,9 +61,9 @@ struct posix_acl *orangefs_get_acl(struct inode *inode, int type) |
4629 | return acl; |
4630 | } |
4631 | |
4632 | -int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
4633 | +static int __orangefs_set_acl(struct inode *inode, struct posix_acl *acl, |
4634 | + int type) |
4635 | { |
4636 | - struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); |
4637 | int error = 0; |
4638 | void *value = NULL; |
4639 | size_t size = 0; |
4640 | @@ -72,22 +72,6 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
4641 | switch (type) { |
4642 | case ACL_TYPE_ACCESS: |
4643 | name = XATTR_NAME_POSIX_ACL_ACCESS; |
4644 | - if (acl) { |
4645 | - umode_t mode; |
4646 | - |
4647 | - error = posix_acl_update_mode(inode, &mode, &acl); |
4648 | - if (error) { |
4649 | - gossip_err("%s: posix_acl_update_mode err: %d\n", |
4650 | - __func__, |
4651 | - error); |
4652 | - return error; |
4653 | - } |
4654 | - |
4655 | - if (inode->i_mode != mode) |
4656 | - SetModeFlag(orangefs_inode); |
4657 | - inode->i_mode = mode; |
4658 | - mark_inode_dirty_sync(inode); |
4659 | - } |
4660 | break; |
4661 | case ACL_TYPE_DEFAULT: |
4662 | name = XATTR_NAME_POSIX_ACL_DEFAULT; |
4663 | @@ -132,6 +116,29 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
4664 | return error; |
4665 | } |
4666 | |
4667 | +int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
4668 | +{ |
4669 | + int error; |
4670 | + |
4671 | + if (type == ACL_TYPE_ACCESS && acl) { |
4672 | + umode_t mode; |
4673 | + |
4674 | + error = posix_acl_update_mode(inode, &mode, &acl); |
4675 | + if (error) { |
4676 | + gossip_err("%s: posix_acl_update_mode err: %d\n", |
4677 | + __func__, |
4678 | + error); |
4679 | + return error; |
4680 | + } |
4681 | + |
4682 | + if (inode->i_mode != mode) |
4683 | + SetModeFlag(ORANGEFS_I(inode)); |
4684 | + inode->i_mode = mode; |
4685 | + mark_inode_dirty_sync(inode); |
4686 | + } |
4687 | + return __orangefs_set_acl(inode, acl, type); |
4688 | +} |
4689 | + |
4690 | int orangefs_init_acl(struct inode *inode, struct inode *dir) |
4691 | { |
4692 | struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); |
4693 | @@ -146,13 +153,14 @@ int orangefs_init_acl(struct inode *inode, struct inode *dir) |
4694 | return error; |
4695 | |
4696 | if (default_acl) { |
4697 | - error = orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); |
4698 | + error = __orangefs_set_acl(inode, default_acl, |
4699 | + ACL_TYPE_DEFAULT); |
4700 | posix_acl_release(default_acl); |
4701 | } |
4702 | |
4703 | if (acl) { |
4704 | if (!error) |
4705 | - error = orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS); |
4706 | + error = __orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS); |
4707 | posix_acl_release(acl); |
4708 | } |
4709 | |
4710 | diff --git a/include/linux/ccp.h b/include/linux/ccp.h |
4711 | index 3285c944194a..ab693c3afd0d 100644 |
4712 | --- a/include/linux/ccp.h |
4713 | +++ b/include/linux/ccp.h |
4714 | @@ -1,7 +1,7 @@ |
4715 | /* |
4716 | * AMD Cryptographic Coprocessor (CCP) driver |
4717 | * |
4718 | - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. |
4719 | + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. |
4720 | * |
4721 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
4722 | * Author: Gary R Hook <gary.hook@amd.com> |
4723 | @@ -231,6 +231,7 @@ enum ccp_xts_aes_unit_size { |
4724 | * AES operation the new IV overwrites the old IV. |
4725 | */ |
4726 | struct ccp_xts_aes_engine { |
4727 | + enum ccp_aes_type type; |
4728 | enum ccp_aes_action action; |
4729 | enum ccp_xts_aes_unit_size unit_size; |
4730 | |
4731 | diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h |
4732 | index 898cfe2eeb42..bc46e729fdde 100644 |
4733 | --- a/include/linux/cpuset.h |
4734 | +++ b/include/linux/cpuset.h |
4735 | @@ -57,7 +57,9 @@ static inline void cpuset_dec(void) |
4736 | |
4737 | extern int cpuset_init(void); |
4738 | extern void cpuset_init_smp(void); |
4739 | +extern void cpuset_force_rebuild(void); |
4740 | extern void cpuset_update_active_cpus(void); |
4741 | +extern void cpuset_wait_for_hotplug(void); |
4742 | extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); |
4743 | extern void cpuset_cpus_allowed_fallback(struct task_struct *p); |
4744 | extern nodemask_t cpuset_mems_allowed(struct task_struct *p); |
4745 | @@ -170,11 +172,15 @@ static inline bool cpusets_enabled(void) { return false; } |
4746 | static inline int cpuset_init(void) { return 0; } |
4747 | static inline void cpuset_init_smp(void) {} |
4748 | |
4749 | +static inline void cpuset_force_rebuild(void) { } |
4750 | + |
4751 | static inline void cpuset_update_active_cpus(void) |
4752 | { |
4753 | partition_sched_domains(1, NULL, NULL); |
4754 | } |
4755 | |
4756 | +static inline void cpuset_wait_for_hotplug(void) { } |
4757 | + |
4758 | static inline void cpuset_cpus_allowed(struct task_struct *p, |
4759 | struct cpumask *mask) |
4760 | { |
4761 | diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h |
4762 | index 6383115e9d2c..2e028854bac7 100644 |
4763 | --- a/include/linux/ftrace.h |
4764 | +++ b/include/linux/ftrace.h |
4765 | @@ -307,7 +307,7 @@ DECLARE_PER_CPU(int, disable_stack_tracer); |
4766 | static inline void stack_tracer_disable(void) |
4767 | { |
4768 | /* Preemption or interupts must be disabled */ |
4769 | - if (IS_ENABLED(CONFIG_PREEMPT_DEBUG)) |
4770 | + if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) |
4771 | WARN_ON_ONCE(!preempt_count() || !irqs_disabled()); |
4772 | this_cpu_inc(disable_stack_tracer); |
4773 | } |
4774 | @@ -320,7 +320,7 @@ static inline void stack_tracer_disable(void) |
4775 | */ |
4776 | static inline void stack_tracer_enable(void) |
4777 | { |
4778 | - if (IS_ENABLED(CONFIG_PREEMPT_DEBUG)) |
4779 | + if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) |
4780 | WARN_ON_ONCE(!preempt_count() || !irqs_disabled()); |
4781 | this_cpu_dec(disable_stack_tracer); |
4782 | } |
4783 | diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h |
4784 | index c28dd523f96e..d43837f2ce3a 100644 |
4785 | --- a/include/linux/tty_flip.h |
4786 | +++ b/include/linux/tty_flip.h |
4787 | @@ -12,6 +12,7 @@ extern int tty_prepare_flip_string(struct tty_port *port, |
4788 | unsigned char **chars, size_t size); |
4789 | extern void tty_flip_buffer_push(struct tty_port *port); |
4790 | void tty_schedule_flip(struct tty_port *port); |
4791 | +int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag); |
4792 | |
4793 | static inline int tty_insert_flip_char(struct tty_port *port, |
4794 | unsigned char ch, char flag) |
4795 | @@ -26,7 +27,7 @@ static inline int tty_insert_flip_char(struct tty_port *port, |
4796 | *char_buf_ptr(tb, tb->used++) = ch; |
4797 | return 1; |
4798 | } |
4799 | - return tty_insert_flip_string_flags(port, &ch, &flag, 1); |
4800 | + return __tty_insert_flip_char(port, ch, flag); |
4801 | } |
4802 | |
4803 | static inline int tty_insert_flip_string(struct tty_port *port, |
4804 | diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h |
4805 | index acdd6f915a8d..20ef8e6ec2db 100644 |
4806 | --- a/include/linux/uaccess.h |
4807 | +++ b/include/linux/uaccess.h |
4808 | @@ -156,7 +156,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n) |
4809 | } |
4810 | #ifdef CONFIG_COMPAT |
4811 | static __always_inline unsigned long __must_check |
4812 | -copy_in_user(void __user *to, const void *from, unsigned long n) |
4813 | +copy_in_user(void __user *to, const void __user *from, unsigned long n) |
4814 | { |
4815 | might_fault(); |
4816 | if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n)) |
4817 | diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c |
4818 | index 87a1213dd326..e8cb34193433 100644 |
4819 | --- a/kernel/cgroup/cpuset.c |
4820 | +++ b/kernel/cgroup/cpuset.c |
4821 | @@ -2260,6 +2260,13 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs) |
4822 | mutex_unlock(&cpuset_mutex); |
4823 | } |
4824 | |
4825 | +static bool force_rebuild; |
4826 | + |
4827 | +void cpuset_force_rebuild(void) |
4828 | +{ |
4829 | + force_rebuild = true; |
4830 | +} |
4831 | + |
4832 | /** |
4833 | * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset |
4834 | * |
4835 | @@ -2334,8 +2341,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work) |
4836 | } |
4837 | |
4838 | /* rebuild sched domains if cpus_allowed has changed */ |
4839 | - if (cpus_updated) |
4840 | + if (cpus_updated || force_rebuild) { |
4841 | + force_rebuild = false; |
4842 | rebuild_sched_domains(); |
4843 | + } |
4844 | } |
4845 | |
4846 | void cpuset_update_active_cpus(void) |
4847 | @@ -2354,6 +2363,11 @@ void cpuset_update_active_cpus(void) |
4848 | schedule_work(&cpuset_hotplug_work); |
4849 | } |
4850 | |
4851 | +void cpuset_wait_for_hotplug(void) |
4852 | +{ |
4853 | + flush_work(&cpuset_hotplug_work); |
4854 | +} |
4855 | + |
4856 | /* |
4857 | * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. |
4858 | * Call this routine anytime after node_states[N_MEMORY] changes. |
4859 | diff --git a/kernel/cpu.c b/kernel/cpu.c |
4860 | index eee033134262..a88c29ab09be 100644 |
4861 | --- a/kernel/cpu.c |
4862 | +++ b/kernel/cpu.c |
4863 | @@ -1252,7 +1252,17 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name, |
4864 | struct cpuhp_step *sp; |
4865 | int ret = 0; |
4866 | |
4867 | - if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) { |
4868 | + /* |
4869 | + * If name is NULL, then the state gets removed. |
4870 | + * |
4871 | + * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on |
4872 | + * the first allocation from these dynamic ranges, so the removal |
4873 | + * would trigger a new allocation and clear the wrong (already |
4874 | + * empty) state, leaving the callbacks of the to be cleared state |
4875 | + * dangling, which causes wreckage on the next hotplug operation. |
4876 | + */ |
4877 | + if (name && (state == CPUHP_AP_ONLINE_DYN || |
4878 | + state == CPUHP_BP_PREPARE_DYN)) { |
4879 | ret = cpuhp_reserve_state(state); |
4880 | if (ret < 0) |
4881 | return ret; |
4882 | diff --git a/kernel/power/process.c b/kernel/power/process.c |
4883 | index 78672d324a6e..50f25cb370c6 100644 |
4884 | --- a/kernel/power/process.c |
4885 | +++ b/kernel/power/process.c |
4886 | @@ -20,8 +20,9 @@ |
4887 | #include <linux/workqueue.h> |
4888 | #include <linux/kmod.h> |
4889 | #include <trace/events/power.h> |
4890 | +#include <linux/cpuset.h> |
4891 | |
4892 | -/* |
4893 | +/* |
4894 | * Timeout for stopping processes |
4895 | */ |
4896 | unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC; |
4897 | @@ -202,6 +203,8 @@ void thaw_processes(void) |
4898 | __usermodehelper_set_disable_depth(UMH_FREEZING); |
4899 | thaw_workqueues(); |
4900 | |
4901 | + cpuset_wait_for_hotplug(); |
4902 | + |
4903 | read_lock(&tasklist_lock); |
4904 | for_each_process_thread(g, p) { |
4905 | /* No other threads should have PF_SUSPEND_TASK set */ |
4906 | diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c |
4907 | index d0ca524bf042..258a9abee0b0 100644 |
4908 | --- a/kernel/rcu/srcutree.c |
4909 | +++ b/kernel/rcu/srcutree.c |
4910 | @@ -896,6 +896,15 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm) |
4911 | __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm); |
4912 | wait_for_completion(&rcu.completion); |
4913 | destroy_rcu_head_on_stack(&rcu.head); |
4914 | + |
4915 | + /* |
4916 | + * Make sure that later code is ordered after the SRCU grace |
4917 | + * period. This pairs with the raw_spin_lock_irq_rcu_node() |
4918 | + * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed |
4919 | + * because the current CPU might have been totally uninvolved with |
4920 | + * (and thus unordered against) that grace period. |
4921 | + */ |
4922 | + smp_mb(); |
4923 | } |
4924 | |
4925 | /** |
4926 | diff --git a/kernel/sched/core.c b/kernel/sched/core.c |
4927 | index 0869b20fba81..99326c370c9c 100644 |
4928 | --- a/kernel/sched/core.c |
4929 | +++ b/kernel/sched/core.c |
4930 | @@ -5538,16 +5538,15 @@ static void cpuset_cpu_active(void) |
4931 | * operation in the resume sequence, just build a single sched |
4932 | * domain, ignoring cpusets. |
4933 | */ |
4934 | - num_cpus_frozen--; |
4935 | - if (likely(num_cpus_frozen)) { |
4936 | - partition_sched_domains(1, NULL, NULL); |
4937 | + partition_sched_domains(1, NULL, NULL); |
4938 | + if (--num_cpus_frozen) |
4939 | return; |
4940 | - } |
4941 | /* |
4942 | * This is the last CPU online operation. So fall through and |
4943 | * restore the original sched domains by considering the |
4944 | * cpuset configurations. |
4945 | */ |
4946 | + cpuset_force_rebuild(); |
4947 | } |
4948 | cpuset_update_active_cpus(); |
4949 | } |
4950 | diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c |
4951 | index 96cea88fa00f..725819569fa7 100644 |
4952 | --- a/kernel/trace/ftrace.c |
4953 | +++ b/kernel/trace/ftrace.c |
4954 | @@ -2828,13 +2828,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) |
4955 | |
4956 | if (!command || !ftrace_enabled) { |
4957 | /* |
4958 | - * If these are per_cpu ops, they still need their |
4959 | - * per_cpu field freed. Since, function tracing is |
4960 | + * If these are dynamic or per_cpu ops, they still |
4961 | + * need their data freed. Since, function tracing is |
4962 | * not currently active, we can just free them |
4963 | * without synchronizing all CPUs. |
4964 | */ |
4965 | - if (ops->flags & FTRACE_OPS_FL_PER_CPU) |
4966 | - per_cpu_ops_free(ops); |
4967 | + if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) |
4968 | + goto free_ops; |
4969 | + |
4970 | return 0; |
4971 | } |
4972 | |
4973 | @@ -2900,6 +2901,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) |
4974 | if (IS_ENABLED(CONFIG_PREEMPT)) |
4975 | synchronize_rcu_tasks(); |
4976 | |
4977 | + free_ops: |
4978 | arch_ftrace_trampoline_free(ops); |
4979 | |
4980 | if (ops->flags & FTRACE_OPS_FL_PER_CPU) |
4981 | diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c |
4982 | index 44004d8aa3b3..5efb4b63174e 100644 |
4983 | --- a/kernel/trace/trace.c |
4984 | +++ b/kernel/trace/trace.c |
4985 | @@ -2799,11 +2799,17 @@ static char *get_trace_buf(void) |
4986 | if (!buffer || buffer->nesting >= 4) |
4987 | return NULL; |
4988 | |
4989 | - return &buffer->buffer[buffer->nesting++][0]; |
4990 | + buffer->nesting++; |
4991 | + |
4992 | + /* Interrupts must see nesting incremented before we use the buffer */ |
4993 | + barrier(); |
4994 | + return &buffer->buffer[buffer->nesting][0]; |
4995 | } |
4996 | |
4997 | static void put_trace_buf(void) |
4998 | { |
4999 | + /* Don't let the decrement of nesting leak before this */ |
5000 | + barrier(); |
5001 | this_cpu_dec(trace_percpu_buffer->nesting); |
5002 | } |
5003 | |
5004 | @@ -6220,7 +6226,7 @@ static int tracing_set_clock(struct trace_array *tr, const char *clockstr) |
5005 | tracing_reset_online_cpus(&tr->trace_buffer); |
5006 | |
5007 | #ifdef CONFIG_TRACER_MAX_TRACE |
5008 | - if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer) |
5009 | + if (tr->max_buffer.buffer) |
5010 | ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); |
5011 | tracing_reset_online_cpus(&tr->max_buffer); |
5012 | #endif |
5013 | diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c |
5014 | index 36132f9280e6..51a6e09a7410 100644 |
5015 | --- a/kernel/trace/trace_events.c |
5016 | +++ b/kernel/trace/trace_events.c |
5017 | @@ -406,7 +406,7 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file, |
5018 | |
5019 | if (file->flags & EVENT_FILE_FL_RECORDED_TGID) { |
5020 | tracing_stop_tgid_record(); |
5021 | - clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); |
5022 | + clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); |
5023 | } |
5024 | |
5025 | call->class->reg(call, TRACE_REG_UNREGISTER, file); |
5026 | diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c |
5027 | index cb917cebae29..b17ec642793b 100644 |
5028 | --- a/kernel/trace/trace_selftest.c |
5029 | +++ b/kernel/trace/trace_selftest.c |
5030 | @@ -273,7 +273,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt) |
5031 | goto out_free; |
5032 | if (cnt > 1) { |
5033 | if (trace_selftest_test_global_cnt == 0) |
5034 | - goto out; |
5035 | + goto out_free; |
5036 | } |
5037 | if (trace_selftest_test_dyn_cnt == 0) |
5038 | goto out_free; |
5039 | diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c |
5040 | index 9979f46c81dc..51390febd5e3 100644 |
5041 | --- a/net/netfilter/nf_conntrack_core.c |
5042 | +++ b/net/netfilter/nf_conntrack_core.c |
5043 | @@ -96,19 +96,26 @@ static struct conntrack_gc_work conntrack_gc_work; |
5044 | |
5045 | void nf_conntrack_lock(spinlock_t *lock) __acquires(lock) |
5046 | { |
5047 | + /* 1) Acquire the lock */ |
5048 | spin_lock(lock); |
5049 | - while (unlikely(nf_conntrack_locks_all)) { |
5050 | - spin_unlock(lock); |
5051 | |
5052 | - /* |
5053 | - * Order the 'nf_conntrack_locks_all' load vs. the |
5054 | - * spin_unlock_wait() loads below, to ensure |
5055 | - * that 'nf_conntrack_locks_all_lock' is indeed held: |
5056 | - */ |
5057 | - smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */ |
5058 | - spin_unlock_wait(&nf_conntrack_locks_all_lock); |
5059 | - spin_lock(lock); |
5060 | - } |
5061 | + /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics |
5062 | + * It pairs with the smp_store_release() in nf_conntrack_all_unlock() |
5063 | + */ |
5064 | + if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false)) |
5065 | + return; |
5066 | + |
5067 | + /* fast path failed, unlock */ |
5068 | + spin_unlock(lock); |
5069 | + |
5070 | + /* Slow path 1) get global lock */ |
5071 | + spin_lock(&nf_conntrack_locks_all_lock); |
5072 | + |
5073 | + /* Slow path 2) get the lock we want */ |
5074 | + spin_lock(lock); |
5075 | + |
5076 | + /* Slow path 3) release the global lock */ |
5077 | + spin_unlock(&nf_conntrack_locks_all_lock); |
5078 | } |
5079 | EXPORT_SYMBOL_GPL(nf_conntrack_lock); |
5080 | |
5081 | @@ -149,28 +156,27 @@ static void nf_conntrack_all_lock(void) |
5082 | int i; |
5083 | |
5084 | spin_lock(&nf_conntrack_locks_all_lock); |
5085 | - nf_conntrack_locks_all = true; |
5086 | |
5087 | - /* |
5088 | - * Order the above store of 'nf_conntrack_locks_all' against |
5089 | - * the spin_unlock_wait() loads below, such that if |
5090 | - * nf_conntrack_lock() observes 'nf_conntrack_locks_all' |
5091 | - * we must observe nf_conntrack_locks[] held: |
5092 | - */ |
5093 | - smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */ |
5094 | + nf_conntrack_locks_all = true; |
5095 | |
5096 | for (i = 0; i < CONNTRACK_LOCKS; i++) { |
5097 | - spin_unlock_wait(&nf_conntrack_locks[i]); |
5098 | + spin_lock(&nf_conntrack_locks[i]); |
5099 | + |
5100 | + /* This spin_unlock provides the "release" to ensure that |
5101 | + * nf_conntrack_locks_all==true is visible to everyone that |
5102 | + * acquired spin_lock(&nf_conntrack_locks[]). |
5103 | + */ |
5104 | + spin_unlock(&nf_conntrack_locks[i]); |
5105 | } |
5106 | } |
5107 | |
5108 | static void nf_conntrack_all_unlock(void) |
5109 | { |
5110 | - /* |
5111 | - * All prior stores must be complete before we clear |
5112 | + /* All prior stores must be complete before we clear |
5113 | * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock() |
5114 | * might observe the false value but not the entire |
5115 | - * critical section: |
5116 | + * critical section. |
5117 | + * It pairs with the smp_load_acquire() in nf_conntrack_lock() |
5118 | */ |
5119 | smp_store_release(&nf_conntrack_locks_all, false); |
5120 | spin_unlock(&nf_conntrack_locks_all_lock); |
5121 | diff --git a/sound/core/seq_device.c b/sound/core/seq_device.c |
5122 | index c4acf17e9f5e..e40a2cba5002 100644 |
5123 | --- a/sound/core/seq_device.c |
5124 | +++ b/sound/core/seq_device.c |
5125 | @@ -148,8 +148,10 @@ void snd_seq_device_load_drivers(void) |
5126 | flush_work(&autoload_work); |
5127 | } |
5128 | EXPORT_SYMBOL(snd_seq_device_load_drivers); |
5129 | +#define cancel_autoload_drivers() cancel_work_sync(&autoload_work) |
5130 | #else |
5131 | #define queue_autoload_drivers() /* NOP */ |
5132 | +#define cancel_autoload_drivers() /* NOP */ |
5133 | #endif |
5134 | |
5135 | /* |
5136 | @@ -159,6 +161,7 @@ static int snd_seq_device_dev_free(struct snd_device *device) |
5137 | { |
5138 | struct snd_seq_device *dev = device->device_data; |
5139 | |
5140 | + cancel_autoload_drivers(); |
5141 | put_device(&dev->dev); |
5142 | return 0; |
5143 | } |