Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0133-4.14.34-all-fixes.patch

Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 171983 bytes
-added up to patches-4.14.79
1 diff --git a/Makefile b/Makefile
2 index 00dd6af8eab4..a6906dfb112e 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 14
9 -SUBLEVEL = 33
10 +SUBLEVEL = 34
11 EXTRAVERSION =
12 NAME = Petit Gorille
13
14 diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
15 index 9319e1f0f1d8..379b4a03cfe2 100644
16 --- a/arch/arm/boot/dts/ls1021a.dtsi
17 +++ b/arch/arm/boot/dts/ls1021a.dtsi
18 @@ -155,7 +155,7 @@
19 };
20
21 esdhc: esdhc@1560000 {
22 - compatible = "fsl,esdhc";
23 + compatible = "fsl,ls1021a-esdhc", "fsl,esdhc";
24 reg = <0x0 0x1560000 0x0 0x10000>;
25 interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
26 clock-frequency = <0>;
27 diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
28 index 12fd81af1d1c..93dbf4889eb6 100644
29 --- a/arch/arm64/crypto/Makefile
30 +++ b/arch/arm64/crypto/Makefile
31 @@ -24,7 +24,7 @@ obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o
32 crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o
33
34 obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
35 -CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
36 +aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o
37
38 obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o
39 aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o
40 diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
41 deleted file mode 100644
42 index 6a75cd75ed11..000000000000
43 --- a/arch/arm64/crypto/aes-ce-cipher.c
44 +++ /dev/null
45 @@ -1,281 +0,0 @@
46 -/*
47 - * aes-ce-cipher.c - core AES cipher using ARMv8 Crypto Extensions
48 - *
49 - * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
50 - *
51 - * This program is free software; you can redistribute it and/or modify
52 - * it under the terms of the GNU General Public License version 2 as
53 - * published by the Free Software Foundation.
54 - */
55 -
56 -#include <asm/neon.h>
57 -#include <asm/simd.h>
58 -#include <asm/unaligned.h>
59 -#include <crypto/aes.h>
60 -#include <linux/cpufeature.h>
61 -#include <linux/crypto.h>
62 -#include <linux/module.h>
63 -
64 -#include "aes-ce-setkey.h"
65 -
66 -MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
67 -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
68 -MODULE_LICENSE("GPL v2");
69 -
70 -asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
71 -asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
72 -
73 -struct aes_block {
74 - u8 b[AES_BLOCK_SIZE];
75 -};
76 -
77 -static int num_rounds(struct crypto_aes_ctx *ctx)
78 -{
79 - /*
80 - * # of rounds specified by AES:
81 - * 128 bit key 10 rounds
82 - * 192 bit key 12 rounds
83 - * 256 bit key 14 rounds
84 - * => n byte key => 6 + (n/4) rounds
85 - */
86 - return 6 + ctx->key_length / 4;
87 -}
88 -
89 -static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
90 -{
91 - struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
92 - struct aes_block *out = (struct aes_block *)dst;
93 - struct aes_block const *in = (struct aes_block *)src;
94 - void *dummy0;
95 - int dummy1;
96 -
97 - if (!may_use_simd()) {
98 - __aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
99 - return;
100 - }
101 -
102 - kernel_neon_begin();
103 -
104 - __asm__(" ld1 {v0.16b}, %[in] ;"
105 - " ld1 {v1.4s}, [%[key]], #16 ;"
106 - " cmp %w[rounds], #10 ;"
107 - " bmi 0f ;"
108 - " bne 3f ;"
109 - " mov v3.16b, v1.16b ;"
110 - " b 2f ;"
111 - "0: mov v2.16b, v1.16b ;"
112 - " ld1 {v3.4s}, [%[key]], #16 ;"
113 - "1: aese v0.16b, v2.16b ;"
114 - " aesmc v0.16b, v0.16b ;"
115 - "2: ld1 {v1.4s}, [%[key]], #16 ;"
116 - " aese v0.16b, v3.16b ;"
117 - " aesmc v0.16b, v0.16b ;"
118 - "3: ld1 {v2.4s}, [%[key]], #16 ;"
119 - " subs %w[rounds], %w[rounds], #3 ;"
120 - " aese v0.16b, v1.16b ;"
121 - " aesmc v0.16b, v0.16b ;"
122 - " ld1 {v3.4s}, [%[key]], #16 ;"
123 - " bpl 1b ;"
124 - " aese v0.16b, v2.16b ;"
125 - " eor v0.16b, v0.16b, v3.16b ;"
126 - " st1 {v0.16b}, %[out] ;"
127 -
128 - : [out] "=Q"(*out),
129 - [key] "=r"(dummy0),
130 - [rounds] "=r"(dummy1)
131 - : [in] "Q"(*in),
132 - "1"(ctx->key_enc),
133 - "2"(num_rounds(ctx) - 2)
134 - : "cc");
135 -
136 - kernel_neon_end();
137 -}
138 -
139 -static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
140 -{
141 - struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
142 - struct aes_block *out = (struct aes_block *)dst;
143 - struct aes_block const *in = (struct aes_block *)src;
144 - void *dummy0;
145 - int dummy1;
146 -
147 - if (!may_use_simd()) {
148 - __aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
149 - return;
150 - }
151 -
152 - kernel_neon_begin();
153 -
154 - __asm__(" ld1 {v0.16b}, %[in] ;"
155 - " ld1 {v1.4s}, [%[key]], #16 ;"
156 - " cmp %w[rounds], #10 ;"
157 - " bmi 0f ;"
158 - " bne 3f ;"
159 - " mov v3.16b, v1.16b ;"
160 - " b 2f ;"
161 - "0: mov v2.16b, v1.16b ;"
162 - " ld1 {v3.4s}, [%[key]], #16 ;"
163 - "1: aesd v0.16b, v2.16b ;"
164 - " aesimc v0.16b, v0.16b ;"
165 - "2: ld1 {v1.4s}, [%[key]], #16 ;"
166 - " aesd v0.16b, v3.16b ;"
167 - " aesimc v0.16b, v0.16b ;"
168 - "3: ld1 {v2.4s}, [%[key]], #16 ;"
169 - " subs %w[rounds], %w[rounds], #3 ;"
170 - " aesd v0.16b, v1.16b ;"
171 - " aesimc v0.16b, v0.16b ;"
172 - " ld1 {v3.4s}, [%[key]], #16 ;"
173 - " bpl 1b ;"
174 - " aesd v0.16b, v2.16b ;"
175 - " eor v0.16b, v0.16b, v3.16b ;"
176 - " st1 {v0.16b}, %[out] ;"
177 -
178 - : [out] "=Q"(*out),
179 - [key] "=r"(dummy0),
180 - [rounds] "=r"(dummy1)
181 - : [in] "Q"(*in),
182 - "1"(ctx->key_dec),
183 - "2"(num_rounds(ctx) - 2)
184 - : "cc");
185 -
186 - kernel_neon_end();
187 -}
188 -
189 -/*
190 - * aes_sub() - use the aese instruction to perform the AES sbox substitution
191 - * on each byte in 'input'
192 - */
193 -static u32 aes_sub(u32 input)
194 -{
195 - u32 ret;
196 -
197 - __asm__("dup v1.4s, %w[in] ;"
198 - "movi v0.16b, #0 ;"
199 - "aese v0.16b, v1.16b ;"
200 - "umov %w[out], v0.4s[0] ;"
201 -
202 - : [out] "=r"(ret)
203 - : [in] "r"(input)
204 - : "v0","v1");
205 -
206 - return ret;
207 -}
208 -
209 -int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
210 - unsigned int key_len)
211 -{
212 - /*
213 - * The AES key schedule round constants
214 - */
215 - static u8 const rcon[] = {
216 - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
217 - };
218 -
219 - u32 kwords = key_len / sizeof(u32);
220 - struct aes_block *key_enc, *key_dec;
221 - int i, j;
222 -
223 - if (key_len != AES_KEYSIZE_128 &&
224 - key_len != AES_KEYSIZE_192 &&
225 - key_len != AES_KEYSIZE_256)
226 - return -EINVAL;
227 -
228 - ctx->key_length = key_len;
229 - for (i = 0; i < kwords; i++)
230 - ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
231 -
232 - kernel_neon_begin();
233 - for (i = 0; i < sizeof(rcon); i++) {
234 - u32 *rki = ctx->key_enc + (i * kwords);
235 - u32 *rko = rki + kwords;
236 -
237 - rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
238 - rko[1] = rko[0] ^ rki[1];
239 - rko[2] = rko[1] ^ rki[2];
240 - rko[3] = rko[2] ^ rki[3];
241 -
242 - if (key_len == AES_KEYSIZE_192) {
243 - if (i >= 7)
244 - break;
245 - rko[4] = rko[3] ^ rki[4];
246 - rko[5] = rko[4] ^ rki[5];
247 - } else if (key_len == AES_KEYSIZE_256) {
248 - if (i >= 6)
249 - break;
250 - rko[4] = aes_sub(rko[3]) ^ rki[4];
251 - rko[5] = rko[4] ^ rki[5];
252 - rko[6] = rko[5] ^ rki[6];
253 - rko[7] = rko[6] ^ rki[7];
254 - }
255 - }
256 -
257 - /*
258 - * Generate the decryption keys for the Equivalent Inverse Cipher.
259 - * This involves reversing the order of the round keys, and applying
260 - * the Inverse Mix Columns transformation on all but the first and
261 - * the last one.
262 - */
263 - key_enc = (struct aes_block *)ctx->key_enc;
264 - key_dec = (struct aes_block *)ctx->key_dec;
265 - j = num_rounds(ctx);
266 -
267 - key_dec[0] = key_enc[j];
268 - for (i = 1, j--; j > 0; i++, j--)
269 - __asm__("ld1 {v0.4s}, %[in] ;"
270 - "aesimc v1.16b, v0.16b ;"
271 - "st1 {v1.4s}, %[out] ;"
272 -
273 - : [out] "=Q"(key_dec[i])
274 - : [in] "Q"(key_enc[j])
275 - : "v0","v1");
276 - key_dec[i] = key_enc[0];
277 -
278 - kernel_neon_end();
279 - return 0;
280 -}
281 -EXPORT_SYMBOL(ce_aes_expandkey);
282 -
283 -int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
284 - unsigned int key_len)
285 -{
286 - struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
287 - int ret;
288 -
289 - ret = ce_aes_expandkey(ctx, in_key, key_len);
290 - if (!ret)
291 - return 0;
292 -
293 - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
294 - return -EINVAL;
295 -}
296 -EXPORT_SYMBOL(ce_aes_setkey);
297 -
298 -static struct crypto_alg aes_alg = {
299 - .cra_name = "aes",
300 - .cra_driver_name = "aes-ce",
301 - .cra_priority = 250,
302 - .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
303 - .cra_blocksize = AES_BLOCK_SIZE,
304 - .cra_ctxsize = sizeof(struct crypto_aes_ctx),
305 - .cra_module = THIS_MODULE,
306 - .cra_cipher = {
307 - .cia_min_keysize = AES_MIN_KEY_SIZE,
308 - .cia_max_keysize = AES_MAX_KEY_SIZE,
309 - .cia_setkey = ce_aes_setkey,
310 - .cia_encrypt = aes_cipher_encrypt,
311 - .cia_decrypt = aes_cipher_decrypt
312 - }
313 -};
314 -
315 -static int __init aes_mod_init(void)
316 -{
317 - return crypto_register_alg(&aes_alg);
318 -}
319 -
320 -static void __exit aes_mod_exit(void)
321 -{
322 - crypto_unregister_alg(&aes_alg);
323 -}
324 -
325 -module_cpu_feature_match(AES, aes_mod_init);
326 -module_exit(aes_mod_exit);
327 diff --git a/arch/arm64/crypto/aes-ce-core.S b/arch/arm64/crypto/aes-ce-core.S
328 new file mode 100644
329 index 000000000000..8efdfdade393
330 --- /dev/null
331 +++ b/arch/arm64/crypto/aes-ce-core.S
332 @@ -0,0 +1,87 @@
333 +/*
334 + * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
335 + *
336 + * This program is free software; you can redistribute it and/or modify
337 + * it under the terms of the GNU General Public License version 2 as
338 + * published by the Free Software Foundation.
339 + */
340 +
341 +#include <linux/linkage.h>
342 +#include <asm/assembler.h>
343 +
344 + .arch armv8-a+crypto
345 +
346 +ENTRY(__aes_ce_encrypt)
347 + sub w3, w3, #2
348 + ld1 {v0.16b}, [x2]
349 + ld1 {v1.4s}, [x0], #16
350 + cmp w3, #10
351 + bmi 0f
352 + bne 3f
353 + mov v3.16b, v1.16b
354 + b 2f
355 +0: mov v2.16b, v1.16b
356 + ld1 {v3.4s}, [x0], #16
357 +1: aese v0.16b, v2.16b
358 + aesmc v0.16b, v0.16b
359 +2: ld1 {v1.4s}, [x0], #16
360 + aese v0.16b, v3.16b
361 + aesmc v0.16b, v0.16b
362 +3: ld1 {v2.4s}, [x0], #16
363 + subs w3, w3, #3
364 + aese v0.16b, v1.16b
365 + aesmc v0.16b, v0.16b
366 + ld1 {v3.4s}, [x0], #16
367 + bpl 1b
368 + aese v0.16b, v2.16b
369 + eor v0.16b, v0.16b, v3.16b
370 + st1 {v0.16b}, [x1]
371 + ret
372 +ENDPROC(__aes_ce_encrypt)
373 +
374 +ENTRY(__aes_ce_decrypt)
375 + sub w3, w3, #2
376 + ld1 {v0.16b}, [x2]
377 + ld1 {v1.4s}, [x0], #16
378 + cmp w3, #10
379 + bmi 0f
380 + bne 3f
381 + mov v3.16b, v1.16b
382 + b 2f
383 +0: mov v2.16b, v1.16b
384 + ld1 {v3.4s}, [x0], #16
385 +1: aesd v0.16b, v2.16b
386 + aesimc v0.16b, v0.16b
387 +2: ld1 {v1.4s}, [x0], #16
388 + aesd v0.16b, v3.16b
389 + aesimc v0.16b, v0.16b
390 +3: ld1 {v2.4s}, [x0], #16
391 + subs w3, w3, #3
392 + aesd v0.16b, v1.16b
393 + aesimc v0.16b, v0.16b
394 + ld1 {v3.4s}, [x0], #16
395 + bpl 1b
396 + aesd v0.16b, v2.16b
397 + eor v0.16b, v0.16b, v3.16b
398 + st1 {v0.16b}, [x1]
399 + ret
400 +ENDPROC(__aes_ce_decrypt)
401 +
402 +/*
403 + * __aes_ce_sub() - use the aese instruction to perform the AES sbox
404 + * substitution on each byte in 'input'
405 + */
406 +ENTRY(__aes_ce_sub)
407 + dup v1.4s, w0
408 + movi v0.16b, #0
409 + aese v0.16b, v1.16b
410 + umov w0, v0.s[0]
411 + ret
412 +ENDPROC(__aes_ce_sub)
413 +
414 +ENTRY(__aes_ce_invert)
415 + ld1 {v0.4s}, [x1]
416 + aesimc v1.16b, v0.16b
417 + st1 {v1.4s}, [x0]
418 + ret
419 +ENDPROC(__aes_ce_invert)
420 diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c
421 new file mode 100644
422 index 000000000000..e6b3227bbf57
423 --- /dev/null
424 +++ b/arch/arm64/crypto/aes-ce-glue.c
425 @@ -0,0 +1,190 @@
426 +/*
427 + * aes-ce-cipher.c - core AES cipher using ARMv8 Crypto Extensions
428 + *
429 + * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
430 + *
431 + * This program is free software; you can redistribute it and/or modify
432 + * it under the terms of the GNU General Public License version 2 as
433 + * published by the Free Software Foundation.
434 + */
435 +
436 +#include <asm/neon.h>
437 +#include <asm/simd.h>
438 +#include <asm/unaligned.h>
439 +#include <crypto/aes.h>
440 +#include <linux/cpufeature.h>
441 +#include <linux/crypto.h>
442 +#include <linux/module.h>
443 +
444 +#include "aes-ce-setkey.h"
445 +
446 +MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
447 +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
448 +MODULE_LICENSE("GPL v2");
449 +
450 +asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
451 +asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
452 +
453 +struct aes_block {
454 + u8 b[AES_BLOCK_SIZE];
455 +};
456 +
457 +asmlinkage void __aes_ce_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
458 +asmlinkage void __aes_ce_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
459 +
460 +asmlinkage u32 __aes_ce_sub(u32 l);
461 +asmlinkage void __aes_ce_invert(struct aes_block *out,
462 + const struct aes_block *in);
463 +
464 +static int num_rounds(struct crypto_aes_ctx *ctx)
465 +{
466 + /*
467 + * # of rounds specified by AES:
468 + * 128 bit key 10 rounds
469 + * 192 bit key 12 rounds
470 + * 256 bit key 14 rounds
471 + * => n byte key => 6 + (n/4) rounds
472 + */
473 + return 6 + ctx->key_length / 4;
474 +}
475 +
476 +static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
477 +{
478 + struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
479 +
480 + if (!may_use_simd()) {
481 + __aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
482 + return;
483 + }
484 +
485 + kernel_neon_begin();
486 + __aes_ce_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
487 + kernel_neon_end();
488 +}
489 +
490 +static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
491 +{
492 + struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
493 +
494 + if (!may_use_simd()) {
495 + __aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
496 + return;
497 + }
498 +
499 + kernel_neon_begin();
500 + __aes_ce_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
501 + kernel_neon_end();
502 +}
503 +
504 +int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
505 + unsigned int key_len)
506 +{
507 + /*
508 + * The AES key schedule round constants
509 + */
510 + static u8 const rcon[] = {
511 + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
512 + };
513 +
514 + u32 kwords = key_len / sizeof(u32);
515 + struct aes_block *key_enc, *key_dec;
516 + int i, j;
517 +
518 + if (key_len != AES_KEYSIZE_128 &&
519 + key_len != AES_KEYSIZE_192 &&
520 + key_len != AES_KEYSIZE_256)
521 + return -EINVAL;
522 +
523 + ctx->key_length = key_len;
524 + for (i = 0; i < kwords; i++)
525 + ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
526 +
527 + kernel_neon_begin();
528 + for (i = 0; i < sizeof(rcon); i++) {
529 + u32 *rki = ctx->key_enc + (i * kwords);
530 + u32 *rko = rki + kwords;
531 +
532 + rko[0] = ror32(__aes_ce_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
533 + rko[1] = rko[0] ^ rki[1];
534 + rko[2] = rko[1] ^ rki[2];
535 + rko[3] = rko[2] ^ rki[3];
536 +
537 + if (key_len == AES_KEYSIZE_192) {
538 + if (i >= 7)
539 + break;
540 + rko[4] = rko[3] ^ rki[4];
541 + rko[5] = rko[4] ^ rki[5];
542 + } else if (key_len == AES_KEYSIZE_256) {
543 + if (i >= 6)
544 + break;
545 + rko[4] = __aes_ce_sub(rko[3]) ^ rki[4];
546 + rko[5] = rko[4] ^ rki[5];
547 + rko[6] = rko[5] ^ rki[6];
548 + rko[7] = rko[6] ^ rki[7];
549 + }
550 + }
551 +
552 + /*
553 + * Generate the decryption keys for the Equivalent Inverse Cipher.
554 + * This involves reversing the order of the round keys, and applying
555 + * the Inverse Mix Columns transformation on all but the first and
556 + * the last one.
557 + */
558 + key_enc = (struct aes_block *)ctx->key_enc;
559 + key_dec = (struct aes_block *)ctx->key_dec;
560 + j = num_rounds(ctx);
561 +
562 + key_dec[0] = key_enc[j];
563 + for (i = 1, j--; j > 0; i++, j--)
564 + __aes_ce_invert(key_dec + i, key_enc + j);
565 + key_dec[i] = key_enc[0];
566 +
567 + kernel_neon_end();
568 + return 0;
569 +}
570 +EXPORT_SYMBOL(ce_aes_expandkey);
571 +
572 +int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
573 + unsigned int key_len)
574 +{
575 + struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
576 + int ret;
577 +
578 + ret = ce_aes_expandkey(ctx, in_key, key_len);
579 + if (!ret)
580 + return 0;
581 +
582 + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
583 + return -EINVAL;
584 +}
585 +EXPORT_SYMBOL(ce_aes_setkey);
586 +
587 +static struct crypto_alg aes_alg = {
588 + .cra_name = "aes",
589 + .cra_driver_name = "aes-ce",
590 + .cra_priority = 250,
591 + .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
592 + .cra_blocksize = AES_BLOCK_SIZE,
593 + .cra_ctxsize = sizeof(struct crypto_aes_ctx),
594 + .cra_module = THIS_MODULE,
595 + .cra_cipher = {
596 + .cia_min_keysize = AES_MIN_KEY_SIZE,
597 + .cia_max_keysize = AES_MAX_KEY_SIZE,
598 + .cia_setkey = ce_aes_setkey,
599 + .cia_encrypt = aes_cipher_encrypt,
600 + .cia_decrypt = aes_cipher_decrypt
601 + }
602 +};
603 +
604 +static int __init aes_mod_init(void)
605 +{
606 + return crypto_register_alg(&aes_alg);
607 +}
608 +
609 +static void __exit aes_mod_exit(void)
610 +{
611 + crypto_unregister_alg(&aes_alg);
612 +}
613 +
614 +module_cpu_feature_match(AES, aes_mod_init);
615 +module_exit(aes_mod_exit);
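
The num_rounds() helper above encodes the AES round-count rule (10, 12 and 14 rounds for 128-, 192- and 256-bit keys) as 6 + key_length/4. A minimal standalone C check of that rule, detached from the kernel context:

#include <assert.h>
#include <stdio.h>

static int num_rounds(unsigned int key_length)
{
        /* 6 + (key length in 32-bit words) */
        return 6 + key_length / 4;
}

int main(void)
{
        assert(num_rounds(16) == 10);   /* AES-128 */
        assert(num_rounds(24) == 12);   /* AES-192 */
        assert(num_rounds(32) == 14);   /* AES-256 */
        printf("round counts match the AES spec\n");
        return 0;
}
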
616 diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
617 index 55520cec8b27..6cf0e4cb7b97 100644
618 --- a/arch/x86/include/asm/microcode.h
619 +++ b/arch/x86/include/asm/microcode.h
620 @@ -37,7 +37,13 @@ struct cpu_signature {
621
622 struct device;
623
624 -enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
625 +enum ucode_state {
626 + UCODE_OK = 0,
627 + UCODE_NEW,
628 + UCODE_UPDATED,
629 + UCODE_NFOUND,
630 + UCODE_ERROR,
631 +};
632
633 struct microcode_ops {
634 enum ucode_state (*request_microcode_user) (int cpu,
635 @@ -54,7 +60,7 @@ struct microcode_ops {
636 * are being called.
637 * See also the "Synchronization" section in microcode_core.c.
638 */
639 - int (*apply_microcode) (int cpu);
640 + enum ucode_state (*apply_microcode) (int cpu);
641 int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
642 };
643
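
The reordered ucode_state enum above is what makes the range comparisons elsewhere in this patch work: the success-ish states sort below UCODE_NFOUND and UCODE_ERROR, so callers can test a whole class of outcomes at once instead of enumerating them. A small sketch of that ordering (values mirror the hunk above):

#include <stdio.h>

enum ucode_state {
        UCODE_OK = 0,
        UCODE_NEW,
        UCODE_UPDATED,
        UCODE_NFOUND,
        UCODE_ERROR,
};

static const char *classify(enum ucode_state s)
{
        /* everything past UCODE_UPDATED is a failure, which is exactly
         * what checks like "ret > UCODE_UPDATED" in the AMD loader use */
        return s > UCODE_UPDATED ? "failure" : "success";
}

int main(void)
{
        printf("UCODE_NEW:    %s\n", classify(UCODE_NEW));      /* success */
        printf("UCODE_NFOUND: %s\n", classify(UCODE_NFOUND));   /* failure */
        return 0;
}
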
644 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
645 index 15fc074bd628..3222c7746cb1 100644
646 --- a/arch/x86/include/asm/processor.h
647 +++ b/arch/x86/include/asm/processor.h
648 @@ -968,4 +968,5 @@ bool xen_set_default_idle(void);
649
650 void stop_this_cpu(void *dummy);
651 void df_debug(struct pt_regs *regs, long error_code);
652 +void microcode_check(void);
653 #endif /* _ASM_X86_PROCESSOR_H */
654 diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
655 index f5d92bc3b884..2c4d5ece7456 100644
656 --- a/arch/x86/kernel/aperture_64.c
657 +++ b/arch/x86/kernel/aperture_64.c
658 @@ -30,6 +30,7 @@
659 #include <asm/dma.h>
660 #include <asm/amd_nb.h>
661 #include <asm/x86_init.h>
662 +#include <linux/crash_dump.h>
663
664 /*
665 * Using 512M as goal, in case kexec will load kernel_big
666 @@ -56,6 +57,33 @@ int fallback_aper_force __initdata;
667
668 int fix_aperture __initdata = 1;
669
670 +#ifdef CONFIG_PROC_VMCORE
671 +/*
672 + * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
673 + * use the same range because it will remain configured in the northbridge.
674 + * Trying to dump this area via /proc/vmcore may crash the machine, so exclude
675 + * it from vmcore.
676 + */
677 +static unsigned long aperture_pfn_start, aperture_page_count;
678 +
679 +static int gart_oldmem_pfn_is_ram(unsigned long pfn)
680 +{
681 + return likely((pfn < aperture_pfn_start) ||
682 + (pfn >= aperture_pfn_start + aperture_page_count));
683 +}
684 +
685 +static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
686 +{
687 + aperture_pfn_start = aper_base >> PAGE_SHIFT;
688 + aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
689 + WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
690 +}
691 +#else
692 +static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
693 +{
694 +}
695 +#endif
696 +
697 /* This code runs before the PCI subsystem is initialized, so just
698 access the northbridge directly. */
699
700 @@ -435,8 +463,16 @@ int __init gart_iommu_hole_init(void)
701
702 out:
703 if (!fix && !fallback_aper_force) {
704 - if (last_aper_base)
705 + if (last_aper_base) {
706 + /*
707 + * If this is the kdump kernel, the first kernel
708 + * may have allocated the range over its e820 RAM
709 + * and fixed up the northbridge
710 + */
711 + exclude_from_vmcore(last_aper_base, last_aper_order);
712 +
713 return 1;
714 + }
715 return 0;
716 }
717
718 @@ -473,6 +509,14 @@ int __init gart_iommu_hole_init(void)
719 return 0;
720 }
721
722 + /*
723 + * If this is the kdump kernel _and_ the first kernel did not
724 + * configure the aperture in the northbridge, this range may
725 + * overlap with the first kernel's memory. We can't access the
726 + * range through vmcore even though it should be part of the dump.
727 + */
728 + exclude_from_vmcore(aper_alloc, aper_order);
729 +
730 /* Fix up the north bridges */
731 for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
732 int bus, dev_base, dev_limit;
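
The vmcore exclusion above works by registering a per-pfn predicate that /proc/vmcore consults before reading old-kernel memory; aperture_page_count is (32 MB << aper_order) expressed in pages. A self-contained sketch of the range test, with made-up aperture numbers:

#include <stdio.h>

/* hypothetical aperture: pfn base at 1 GB, 64 MB worth of 4K pages */
static unsigned long aperture_pfn_start = 0x40000;
static unsigned long aperture_page_count = 0x4000;

static int gart_oldmem_pfn_is_ram(unsigned long pfn)
{
        /* non-zero: safe to dump; zero: inside the aperture, skip it */
        return pfn < aperture_pfn_start ||
               pfn >= aperture_pfn_start + aperture_page_count;
}

int main(void)
{
        printf("below:  %d\n", gart_oldmem_pfn_is_ram(0x3ffff)); /* 1 */
        printf("inside: %d\n", gart_oldmem_pfn_is_ram(0x40000)); /* 0 */
        printf("above:  %d\n", gart_oldmem_pfn_is_ram(0x44000)); /* 1 */
        return 0;
}
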
733 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
734 index 651b7afed4da..cf6380200dc2 100644
735 --- a/arch/x86/kernel/cpu/common.c
736 +++ b/arch/x86/kernel/cpu/common.c
737 @@ -1724,3 +1724,33 @@ static int __init init_cpu_syscore(void)
738 return 0;
739 }
740 core_initcall(init_cpu_syscore);
741 +
742 +/*
743 + * The microcode loader calls this upon late microcode load to recheck features,
744 + * only when microcode has been updated. Caller holds microcode_mutex and CPU
745 + * hotplug lock.
746 + */
747 +void microcode_check(void)
748 +{
749 + struct cpuinfo_x86 info;
750 +
751 + perf_check_microcode();
752 +
753 + /* Reload CPUID max function as it might've changed. */
754 + info.cpuid_level = cpuid_eax(0);
755 +
756 + /*
757 + * Copy all capability leafs to pick up the synthetic ones so that
758 + * memcmp() below doesn't fail on that. The ones coming from CPUID will
759 + * get overwritten in get_cpu_cap().
760 + */
761 + memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
762 +
763 + get_cpu_cap(&info);
764 +
765 + if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
766 + return;
767 +
768 + pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
769 + pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
770 +}
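
microcode_check() above is a snapshot-and-compare pattern: copy the boot-time capability words, re-read them after the update, and warn only on a difference. A simplified userspace illustration (the word count is illustrative; the kernel compares the full x86_capability array, NCAPINTS + NBUGINTS words):

#include <stdio.h>
#include <string.h>

#define NWORDS 20       /* illustrative array size */

static unsigned int boot_caps[NWORDS];
static unsigned int cur_caps[NWORDS];

int main(void)
{
        /* snapshot the boot-time words, then re-read (here: fake a change) */
        memcpy(cur_caps, boot_caps, sizeof(cur_caps));
        cur_caps[7] |= 1u << 26;        /* pretend the update exposed a bit */

        if (memcmp(cur_caps, boot_caps, sizeof(cur_caps)))
                printf("CPU features changed after loading microcode\n");
        return 0;
}
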
771 diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
772 index 330b8462d426..48179928ff38 100644
773 --- a/arch/x86/kernel/cpu/microcode/amd.c
774 +++ b/arch/x86/kernel/cpu/microcode/amd.c
775 @@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
776 return -EINVAL;
777
778 ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
779 - if (ret != UCODE_OK)
780 + if (ret > UCODE_UPDATED)
781 return -EINVAL;
782
783 return 0;
784 @@ -498,7 +498,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
785 return patch_size;
786 }
787
788 -static int apply_microcode_amd(int cpu)
789 +static enum ucode_state apply_microcode_amd(int cpu)
790 {
791 struct cpuinfo_x86 *c = &cpu_data(cpu);
792 struct microcode_amd *mc_amd;
793 @@ -512,7 +512,7 @@ static int apply_microcode_amd(int cpu)
794
795 p = find_patch(cpu);
796 if (!p)
797 - return 0;
798 + return UCODE_NFOUND;
799
800 mc_amd = p->data;
801 uci->mc = p->data;
802 @@ -523,13 +523,13 @@ static int apply_microcode_amd(int cpu)
803 if (rev >= mc_amd->hdr.patch_id) {
804 c->microcode = rev;
805 uci->cpu_sig.rev = rev;
806 - return 0;
807 + return UCODE_OK;
808 }
809
810 if (__apply_microcode_amd(mc_amd)) {
811 pr_err("CPU%d: update failed for patch_level=0x%08x\n",
812 cpu, mc_amd->hdr.patch_id);
813 - return -1;
814 + return UCODE_ERROR;
815 }
816 pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
817 mc_amd->hdr.patch_id);
818 @@ -537,7 +537,7 @@ static int apply_microcode_amd(int cpu)
819 uci->cpu_sig.rev = mc_amd->hdr.patch_id;
820 c->microcode = mc_amd->hdr.patch_id;
821
822 - return 0;
823 + return UCODE_UPDATED;
824 }
825
826 static int install_equiv_cpu_table(const u8 *buf)
827 @@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
828 static enum ucode_state
829 load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
830 {
831 + struct ucode_patch *p;
832 enum ucode_state ret;
833
834 /* free old equiv table */
835 free_equiv_cpu_table();
836
837 ret = __load_microcode_amd(family, data, size);
838 -
839 - if (ret != UCODE_OK)
840 + if (ret != UCODE_OK) {
841 cleanup();
842 + return ret;
843 + }
844
845 -#ifdef CONFIG_X86_32
846 - /* save BSP's matching patch for early load */
847 - if (save) {
848 - struct ucode_patch *p = find_patch(0);
849 - if (p) {
850 - memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
851 - memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
852 - PATCH_MAX_SIZE));
853 - }
854 + p = find_patch(0);
855 + if (!p) {
856 + return ret;
857 + } else {
858 + if (boot_cpu_data.microcode == p->patch_id)
859 + return ret;
860 +
861 + ret = UCODE_NEW;
862 }
863 -#endif
864 +
865 + /* save BSP's matching patch for early load */
866 + if (!save)
867 + return ret;
868 +
869 + memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
870 + memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
871 +
872 return ret;
873 }
874
875 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
876 index e4fc595cd6ea..021c90464cc2 100644
877 --- a/arch/x86/kernel/cpu/microcode/core.c
878 +++ b/arch/x86/kernel/cpu/microcode/core.c
879 @@ -22,13 +22,16 @@
880 #define pr_fmt(fmt) "microcode: " fmt
881
882 #include <linux/platform_device.h>
883 +#include <linux/stop_machine.h>
884 #include <linux/syscore_ops.h>
885 #include <linux/miscdevice.h>
886 #include <linux/capability.h>
887 #include <linux/firmware.h>
888 #include <linux/kernel.h>
889 +#include <linux/delay.h>
890 #include <linux/mutex.h>
891 #include <linux/cpu.h>
892 +#include <linux/nmi.h>
893 #include <linux/fs.h>
894 #include <linux/mm.h>
895
896 @@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
897 */
898 static DEFINE_MUTEX(microcode_mutex);
899
900 +/*
901 + * Serialize late loading so that CPUs get updated one-by-one.
902 + */
903 +static DEFINE_SPINLOCK(update_lock);
904 +
905 struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
906
907 struct cpu_info_ctx {
908 @@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu)
909 return ret;
910 }
911
912 -struct apply_microcode_ctx {
913 - int err;
914 -};
915 -
916 static void apply_microcode_local(void *arg)
917 {
918 - struct apply_microcode_ctx *ctx = arg;
919 + enum ucode_state *err = arg;
920
921 - ctx->err = microcode_ops->apply_microcode(smp_processor_id());
922 + *err = microcode_ops->apply_microcode(smp_processor_id());
923 }
924
925 static int apply_microcode_on_target(int cpu)
926 {
927 - struct apply_microcode_ctx ctx = { .err = 0 };
928 + enum ucode_state err;
929 int ret;
930
931 - ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
932 - if (!ret)
933 - ret = ctx.err;
934 -
935 + ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
936 + if (!ret) {
937 + if (err == UCODE_ERROR)
938 + ret = 1;
939 + }
940 return ret;
941 }
942
943 @@ -489,31 +494,124 @@ static void __exit microcode_dev_exit(void)
944 /* fake device for request_firmware */
945 static struct platform_device *microcode_pdev;
946
947 -static int reload_for_cpu(int cpu)
948 +/*
949 + * Late loading dance. Why the heavy-handed stomp_machine effort?
950 + *
951 + * - HT siblings must be idle and not execute other code while the other sibling
952 + * is loading microcode in order to avoid any negative interactions caused by
953 + * the loading.
954 + *
955 + * - In addition, microcode update on the cores must be serialized until this
956 + * requirement can be relaxed in the future. Right now, this is conservative
957 + * and good.
958 + */
959 +#define SPINUNIT 100 /* 100 nsec */
960 +
961 +static int check_online_cpus(void)
962 {
963 - struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
964 - enum ucode_state ustate;
965 - int err = 0;
966 + if (num_online_cpus() == num_present_cpus())
967 + return 0;
968
969 - if (!uci->valid)
970 - return err;
971 + pr_err("Not all CPUs online, aborting microcode update.\n");
972
973 - ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
974 - if (ustate == UCODE_OK)
975 - apply_microcode_on_target(cpu);
976 - else
977 - if (ustate == UCODE_ERROR)
978 - err = -EINVAL;
979 - return err;
980 + return -EINVAL;
981 +}
982 +
983 +static atomic_t late_cpus_in;
984 +static atomic_t late_cpus_out;
985 +
986 +static int __wait_for_cpus(atomic_t *t, long long timeout)
987 +{
988 + int all_cpus = num_online_cpus();
989 +
990 + atomic_inc(t);
991 +
992 + while (atomic_read(t) < all_cpus) {
993 + if (timeout < SPINUNIT) {
994 + pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
995 + all_cpus - atomic_read(t));
996 + return 1;
997 + }
998 +
999 + ndelay(SPINUNIT);
1000 + timeout -= SPINUNIT;
1001 +
1002 + touch_nmi_watchdog();
1003 + }
1004 + return 0;
1005 +}
1006 +
1007 +/*
1008 + * Returns:
1009 + * < 0 - on error
1010 + * 0 - no update done
1011 + * 1 - microcode was updated
1012 + */
1013 +static int __reload_late(void *info)
1014 +{
1015 + int cpu = smp_processor_id();
1016 + enum ucode_state err;
1017 + int ret = 0;
1018 +
1019 + /*
1020 + * Wait for all CPUs to arrive. A load will not be attempted unless all
1021 + * CPUs show up.
1022 + * */
1023 + if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
1024 + return -1;
1025 +
1026 + spin_lock(&update_lock);
1027 + apply_microcode_local(&err);
1028 + spin_unlock(&update_lock);
1029 +
1030 + if (err > UCODE_NFOUND) {
1031 + pr_warn("Error reloading microcode on CPU %d\n", cpu);
1032 + return -1;
1033 + /* siblings return UCODE_OK because their engine got updated already */
1034 + } else if (err == UCODE_UPDATED || err == UCODE_OK) {
1035 + ret = 1;
1036 + } else {
1037 + return ret;
1038 + }
1039 +
1040 + /*
1041 + * Increase the wait timeout to a safe value here since we're
1042 + * serializing the microcode update and that could take a while on a
1043 + * large number of CPUs. And that is fine as the *actual* timeout will
1044 + * be determined by the last CPU finished updating and thus cut short.
1045 + */
1046 + if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
1047 + panic("Timeout during microcode update!\n");
1048 +
1049 + return ret;
1050 +}
1051 +
1052 +/*
1053 + * Reload microcode late on all CPUs. Wait for a sec until they
1054 + * all gather together.
1055 + */
1056 +static int microcode_reload_late(void)
1057 +{
1058 + int ret;
1059 +
1060 + atomic_set(&late_cpus_in, 0);
1061 + atomic_set(&late_cpus_out, 0);
1062 +
1063 + ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
1064 + if (ret > 0)
1065 + microcode_check();
1066 +
1067 + return ret;
1068 }
1069
1070 static ssize_t reload_store(struct device *dev,
1071 struct device_attribute *attr,
1072 const char *buf, size_t size)
1073 {
1074 + enum ucode_state tmp_ret = UCODE_OK;
1075 + int bsp = boot_cpu_data.cpu_index;
1076 unsigned long val;
1077 - int cpu;
1078 - ssize_t ret = 0, tmp_ret;
1079 + ssize_t ret = 0;
1080
1081 ret = kstrtoul(buf, 0, &val);
1082 if (ret)
1083 @@ -522,23 +620,24 @@ static ssize_t reload_store(struct device *dev,
1084 if (val != 1)
1085 return size;
1086
1087 + tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
1088 + if (tmp_ret != UCODE_NEW)
1089 + return size;
1090 +
1091 get_online_cpus();
1092 - mutex_lock(&microcode_mutex);
1093 - for_each_online_cpu(cpu) {
1094 - tmp_ret = reload_for_cpu(cpu);
1095 - if (tmp_ret != 0)
1096 - pr_warn("Error reloading microcode on CPU %d\n", cpu);
1097
1098 - /* save retval of the first encountered reload error */
1099 - if (!ret)
1100 - ret = tmp_ret;
1101 - }
1102 - if (!ret)
1103 - perf_check_microcode();
1104 + ret = check_online_cpus();
1105 + if (ret)
1106 + goto put;
1107 +
1108 + mutex_lock(&microcode_mutex);
1109 + ret = microcode_reload_late();
1110 mutex_unlock(&microcode_mutex);
1111 +
1112 +put:
1113 put_online_cpus();
1114
1115 - if (!ret)
1116 + if (ret >= 0)
1117 ret = size;
1118
1119 return ret;
1120 @@ -606,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
1121 if (system_state != SYSTEM_RUNNING)
1122 return UCODE_NFOUND;
1123
1124 - ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
1125 - refresh_fw);
1126 -
1127 - if (ustate == UCODE_OK) {
1128 + ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
1129 + if (ustate == UCODE_NEW) {
1130 pr_debug("CPU%d updated upon init\n", cpu);
1131 apply_microcode_on_target(cpu);
1132 }
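
The late-loading rendezvous in __wait_for_cpus() above is a classic atomic-counter barrier: each CPU increments a shared counter and spins until it reaches num_online_cpus(), with a timeout and touch_nmi_watchdog() added in the kernel version. A minimal pthread sketch of the same pattern, with threads standing in for CPUs:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int late_cpus_in;

static void *cpu_thread(void *arg)
{
        long id = (long)arg;

        atomic_fetch_add(&late_cpus_in, 1);
        while (atomic_load(&late_cpus_in) < NCPUS)
                ;       /* the kernel adds a timeout here, plus watchdog care */

        printf("cpu %ld passed the rendezvous\n", id);
        return NULL;
}

int main(void)
{
        pthread_t t[NCPUS];
        long i;

        for (i = 0; i < NCPUS; i++)
                pthread_create(&t[i], NULL, cpu_thread, (void *)i);
        for (i = 0; i < NCPUS; i++)
                pthread_join(t[i], NULL);
        return 0;
}
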
1133 diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
1134 index a15db2b4e0d6..32b8e5724f96 100644
1135 --- a/arch/x86/kernel/cpu/microcode/intel.c
1136 +++ b/arch/x86/kernel/cpu/microcode/intel.c
1137 @@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
1138 if (!mc)
1139 return 0;
1140
1141 + /*
1142 + * Save us the MSR write below - which is a particular expensive
1143 + * operation - when the other hyperthread has updated the microcode
1144 + * already.
1145 + */
1146 + rev = intel_get_microcode_revision();
1147 + if (rev >= mc->hdr.rev) {
1148 + uci->cpu_sig.rev = rev;
1149 + return UCODE_OK;
1150 + }
1151 +
1152 + /*
1153 + * Writeback and invalidate caches before updating microcode to avoid
1154 + * internal issues depending on what the microcode is updating.
1155 + */
1156 + native_wbinvd();
1157 +
1158 /* write microcode via MSR 0x79 */
1159 native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
1160
1161 @@ -772,27 +789,44 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
1162 return 0;
1163 }
1164
1165 -static int apply_microcode_intel(int cpu)
1166 +static enum ucode_state apply_microcode_intel(int cpu)
1167 {
1168 + struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
1169 + struct cpuinfo_x86 *c = &cpu_data(cpu);
1170 struct microcode_intel *mc;
1171 - struct ucode_cpu_info *uci;
1172 - struct cpuinfo_x86 *c;
1173 static int prev_rev;
1174 u32 rev;
1175
1176 /* We should bind the task to the CPU */
1177 if (WARN_ON(raw_smp_processor_id() != cpu))
1178 - return -1;
1179 + return UCODE_ERROR;
1180
1181 - uci = ucode_cpu_info + cpu;
1182 - mc = uci->mc;
1183 + /* Look for a newer patch in our cache: */
1184 + mc = find_patch(uci);
1185 if (!mc) {
1186 - /* Look for a newer patch in our cache: */
1187 - mc = find_patch(uci);
1188 + mc = uci->mc;
1189 if (!mc)
1190 - return 0;
1191 + return UCODE_NFOUND;
1192 }
1193
1194 + /*
1195 + * Save us the MSR write below - which is a particular expensive
1196 + * operation - when the other hyperthread has updated the microcode
1197 + * already.
1198 + */
1199 + rev = intel_get_microcode_revision();
1200 + if (rev >= mc->hdr.rev) {
1201 + uci->cpu_sig.rev = rev;
1202 + c->microcode = rev;
1203 + return UCODE_OK;
1204 + }
1205 +
1206 + /*
1207 + * Writeback and invalidate caches before updating microcode to avoid
1208 + * internal issues depending on what the microcode is updating.
1209 + */
1210 + native_wbinvd();
1211 +
1212 /* write microcode via MSR 0x79 */
1213 wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
1214
1215 @@ -801,7 +835,7 @@ static int apply_microcode_intel(int cpu)
1216 if (rev != mc->hdr.rev) {
1217 pr_err("CPU%d update to revision 0x%x failed\n",
1218 cpu, mc->hdr.rev);
1219 - return -1;
1220 + return UCODE_ERROR;
1221 }
1222
1223 if (rev != prev_rev) {
1224 @@ -813,12 +847,10 @@ static int apply_microcode_intel(int cpu)
1225 prev_rev = rev;
1226 }
1227
1228 - c = &cpu_data(cpu);
1229 -
1230 uci->cpu_sig.rev = rev;
1231 c->microcode = rev;
1232
1233 - return 0;
1234 + return UCODE_UPDATED;
1235 }
1236
1237 static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
1238 @@ -830,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
1239 unsigned int leftover = size;
1240 unsigned int curr_mc_size = 0, new_mc_size = 0;
1241 unsigned int csig, cpf;
1242 + enum ucode_state ret = UCODE_OK;
1243
1244 while (leftover) {
1245 struct microcode_header_intel mc_header;
1246 @@ -871,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
1247 new_mc = mc;
1248 new_mc_size = mc_size;
1249 mc = NULL; /* trigger new vmalloc */
1250 + ret = UCODE_NEW;
1251 }
1252
1253 ucode_ptr += mc_size;
1254 @@ -900,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
1255 pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
1256 cpu, new_rev, uci->cpu_sig.rev);
1257
1258 - return UCODE_OK;
1259 + return ret;
1260 }
1261
1262 static int get_ucode_fw(void *to, const void *from, size_t n)
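
Both Intel apply paths above gain the same early-out: if the running revision already meets the patch revision (typically because the hyperthread sibling updated the shared microcode engine), the expensive WBINVD-plus-MSR-write sequence is skipped. A trivial sketch of that control flow, with the hardware accesses stubbed out and a hypothetical revision value:

#include <stdio.h>

static unsigned int running_rev = 0xb4;         /* hypothetical */

static int apply_patch(unsigned int patch_rev)
{
        if (running_rev >= patch_rev)
                return 0;       /* UCODE_OK: sibling already did the work */

        /* the real path does native_wbinvd() + the MSR 0x79 write here */
        running_rev = patch_rev;
        return 1;               /* UCODE_UPDATED */
}

int main(void)
{
        printf("apply 0xb4: %d (skipped)\n", apply_patch(0xb4));
        printf("apply 0xc2: %d (applied)\n", apply_patch(0xc2));
        return 0;
}
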
1263 diff --git a/arch/x86/xen/mmu_hvm.c b/arch/x86/xen/mmu_hvm.c
1264 index 2cfcfe4f6b2a..dd2ad82eee80 100644
1265 --- a/arch/x86/xen/mmu_hvm.c
1266 +++ b/arch/x86/xen/mmu_hvm.c
1267 @@ -75,6 +75,6 @@ void __init xen_hvm_init_mmu_ops(void)
1268 if (is_pagetable_dying_supported())
1269 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
1270 #ifdef CONFIG_PROC_VMCORE
1271 - register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
1272 + WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram));
1273 #endif
1274 }
1275 diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
1276 index ceefb9a706d6..5d53e504acae 100644
1277 --- a/block/bfq-cgroup.c
1278 +++ b/block/bfq-cgroup.c
1279 @@ -749,10 +749,11 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
1280 unsigned long flags;
1281 int i;
1282
1283 + spin_lock_irqsave(&bfqd->lock, flags);
1284 +
1285 if (!entity) /* root group */
1286 - return;
1287 + goto put_async_queues;
1288
1289 - spin_lock_irqsave(&bfqd->lock, flags);
1290 /*
1291 * Empty all service_trees belonging to this group before
1292 * deactivating the group itself.
1293 @@ -783,6 +784,8 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
1294 }
1295
1296 __bfq_deactivate_entity(entity, false);
1297 +
1298 +put_async_queues:
1299 bfq_put_async_queues(bfqd, bfqg);
1300
1301 spin_unlock_irqrestore(&bfqd->lock, flags);
1302 diff --git a/block/blk-mq.c b/block/blk-mq.c
1303 index f1fb126a3be5..6f899669cbdd 100644
1304 --- a/block/blk-mq.c
1305 +++ b/block/blk-mq.c
1306 @@ -1928,7 +1928,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
1307 {
1308 blk_mq_debugfs_unregister_hctx(hctx);
1309
1310 - blk_mq_tag_idle(hctx);
1311 + if (blk_mq_hw_queue_mapped(hctx))
1312 + blk_mq_tag_idle(hctx);
1313
1314 if (set->ops->exit_request)
1315 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
1316 @@ -2314,6 +2315,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
1317 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
1318
1319 blk_mq_sysfs_unregister(q);
1320 +
1321 + /* protect against switching io scheduler */
1322 + mutex_lock(&q->sysfs_lock);
1323 for (i = 0; i < set->nr_hw_queues; i++) {
1324 int node;
1325
1326 @@ -2358,6 +2362,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
1327 }
1328 }
1329 q->nr_hw_queues = i;
1330 + mutex_unlock(&q->sysfs_lock);
1331 blk_mq_sysfs_register(q);
1332 }
1333
1334 @@ -2528,9 +2533,27 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
1335
1336 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
1337 {
1338 - if (set->ops->map_queues)
1339 + if (set->ops->map_queues) {
1340 + int cpu;
1341 + /*
1342 + * transport .map_queues is usually done in the following
1343 + * way:
1344 + *
1345 + * for (queue = 0; queue < set->nr_hw_queues; queue++) {
1346 + * mask = get_cpu_mask(queue)
1347 + * for_each_cpu(cpu, mask)
1348 + * set->mq_map[cpu] = queue;
1349 + * }
1350 + *
1351 + * When we need to remap, the table has to be cleared for
1352 + * killing stale mapping since one CPU may not be mapped
1353 + * to any hw queue.
1354 + */
1355 + for_each_possible_cpu(cpu)
1356 + set->mq_map[cpu] = 0;
1357 +
1358 return set->ops->map_queues(set);
1359 - else
1360 + } else
1361 return blk_mq_map_queues(set);
1362 }
1363
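
The comment added to blk_mq_update_queue_map() already sketches how a transport's .map_queues fills mq_map; the standalone sketch below shows why the clearing loop matters — a CPU the new mapping does not cover would otherwise keep a stale queue index. Sizes and the mapping function are hypothetical:

#include <stdio.h>

#define NR_CPUS 8

static unsigned int mq_map[NR_CPUS];

/* hypothetical transport mapping: only the CPUs the driver currently
 * covers get an assignment; the rest are left untouched */
static void driver_map_queues(int covered_cpus, int nr_hw_queues)
{
        int cpu;

        for (cpu = 0; cpu < covered_cpus; cpu++)
                mq_map[cpu] = cpu % nr_hw_queues;
}

int main(void)
{
        int cpu;

        driver_map_queues(8, 4);        /* initial map: cpu7 -> hctx3 */

        /* remap with fewer CPUs/queues; without the clearing loop the
         * patch adds, cpu6 and cpu7 would still point at stale queues */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                mq_map[cpu] = 0;
        driver_map_queues(6, 2);

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu%d -> hctx%u\n", cpu, mq_map[cpu]);
        return 0;
}
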
1364 diff --git a/crypto/Makefile b/crypto/Makefile
1365 index da190be60ce2..adaf2c63baeb 100644
1366 --- a/crypto/Makefile
1367 +++ b/crypto/Makefile
1368 @@ -98,6 +98,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
1369 obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
1370 CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
1371 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
1372 +CFLAGS_aes_generic.o := $(call cc-ifversion, -ge, 0701, -Os) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
1373 obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
1374 obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
1375 obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
1376 diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
1377 index 0972ec0e2eb8..f53ccc680238 100644
1378 --- a/drivers/acpi/acpi_video.c
1379 +++ b/drivers/acpi/acpi_video.c
1380 @@ -80,8 +80,8 @@ MODULE_PARM_DESC(report_key_events,
1381 static bool device_id_scheme = false;
1382 module_param(device_id_scheme, bool, 0444);
1383
1384 -static bool only_lcd = false;
1385 -module_param(only_lcd, bool, 0444);
1386 +static int only_lcd = -1;
1387 +module_param(only_lcd, int, 0444);
1388
1389 static int register_count;
1390 static DEFINE_MUTEX(register_count_mutex);
1391 @@ -2136,6 +2136,16 @@ int acpi_video_register(void)
1392 goto leave;
1393 }
1394
1395 + /*
1396 + * We're seeing a lot of bogus backlight interfaces on newer machines
1397 + * without a LCD such as desktops, servers and HDMI sticks. Checking
1398 + * the lcd flag fixes this, so enable this on any machines which are
1399 + * win8 ready (where we also prefer the native backlight driver, so
1400 + * normally the acpi_video code should not register there anyways).
1401 + */
1402 + if (only_lcd == -1)
1403 + only_lcd = acpi_osi_is_win8();
1404 +
1405 dmi_check_system(video_dmi_table);
1406
1407 ret = acpi_bus_register_driver(&acpi_video_bus);
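
The only_lcd change above is the common bool-to-tristate module-parameter pattern: -1 means "the user never set it, pick a default late", here based on whether the firmware claims Windows 8 support. A compact sketch with the OSI check stubbed by a hypothetical helper:

#include <stdio.h>

static int only_lcd = -1;       /* -1 auto, 0 off, 1 on */

/* stands in for acpi_osi_is_win8(); hypothetical */
static int firmware_is_win8_ready(void)
{
        return 1;
}

int main(void)
{
        if (only_lcd == -1)     /* parameter left at its default */
                only_lcd = firmware_is_win8_ready();

        printf("only_lcd resolved to %d\n", only_lcd);
        return 0;
}
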
1408 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1409 index df842465634a..6adcda057b36 100644
1410 --- a/drivers/acpi/ec.c
1411 +++ b/drivers/acpi/ec.c
1412 @@ -1516,7 +1516,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
1413 }
1414
1415 acpi_handle_info(ec->handle,
1416 - "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
1417 + "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
1418 ec->gpe, ec->command_addr, ec->data_addr);
1419 return ret;
1420 }
1421 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
1422 index 6c7dd7af789e..dd70d6c2bca0 100644
1423 --- a/drivers/acpi/ec_sys.c
1424 +++ b/drivers/acpi/ec_sys.c
1425 @@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
1426 return -ENOMEM;
1427 }
1428
1429 - if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe))
1430 + if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe))
1431 goto error;
1432 if (!debugfs_create_bool("use_global_lock", 0444, dev_dir,
1433 &first_ec->global_lock))
1434 diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
1435 index ede83d38beed..2cd2ae152ab7 100644
1436 --- a/drivers/acpi/internal.h
1437 +++ b/drivers/acpi/internal.h
1438 @@ -159,7 +159,7 @@ static inline void acpi_early_processor_osc(void) {}
1439 -------------------------------------------------------------------------- */
1440 struct acpi_ec {
1441 acpi_handle handle;
1442 - unsigned long gpe;
1443 + u32 gpe;
1444 unsigned long command_addr;
1445 unsigned long data_addr;
1446 bool global_lock;
1447 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1448 index b2c0306f97ed..e9dff868c028 100644
1449 --- a/drivers/bluetooth/btusb.c
1450 +++ b/drivers/bluetooth/btusb.c
1451 @@ -277,6 +277,7 @@ static const struct usb_device_id blacklist_table[] = {
1452 { USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME },
1453 { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
1454 { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
1455 + { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
1456 { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
1457
1458 /* Broadcom BCM2035 */
1459 diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
1460 index 5294442505cb..0f1dc35e7078 100644
1461 --- a/drivers/char/tpm/tpm-interface.c
1462 +++ b/drivers/char/tpm/tpm-interface.c
1463 @@ -328,7 +328,7 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
1464 }
1465 EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
1466
1467 -static bool tpm_validate_command(struct tpm_chip *chip,
1468 +static int tpm_validate_command(struct tpm_chip *chip,
1469 struct tpm_space *space,
1470 const u8 *cmd,
1471 size_t len)
1472 @@ -340,10 +340,10 @@ static bool tpm_validate_command(struct tpm_chip *chip,
1473 unsigned int nr_handles;
1474
1475 if (len < TPM_HEADER_SIZE)
1476 - return false;
1477 + return -EINVAL;
1478
1479 if (!space)
1480 - return true;
1481 + return 0;
1482
1483 if (chip->flags & TPM_CHIP_FLAG_TPM2 && chip->nr_commands) {
1484 cc = be32_to_cpu(header->ordinal);
1485 @@ -352,7 +352,7 @@ static bool tpm_validate_command(struct tpm_chip *chip,
1486 if (i < 0) {
1487 dev_dbg(&chip->dev, "0x%04X is an invalid command\n",
1488 cc);
1489 - return false;
1490 + return -EOPNOTSUPP;
1491 }
1492
1493 attrs = chip->cc_attrs_tbl[i];
1494 @@ -362,11 +362,11 @@ static bool tpm_validate_command(struct tpm_chip *chip,
1495 goto err_len;
1496 }
1497
1498 - return true;
1499 + return 0;
1500 err_len:
1501 dev_dbg(&chip->dev,
1502 "%s: insufficient command length %zu", __func__, len);
1503 - return false;
1504 + return -EINVAL;
1505 }
1506
1507 /**
1508 @@ -391,8 +391,20 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
1509 unsigned long stop;
1510 bool need_locality;
1511
1512 - if (!tpm_validate_command(chip, space, buf, bufsiz))
1513 - return -EINVAL;
1514 + rc = tpm_validate_command(chip, space, buf, bufsiz);
1515 + if (rc == -EINVAL)
1516 + return rc;
1517 + /*
1518 + * If the command is not implemented by the TPM, synthesize a
1519 + * response with a TPM2_RC_COMMAND_CODE return for user-space.
1520 + */
1521 + if (rc == -EOPNOTSUPP) {
1522 + header->length = cpu_to_be32(sizeof(*header));
1523 + header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
1524 + header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
1525 + TSS2_RESMGR_TPM_RC_LAYER);
1526 + return bufsiz;
1527 + }
1528
1529 if (bufsiz > TPM_BUFSIZE)
1530 bufsiz = TPM_BUFSIZE;
1531 diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
1532 index 2d5466a72e40..0b5b499f726a 100644
1533 --- a/drivers/char/tpm/tpm.h
1534 +++ b/drivers/char/tpm/tpm.h
1535 @@ -93,12 +93,17 @@ enum tpm2_structures {
1536 TPM2_ST_SESSIONS = 0x8002,
1537 };
1538
1539 +/* Indicates from what layer of the software stack the error comes from */
1540 +#define TSS2_RC_LAYER_SHIFT 16
1541 +#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
1542 +
1543 enum tpm2_return_codes {
1544 TPM2_RC_SUCCESS = 0x0000,
1545 TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
1546 TPM2_RC_HANDLE = 0x008B,
1547 TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
1548 TPM2_RC_DISABLED = 0x0120,
1549 + TPM2_RC_COMMAND_CODE = 0x0143,
1550 TPM2_RC_TESTING = 0x090A, /* RC_WARN */
1551 TPM2_RC_REFERENCE_H0 = 0x0910,
1552 };
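
For commands the TPM does not implement, the tpm_transmit() hunk above fabricates a 10-byte response header carrying TPM2_RC_COMMAND_CODE tagged with the resource-manager layer. A userspace sketch of that header, with htons()/htonl() standing in for cpu_to_be16()/cpu_to_be32() (TPM2_ST_NO_SESSIONS is 0x8001 per the TPM 2.0 spec):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define TPM2_ST_NO_SESSIONS      0x8001
#define TPM2_RC_COMMAND_CODE     0x0143
#define TSS2_RESMGR_TPM_RC_LAYER (11 << 16)

struct tpm_output_header {
        uint16_t tag;
        uint32_t length;
        uint32_t return_code;
} __attribute__((packed));

int main(void)
{
        struct tpm_output_header h = {
                .tag         = htons(TPM2_ST_NO_SESSIONS),
                .length      = htonl(sizeof(h)),
                .return_code = htonl(TPM2_RC_COMMAND_CODE |
                                     TSS2_RESMGR_TPM_RC_LAYER),
        };

        printf("%zu-byte header, rc=0x%08x\n", sizeof(h),
               (unsigned)ntohl(h.return_code));
        return 0;
}
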
1553 diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
1554 index 4ed516cb7276..b49942b9fe50 100644
1555 --- a/drivers/clk/clk-divider.c
1556 +++ b/drivers/clk/clk-divider.c
1557 @@ -118,12 +118,11 @@ static unsigned int _get_val(const struct clk_div_table *table,
1558 unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
1559 unsigned int val,
1560 const struct clk_div_table *table,
1561 - unsigned long flags)
1562 + unsigned long flags, unsigned long width)
1563 {
1564 - struct clk_divider *divider = to_clk_divider(hw);
1565 unsigned int div;
1566
1567 - div = _get_div(table, val, flags, divider->width);
1568 + div = _get_div(table, val, flags, width);
1569 if (!div) {
1570 WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
1571 "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
1572 @@ -145,7 +144,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
1573 val &= div_mask(divider->width);
1574
1575 return divider_recalc_rate(hw, parent_rate, val, divider->table,
1576 - divider->flags);
1577 + divider->flags, divider->width);
1578 }
1579
1580 static bool _is_valid_table_div(const struct clk_div_table *table,
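
divider_recalc_rate() now takes the field width explicitly because several callers in this patch (hi6220, lpc32xx, qcom, sunxi-ng, msm) wrap their own divider structs rather than a struct clk_divider, so the core can no longer fish the width out of the hw pointer. Stripped of table and flag handling, the computation is a rounded-up division; a hedged sketch assuming no table and no flags (then div = val + 1):

#include <stdio.h>

static unsigned long divider_recalc(unsigned long parent_rate,
                                    unsigned int val, unsigned long width)
{
        unsigned int div = val + 1;             /* one-based divider */

        (void)width;    /* in the kernel, width bounds table/flag handling */
        return (parent_rate + div - 1) / div;   /* DIV_ROUND_UP */
}

int main(void)
{
        /* 24 MHz parent, register value 3 -> divide by 4 -> 6 MHz */
        printf("%lu\n", divider_recalc(24000000UL, 3, 4));
        return 0;
}
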
1581 diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c
1582 index a1c1f684ad58..9f46cf9dcc65 100644
1583 --- a/drivers/clk/hisilicon/clkdivider-hi6220.c
1584 +++ b/drivers/clk/hisilicon/clkdivider-hi6220.c
1585 @@ -56,7 +56,7 @@ static unsigned long hi6220_clkdiv_recalc_rate(struct clk_hw *hw,
1586 val &= div_mask(dclk->width);
1587
1588 return divider_recalc_rate(hw, parent_rate, val, dclk->table,
1589 - CLK_DIVIDER_ROUND_CLOSEST);
1590 + CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
1591 }
1592
1593 static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate,
1594 diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
1595 index 44a5a535ca63..5144360e2c80 100644
1596 --- a/drivers/clk/meson/clk-mpll.c
1597 +++ b/drivers/clk/meson/clk-mpll.c
1598 @@ -98,7 +98,7 @@ static void params_from_rate(unsigned long requested_rate,
1599 *sdm = SDM_DEN - 1;
1600 } else {
1601 *n2 = div;
1602 - *sdm = DIV_ROUND_UP(rem * SDM_DEN, requested_rate);
1603 + *sdm = DIV_ROUND_UP_ULL((u64)rem * SDM_DEN, requested_rate);
1604 }
1605 }
1606
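
The meson MPLL fix above replaces DIV_ROUND_UP with DIV_ROUND_UP_ULL because rem * SDM_DEN can exceed 32 bits. A standalone demonstration of the truncation (SDM_DEN is 16384 in this driver; the sample values are made up):

#include <inttypes.h>
#include <stdio.h>

#define SDM_DEN 16384   /* the driver's SDM denominator */

int main(void)
{
        uint32_t rem = 500000000;       /* hypothetical remainder, Hz */
        uint32_t rate = 1000000000;     /* hypothetical requested rate */

        /* 500000000 * 16384 needs 43 bits; the 32-bit product wraps */
        uint32_t bad = (rem * SDM_DEN + rate - 1) / rate;
        uint64_t good = ((uint64_t)rem * SDM_DEN + rate - 1) / rate;

        printf("32-bit: %" PRIu32 ", 64-bit: %" PRIu64 "\n", bad, good);
        return 0;
}
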
1607 diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
1608 index 7b359afd620e..a6438f50e6db 100644
1609 --- a/drivers/clk/nxp/clk-lpc32xx.c
1610 +++ b/drivers/clk/nxp/clk-lpc32xx.c
1611 @@ -956,7 +956,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
1612 val &= div_mask(divider->width);
1613
1614 return divider_recalc_rate(hw, parent_rate, val, divider->table,
1615 - divider->flags);
1616 + divider->flags, divider->width);
1617 }
1618
1619 static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
1620 diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
1621 index 53484912301e..928fcc16ee27 100644
1622 --- a/drivers/clk/qcom/clk-regmap-divider.c
1623 +++ b/drivers/clk/qcom/clk-regmap-divider.c
1624 @@ -59,7 +59,7 @@ static unsigned long div_recalc_rate(struct clk_hw *hw,
1625 div &= BIT(divider->width) - 1;
1626
1627 return divider_recalc_rate(hw, parent_rate, div, NULL,
1628 - CLK_DIVIDER_ROUND_CLOSEST);
1629 + CLK_DIVIDER_ROUND_CLOSEST, divider->width);
1630 }
1631
1632 const struct clk_ops clk_regmap_div_ops = {
1633 diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
1634 index f8203115a6bc..c10160d7a556 100644
1635 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
1636 +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
1637 @@ -493,8 +493,8 @@ static SUNXI_CCU_MUX_WITH_GATE(tcon0_clk, "tcon0", tcon0_parents,
1638 0x118, 24, 3, BIT(31), CLK_SET_RATE_PARENT);
1639
1640 static const char * const tcon1_parents[] = { "pll-video1" };
1641 -static SUNXI_CCU_MUX_WITH_GATE(tcon1_clk, "tcon1", tcon1_parents,
1642 - 0x11c, 24, 3, BIT(31), CLK_SET_RATE_PARENT);
1643 +static SUNXI_CCU_M_WITH_MUX_GATE(tcon1_clk, "tcon1", tcon1_parents,
1644 + 0x11c, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT);
1645
1646 static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M", 0x130, BIT(16), 0);
1647
1648 diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
1649 index baa3cf96507b..302a18efd39f 100644
1650 --- a/drivers/clk/sunxi-ng/ccu_div.c
1651 +++ b/drivers/clk/sunxi-ng/ccu_div.c
1652 @@ -71,7 +71,7 @@ static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
1653 parent_rate);
1654
1655 val = divider_recalc_rate(hw, parent_rate, val, cd->div.table,
1656 - cd->div.flags);
1657 + cd->div.flags, cd->div.width);
1658
1659 if (cd->common.features & CCU_FEATURE_FIXED_POSTDIV)
1660 val /= cd->fixed_post_div;
1661 diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
1662 index 7e1e5bbcf430..6b3a63545619 100644
1663 --- a/drivers/cpufreq/powernv-cpufreq.c
1664 +++ b/drivers/cpufreq/powernv-cpufreq.c
1665 @@ -41,11 +41,9 @@
1666 #define POWERNV_MAX_PSTATES 256
1667 #define PMSR_PSAFE_ENABLE (1UL << 30)
1668 #define PMSR_SPR_EM_DISABLE (1UL << 31)
1669 -#define PMSR_MAX(x) ((x >> 32) & 0xFF)
1670 +#define MAX_PSTATE_SHIFT 32
1671 #define LPSTATE_SHIFT 48
1672 #define GPSTATE_SHIFT 56
1673 -#define GET_LPSTATE(x) (((x) >> LPSTATE_SHIFT) & 0xFF)
1674 -#define GET_GPSTATE(x) (((x) >> GPSTATE_SHIFT) & 0xFF)
1675
1676 #define MAX_RAMP_DOWN_TIME 5120
1677 /*
1678 @@ -93,6 +91,7 @@ struct global_pstate_info {
1679 };
1680
1681 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
1682 +u32 pstate_sign_prefix;
1683 static bool rebooting, throttled, occ_reset;
1684
1685 static const char * const throttle_reason[] = {
1686 @@ -147,6 +146,20 @@ static struct powernv_pstate_info {
1687 bool wof_enabled;
1688 } powernv_pstate_info;
1689
1690 +static inline int extract_pstate(u64 pmsr_val, unsigned int shift)
1691 +{
1692 + int ret = ((pmsr_val >> shift) & 0xFF);
1693 +
1694 + if (!ret)
1695 + return ret;
1696 +
1697 + return (pstate_sign_prefix | ret);
1698 +}
1699 +
1700 +#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
1701 +#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
1702 +#define extract_max_pstate(x) extract_pstate(x, MAX_PSTATE_SHIFT)
1703 +
1704 /* Use following macros for conversions between pstate_id and index */
1705 static inline int idx_to_pstate(unsigned int i)
1706 {
1707 @@ -277,6 +290,9 @@ static int init_powernv_pstates(void)
1708
1709 powernv_pstate_info.nr_pstates = nr_pstates;
1710 pr_debug("NR PStates %d\n", nr_pstates);
1711 +
1712 + pstate_sign_prefix = pstate_min & ~0xFF;
1713 +
1714 for (i = 0; i < nr_pstates; i++) {
1715 u32 id = be32_to_cpu(pstate_ids[i]);
1716 u32 freq = be32_to_cpu(pstate_freqs[i]);
1717 @@ -437,17 +453,10 @@ struct powernv_smp_call_data {
1718 static void powernv_read_cpu_freq(void *arg)
1719 {
1720 unsigned long pmspr_val;
1721 - s8 local_pstate_id;
1722 struct powernv_smp_call_data *freq_data = arg;
1723
1724 pmspr_val = get_pmspr(SPRN_PMSR);
1725 -
1726 - /*
1727 - * The local pstate id corresponds bits 48..55 in the PMSR.
1728 - * Note: Watch out for the sign!
1729 - */
1730 - local_pstate_id = (pmspr_val >> 48) & 0xFF;
1731 - freq_data->pstate_id = local_pstate_id;
1732 + freq_data->pstate_id = extract_local_pstate(pmspr_val);
1733 freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
1734
1735 pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
1736 @@ -521,7 +530,7 @@ static void powernv_cpufreq_throttle_check(void *data)
1737 chip = this_cpu_read(chip_info);
1738
1739 /* Check for Pmax Capping */
1740 - pmsr_pmax = (s8)PMSR_MAX(pmsr);
1741 + pmsr_pmax = extract_max_pstate(pmsr);
1742 pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
1743 if (pmsr_pmax_idx != powernv_pstate_info.max) {
1744 if (chip->throttled)
1745 @@ -644,8 +653,8 @@ void gpstate_timer_handler(unsigned long data)
1746 * value. Hence, read from PMCR to get correct data.
1747 */
1748 val = get_pmspr(SPRN_PMCR);
1749 - freq_data.gpstate_id = (s8)GET_GPSTATE(val);
1750 - freq_data.pstate_id = (s8)GET_LPSTATE(val);
1751 + freq_data.gpstate_id = extract_global_pstate(val);
1752 + freq_data.pstate_id = extract_local_pstate(val);
1753 if (freq_data.gpstate_id == freq_data.pstate_id) {
1754 reset_gpstates(policy);
1755 spin_unlock(&gpstates->gpstate_lock);
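
The extract_pstate() change above restores the sign of an 8-bit pstate field by OR-ing in high bits derived from pstate_min. A minimal user-space sketch of the same trick (names and values here are illustrative, not the kernel's):

    /* build: cc -o pstate pstate.c */
    #include <stdio.h>
    #include <stdint.h>

    #define LPSTATE_SHIFT 48

    static int32_t sign_prefix;    /* set once from the minimum pstate */

    static int extract_pstate(uint64_t pmsr_val, unsigned int shift)
    {
        int ret = (pmsr_val >> shift) & 0xFF;

        /* 0 is a valid non-negative pstate; nothing to extend */
        if (!ret)
            return ret;

        /* OR in the high bits of pstate_min to restore the sign */
        return sign_prefix | ret;
    }

    int main(void)
    {
        int32_t pstate_min = -32;          /* hypothetical lowest pstate */
        sign_prefix = pstate_min & ~0xFF;  /* 0xffffff00 */

        uint64_t pmsr = (uint64_t)0xF8 << LPSTATE_SHIFT; /* -8 stored as u8 */
        printf("%d\n", extract_pstate(pmsr, LPSTATE_SHIFT)); /* prints -8 */
        return 0;
    }
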
1756 diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
1757 index 202476fbbc4c..8a411514a7c5 100644
1758 --- a/drivers/devfreq/devfreq.c
1759 +++ b/drivers/devfreq/devfreq.c
1760 @@ -935,7 +935,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
1761 if (df->governor == governor) {
1762 ret = 0;
1763 goto out;
1764 - } else if (df->governor->immutable || governor->immutable) {
1765 + } else if ((df->governor && df->governor->immutable) ||
1766 + governor->immutable) {
1767 ret = -EINVAL;
1768 goto out;
1769 }
1770 diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
1771 index ec5d695bbb72..3c68bb525d5d 100644
1772 --- a/drivers/edac/mv64x60_edac.c
1773 +++ b/drivers/edac/mv64x60_edac.c
1774 @@ -758,7 +758,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
1775 /* Non-ECC RAM? */
1776 printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
1777 res = -ENODEV;
1778 - goto err2;
1779 + goto err;
1780 }
1781
1782 edac_dbg(3, "init mci\n");
1783 diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
1784 index 57efb251f9c4..10523ce00c38 100644
1785 --- a/drivers/gpio/gpio-thunderx.c
1786 +++ b/drivers/gpio/gpio-thunderx.c
1787 @@ -566,8 +566,10 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
1788 txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
1789 0, 0, of_node_to_fwnode(dev->of_node),
1790 &thunderx_gpio_irqd_ops, txgpio);
1791 - if (!txgpio->irqd)
1792 + if (!txgpio->irqd) {
1793 + err = -ENOMEM;
1794 goto out;
1795 + }
1796
1797 /* Push on irq_data and the domain for each line. */
1798 for (i = 0; i < ngpio; i++) {
1799 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1800 index bdd68ff197dc..b4c8b25453a6 100644
1801 --- a/drivers/gpio/gpiolib.c
1802 +++ b/drivers/gpio/gpiolib.c
1803 @@ -3340,7 +3340,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
1804 return desc;
1805 }
1806
1807 - status = gpiod_request(desc, con_id);
1808 + /* If a connection label was passed, use it; else use the device name as label */
1809 + status = gpiod_request(desc, con_id ? con_id : dev_name(dev));
1810 if (status < 0)
1811 return ERR_PTR(status);
1812
1813 diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
1814 index fe15aa64086f..71fe60e5f01f 100644
1815 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
1816 +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
1817 @@ -698,7 +698,7 @@ static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
1818 val &= div_mask(width);
1819
1820 return divider_recalc_rate(hw, parent_rate, val, NULL,
1821 - postdiv->flags);
1822 + postdiv->flags, width);
1823 }
1824
1825 static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
1826 diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
1827 index 62e38fa8cda2..e362a932fe8c 100644
1828 --- a/drivers/hwmon/ina2xx.c
1829 +++ b/drivers/hwmon/ina2xx.c
1830 @@ -95,18 +95,20 @@ enum ina2xx_ids { ina219, ina226 };
1831
1832 struct ina2xx_config {
1833 u16 config_default;
1834 - int calibration_factor;
1835 + int calibration_value;
1836 int registers;
1837 int shunt_div;
1838 int bus_voltage_shift;
1839 int bus_voltage_lsb; /* uV */
1840 - int power_lsb; /* uW */
1841 + int power_lsb_factor;
1842 };
1843
1844 struct ina2xx_data {
1845 const struct ina2xx_config *config;
1846
1847 long rshunt;
1848 + long current_lsb_uA;
1849 + long power_lsb_uW;
1850 struct mutex config_lock;
1851 struct regmap *regmap;
1852
1853 @@ -116,21 +118,21 @@ struct ina2xx_data {
1854 static const struct ina2xx_config ina2xx_config[] = {
1855 [ina219] = {
1856 .config_default = INA219_CONFIG_DEFAULT,
1857 - .calibration_factor = 40960000,
1858 + .calibration_value = 4096,
1859 .registers = INA219_REGISTERS,
1860 .shunt_div = 100,
1861 .bus_voltage_shift = 3,
1862 .bus_voltage_lsb = 4000,
1863 - .power_lsb = 20000,
1864 + .power_lsb_factor = 20,
1865 },
1866 [ina226] = {
1867 .config_default = INA226_CONFIG_DEFAULT,
1868 - .calibration_factor = 5120000,
1869 + .calibration_value = 2048,
1870 .registers = INA226_REGISTERS,
1871 .shunt_div = 400,
1872 .bus_voltage_shift = 0,
1873 .bus_voltage_lsb = 1250,
1874 - .power_lsb = 25000,
1875 + .power_lsb_factor = 25,
1876 },
1877 };
1878
1879 @@ -169,12 +171,16 @@ static u16 ina226_interval_to_reg(int interval)
1880 return INA226_SHIFT_AVG(avg_bits);
1881 }
1882
1883 +/*
1884 + * The calibration register is set to the best value, which eliminates
1885 + * truncation errors when the hardware calculates the current register.
1886 + * According to the datasheet (eq. 3) the best values are 2048 for the
1887 + * ina226 and 4096 for the ina219. They are hardcoded as calibration_value.
1888 + */
1889 static int ina2xx_calibrate(struct ina2xx_data *data)
1890 {
1891 - u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
1892 - data->rshunt);
1893 -
1894 - return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
1895 + return regmap_write(data->regmap, INA2XX_CALIBRATION,
1896 + data->config->calibration_value);
1897 }
1898
1899 /*
1900 @@ -187,10 +193,6 @@ static int ina2xx_init(struct ina2xx_data *data)
1901 if (ret < 0)
1902 return ret;
1903
1904 - /*
1905 - * Set current LSB to 1mA, shunt is in uOhms
1906 - * (equation 13 in datasheet).
1907 - */
1908 return ina2xx_calibrate(data);
1909 }
1910
1911 @@ -268,15 +270,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
1912 val = DIV_ROUND_CLOSEST(val, 1000);
1913 break;
1914 case INA2XX_POWER:
1915 - val = regval * data->config->power_lsb;
1916 + val = regval * data->power_lsb_uW;
1917 break;
1918 case INA2XX_CURRENT:
1919 - /* signed register, LSB=1mA (selected), in mA */
1920 - val = (s16)regval;
1921 + /* signed register, result in mA */
1922 + val = regval * data->current_lsb_uA;
1923 + val = DIV_ROUND_CLOSEST(val, 1000);
1924 break;
1925 case INA2XX_CALIBRATION:
1926 - val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
1927 - regval);
1928 + val = regval;
1929 break;
1930 default:
1931 /* programmer goofed */
1932 @@ -304,9 +306,32 @@ static ssize_t ina2xx_show_value(struct device *dev,
1933 ina2xx_get_value(data, attr->index, regval));
1934 }
1935
1936 -static ssize_t ina2xx_set_shunt(struct device *dev,
1937 - struct device_attribute *da,
1938 - const char *buf, size_t count)
1939 +/*
1940 + * To keep the calibration register value fixed, the product of
1941 + * current_lsb and shunt_resistor must also stay fixed, equal to
1942 + * shunt_voltage_lsb = 1 / shunt_div scaled by 10^9 so that the
1943 + * integer math keeps its precision.
1944 + */
1945 +static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
1946 +{
1947 + unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
1948 + data->config->shunt_div);
1949 + if (val <= 0 || val > dividend)
1950 + return -EINVAL;
1951 +
1952 + mutex_lock(&data->config_lock);
1953 + data->rshunt = val;
1954 + data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
1955 + data->power_lsb_uW = data->config->power_lsb_factor *
1956 + data->current_lsb_uA;
1957 + mutex_unlock(&data->config_lock);
1958 +
1959 + return 0;
1960 +}
1961 +
1962 +static ssize_t ina2xx_store_shunt(struct device *dev,
1963 + struct device_attribute *da,
1964 + const char *buf, size_t count)
1965 {
1966 unsigned long val;
1967 int status;
1968 @@ -316,18 +341,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
1969 if (status < 0)
1970 return status;
1971
1972 - if (val == 0 ||
1973 - /* Values greater than the calibration factor make no sense. */
1974 - val > data->config->calibration_factor)
1975 - return -EINVAL;
1976 -
1977 - mutex_lock(&data->config_lock);
1978 - data->rshunt = val;
1979 - status = ina2xx_calibrate(data);
1980 - mutex_unlock(&data->config_lock);
1981 + status = ina2xx_set_shunt(data, val);
1982 if (status < 0)
1983 return status;
1984 -
1985 return count;
1986 }
1987
1988 @@ -387,7 +403,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
1989
1990 /* shunt resistance */
1991 static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
1992 - ina2xx_show_value, ina2xx_set_shunt,
1993 + ina2xx_show_value, ina2xx_store_shunt,
1994 INA2XX_CALIBRATION);
1995
1996 /* update interval (ina226 only) */
1997 @@ -448,10 +464,7 @@ static int ina2xx_probe(struct i2c_client *client,
1998 val = INA2XX_RSHUNT_DEFAULT;
1999 }
2000
2001 - if (val <= 0 || val > data->config->calibration_factor)
2002 - return -ENODEV;
2003 -
2004 - data->rshunt = val;
2005 + ina2xx_set_shunt(data, val);
2006
2007 ina2xx_regmap_config.max_register = data->config->registers;
2008
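
The ina2xx rework above trades a variable calibration register for a variable current LSB. The arithmetic, reproduced as a stand-alone sketch with a hypothetical 10 mOhm shunt (shunt_div and power_lsb_factor are the ina226 values from the table above):

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        long shunt_div = 400;          /* ina226 */
        long power_lsb_factor = 25;    /* ina226 */
        long rshunt_uohm = 10000;      /* 10 mOhm shunt, in micro-ohms */

        /* shunt voltage LSB scaled for integer math: 1/shunt_div * 10^9 */
        long dividend = DIV_ROUND_CLOSEST(1000000000L, shunt_div);
        long current_lsb_uA = DIV_ROUND_CLOSEST(dividend, rshunt_uohm);
        long power_lsb_uW = power_lsb_factor * current_lsb_uA;

        printf("current LSB: %ld uA, power LSB: %ld uW\n",
               current_lsb_uA, power_lsb_uW);  /* 250 uA, 6250 uW */
        return 0;
    }
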
2009 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
2010 index 6cae00ecc905..25de7cc9f49f 100644
2011 --- a/drivers/infiniband/core/cma.c
2012 +++ b/drivers/infiniband/core/cma.c
2013 @@ -4453,6 +4453,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
2014 id_stats->qp_type = id->qp_type;
2015
2016 i_id++;
2017 + nlmsg_end(skb, nlh);
2018 }
2019
2020 cb->args[1] = 0;
2021 diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
2022 index 722235bed075..d6fa38f8604f 100644
2023 --- a/drivers/infiniband/core/ucma.c
2024 +++ b/drivers/infiniband/core/ucma.c
2025 @@ -914,13 +914,14 @@ static ssize_t ucma_query_path(struct ucma_context *ctx,
2026
2027 resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
2028 IB_PATH_BIDIRECTIONAL;
2029 - if (rec->rec_type == SA_PATH_REC_TYPE_IB) {
2030 - ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
2031 - } else {
2032 + if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
2033 struct sa_path_rec ib;
2034
2035 sa_convert_path_opa_to_ib(&ib, rec);
2036 ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
2037 +
2038 + } else {
2039 + ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
2040 }
2041 }
2042
2043 diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
2044 index d6a1a308c6a0..b7f1ce5333cb 100644
2045 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
2046 +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
2047 @@ -125,7 +125,8 @@ static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
2048 * @conn_ird: connection IRD
2049 * @conn_ord: connection ORD
2050 */
2051 -static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
2052 +static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird,
2053 + u32 conn_ord)
2054 {
2055 if (conn_ird > I40IW_MAX_IRD_SIZE)
2056 conn_ird = I40IW_MAX_IRD_SIZE;
2057 @@ -3841,7 +3842,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2058 }
2059
2060 cm_node->apbvt_set = true;
2061 - i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
2062 + i40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
2063 if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
2064 !cm_node->ord_size)
2065 cm_node->ord_size = 1;
2066 diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
2067 index d86f3e670804..472ef4d6e858 100644
2068 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
2069 +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
2070 @@ -3875,8 +3875,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
2071 hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
2072 hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
2073
2074 - hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
2075 - hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
2076 + hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =
2077 + roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);
2078 + hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =
2079 + roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);
2080 hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
2081 hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
2082 hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
2083 diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
2084 index 24eabcad5e40..019ad3b939f9 100644
2085 --- a/drivers/infiniband/hw/i40iw/i40iw_d.h
2086 +++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
2087 @@ -93,6 +93,7 @@
2088 #define RDMA_OPCODE_MASK 0x0f
2089 #define RDMA_READ_REQ_OPCODE 1
2090 #define Q2_BAD_FRAME_OFFSET 72
2091 +#define Q2_FPSN_OFFSET 64
2092 #define CQE_MAJOR_DRV 0x8000
2093
2094 #define I40IW_TERM_SENT 0x01
2095 diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
2096 index 59f70676f0e0..14d38d733cb4 100644
2097 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
2098 +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
2099 @@ -1376,7 +1376,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
2100 u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
2101 u32 rcv_wnd = hw_host_ctx[23];
2102 /* first partial seq # in q2 */
2103 - u32 fps = qp->q2_buf[16];
2104 + u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
2105 struct list_head *rxlist = &pfpdu->rxlist;
2106 struct list_head *plist;
2107
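
The q2_buf fix above is worth a second look: q2_buf is a byte buffer, so q2_buf[16] read a single byte at offset 16, while the FPSN actually lives as a 32-bit word at byte offset 64. A reduced demonstration (memcpy stands in for the kernel's pointer cast to sidestep alignment concerns; the buffer layout is invented):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define Q2_FPSN_OFFSET 64

    int main(void)
    {
        uint8_t q2_buf[128] = { 0 };
        uint32_t fpsn = 0x12345678;

        memcpy(q2_buf + Q2_FPSN_OFFSET, &fpsn, sizeof(fpsn));

        uint32_t wrong = q2_buf[16];  /* one byte, and the wrong one */
        uint32_t right;
        memcpy(&right, q2_buf + Q2_FPSN_OFFSET, sizeof(right));

        printf("wrong=%#x right=%#x\n", wrong, right);
        return 0;
    }
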
2108 diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
2109 index 97d71e49c092..88fa4d44ab5f 100644
2110 --- a/drivers/infiniband/sw/rdmavt/cq.c
2111 +++ b/drivers/infiniband/sw/rdmavt/cq.c
2112 @@ -198,7 +198,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
2113 return ERR_PTR(-EINVAL);
2114
2115 /* Allocate the completion queue structure. */
2116 - cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2117 + cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
2118 if (!cq)
2119 return ERR_PTR(-ENOMEM);
2120
2121 @@ -214,7 +214,9 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
2122 sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
2123 else
2124 sz += sizeof(struct ib_wc) * (entries + 1);
2125 - wc = vmalloc_user(sz);
2126 + wc = udata ?
2127 + vmalloc_user(sz) :
2128 + vzalloc_node(sz, rdi->dparms.node);
2129 if (!wc) {
2130 ret = ERR_PTR(-ENOMEM);
2131 goto bail_cq;
2132 @@ -369,7 +371,9 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
2133 sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
2134 else
2135 sz += sizeof(struct ib_wc) * (cqe + 1);
2136 - wc = vmalloc_user(sz);
2137 + wc = udata ?
2138 + vmalloc_user(sz) :
2139 + vzalloc_node(sz, rdi->dparms.node);
2140 if (!wc)
2141 return -ENOMEM;
2142
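
The rdmavt hunks above pick the allocator by audience: kernel-only completion queues can be placed near the device's NUMA node, while user-mapped ones must stay with vmalloc_user(). A rough user-space sketch of that selection (the allocator stand-ins below are hypothetical and mirror the kernel calls only loosely):

    #include <stdio.h>
    #include <stdlib.h>

    static void *alloc_on_node(size_t sz, int node)
    {
        /* stand-in: the kernel would use vzalloc_node(sz, node) */
        printf("allocating %zu bytes near node %d\n", sz, node);
        return calloc(1, sz);
    }

    static void *alloc_for_user(size_t sz)
    {
        /* stand-in for vmalloc_user(): zeroed, user-mappable memory */
        printf("allocating %zu bytes for user mapping\n", sz);
        return calloc(1, sz);
    }

    int main(void)
    {
        int have_udata = 0;     /* kernel-internal CQ in this example */
        int device_node = 1;
        size_t sz = 4096;

        void *wc = have_udata ? alloc_for_user(sz)
                              : alloc_on_node(sz, device_node);
        free(wc);
        return 0;
    }
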
2143 diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
2144 index b3bbad7d2282..5dafafad6351 100644
2145 --- a/drivers/input/touchscreen/goodix.c
2146 +++ b/drivers/input/touchscreen/goodix.c
2147 @@ -808,8 +808,10 @@ static int __maybe_unused goodix_suspend(struct device *dev)
2148 int error;
2149
2150 /* We need gpio pins to suspend/resume */
2151 - if (!ts->gpiod_int || !ts->gpiod_rst)
2152 + if (!ts->gpiod_int || !ts->gpiod_rst) {
2153 + disable_irq(client->irq);
2154 return 0;
2155 + }
2156
2157 wait_for_completion(&ts->firmware_loading_complete);
2158
2159 @@ -849,8 +851,10 @@ static int __maybe_unused goodix_resume(struct device *dev)
2160 struct goodix_ts_data *ts = i2c_get_clientdata(client);
2161 int error;
2162
2163 - if (!ts->gpiod_int || !ts->gpiod_rst)
2164 + if (!ts->gpiod_int || !ts->gpiod_rst) {
2165 + enable_irq(client->irq);
2166 return 0;
2167 + }
2168
2169 /*
2170 * Exit sleep mode by outputting HIGH level to INT pin
2171 diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
2172 index ae9ff72e83ee..848fcdf6a112 100644
2173 --- a/drivers/irqchip/irq-gic-v3.c
2174 +++ b/drivers/irqchip/irq-gic-v3.c
2175 @@ -1297,6 +1297,10 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
2176 u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
2177 void __iomem *redist_base;
2178
2179 + /* A GICC entry with !ACPI_MADT_ENABLED is not usable, so skip it */
2180 + if (!(gicc->flags & ACPI_MADT_ENABLED))
2181 + return 0;
2182 +
2183 redist_base = ioremap(gicc->gicr_base_address, size);
2184 if (!redist_base)
2185 return -ENOMEM;
2186 @@ -1346,6 +1350,13 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
2187 if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
2188 return 0;
2189
2190 + /*
2191 + * It's perfectly valid for firmware to pass a disabled GICC entry;
2192 + * don't treat it as an error, just skip it instead of failing the probe.
2193 + */
2194 + if (!(gicc->flags & ACPI_MADT_ENABLED))
2195 + return 0;
2196 +
2197 return -ENODEV;
2198 }
2199
2200 diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
2201 index 934b1fce4ce1..f0dc8e2aee65 100644
2202 --- a/drivers/md/bcache/alloc.c
2203 +++ b/drivers/md/bcache/alloc.c
2204 @@ -515,15 +515,21 @@ struct open_bucket {
2205
2206 /*
2207 * We keep multiple buckets open for writes, and try to segregate different
2208 - * write streams for better cache utilization: first we look for a bucket where
2209 - * the last write to it was sequential with the current write, and failing that
2210 - * we look for a bucket that was last used by the same task.
2211 + * write streams for better cache utilization: first we try to segregate flash-
2212 + * only volume write streams from cached device ones, second we look for a bucket
2213 + * where the last write to it was sequential with the current write, and
2214 + * failing that we look for a bucket that was last used by the same task.
2215 *
2216 * The idea is that if you've got multiple tasks pulling data into the cache at the
2217 * same time, you'll get better cache utilization if you try to segregate their
2218 * data and preserve locality.
2219 *
2220 - * For example, say you've starting Firefox at the same time you're copying a
2221 + * For example, dirty sectors of a flash-only volume are not reclaimable; if
2222 + * they are mixed into a bucket with dirty sectors of a cached device, that
2223 + * bucket stays marked dirty and won't be reclaimed, even after the cached
2224 + * device's dirty data has been written back to the backing device.
2225 + *
2226 + * And say you're starting Firefox at the same time you're copying a
2227 * bunch of files. Firefox will likely end up being fairly hot and stay in the
2228 * cache awhile, but the data you copied might not be; if you wrote all that
2229 * data to the same buckets it'd get invalidated at the same time.
2230 @@ -540,7 +546,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
2231 struct open_bucket *ret, *ret_task = NULL;
2232
2233 list_for_each_entry_reverse(ret, &c->data_buckets, list)
2234 - if (!bkey_cmp(&ret->key, search))
2235 + if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
2236 + UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
2237 + continue;
2238 + else if (!bkey_cmp(&ret->key, search))
2239 goto found;
2240 else if (ret->last_write_point == write_point)
2241 ret_task = ret;
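
The pick_data_bucket() change above adds one more segregation rule ahead of the existing ones: an open bucket whose flash-only flag differs from the incoming write is never considered. The selection logic reduced to plain C (types and data are made up):

    #include <stdio.h>
    #include <stdbool.h>

    struct bucket { bool flash_only; int key; };

    static struct bucket *pick(struct bucket *b, int n, bool want_flash, int key)
    {
        struct bucket *fallback = NULL;

        for (int i = 0; i < n; i++) {
            if (b[i].flash_only != want_flash)
                continue;          /* never mix the two streams */
            if (b[i].key == key)
                return &b[i];      /* sequential with the last write */
            fallback = &b[i];
        }
        return fallback;
    }

    int main(void)
    {
        struct bucket open[] = {
            { true, 10 }, { false, 10 }, { false, 42 },
        };
        struct bucket *ret = pick(open, 3, false, 10);
        printf("picked key %d (flash_only=%d)\n", ret->key, ret->flash_only);
        return 0;
    }
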
2242 diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
2243 index e9fbf2bcd122..f34ad8720756 100644
2244 --- a/drivers/md/bcache/request.c
2245 +++ b/drivers/md/bcache/request.c
2246 @@ -568,6 +568,7 @@ static void cache_lookup(struct closure *cl)
2247 {
2248 struct search *s = container_of(cl, struct search, iop.cl);
2249 struct bio *bio = &s->bio.bio;
2250 + struct cached_dev *dc;
2251 int ret;
2252
2253 bch_btree_op_init(&s->op, -1);
2254 @@ -580,6 +581,27 @@ static void cache_lookup(struct closure *cl)
2255 return;
2256 }
2257
2258 + /*
2259 + * We might hit an error when searching the btree; if that happens, ret
2260 + * is negative. In that case we should not recover data from the
2261 + * backing device (when the cache device is dirty) because we don't know
2262 + * whether the bkeys the read request covers are all clean.
2263 + *
2264 + * If that happens, s->iop.status is still its initial value from
2265 + * before we submitted s->bio.bio.
2266 + */
2267 + if (ret < 0) {
2268 + BUG_ON(ret == -EINTR);
2269 + if (s->d && s->d->c &&
2270 + !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
2271 + dc = container_of(s->d, struct cached_dev, disk);
2272 + if (dc && atomic_read(&dc->has_dirty))
2273 + s->recoverable = false;
2274 + }
2275 + if (!s->iop.status)
2276 + s->iop.status = BLK_STS_IOERR;
2277 + }
2278 +
2279 closure_return(cl);
2280 }
2281
2282 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
2283 index 9417170f180a..5d0430777dda 100644
2284 --- a/drivers/md/bcache/super.c
2285 +++ b/drivers/md/bcache/super.c
2286 @@ -893,6 +893,12 @@ static void cached_dev_detach_finish(struct work_struct *w)
2287
2288 mutex_lock(&bch_register_lock);
2289
2290 + cancel_delayed_work_sync(&dc->writeback_rate_update);
2291 + if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
2292 + kthread_stop(dc->writeback_thread);
2293 + dc->writeback_thread = NULL;
2294 + }
2295 +
2296 memset(&dc->sb.set_uuid, 0, 16);
2297 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
2298
2299 diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
2300 index cb115ba6a1d2..6d9adcaa26ba 100644
2301 --- a/drivers/media/v4l2-core/videobuf2-core.c
2302 +++ b/drivers/media/v4l2-core/videobuf2-core.c
2303 @@ -332,6 +332,10 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
2304 struct vb2_buffer *vb;
2305 int ret;
2306
2307 + /* Ensure that q->num_buffers+num_buffers does not exceed VB2_MAX_FRAME */
2308 + num_buffers = min_t(unsigned int, num_buffers,
2309 + VB2_MAX_FRAME - q->num_buffers);
2310 +
2311 for (buffer = 0; buffer < num_buffers; ++buffer) {
2312 /* Allocate videobuf buffer structures */
2313 vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
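
The clamp above keeps q->num_buffers + num_buffers within VB2_MAX_FRAME (32 in this kernel) so later indexing into the fixed-size buffer array cannot overflow. In isolation, with illustrative values:

    #include <stdio.h>

    #define VB2_MAX_FRAME 32
    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        unsigned int num_buffers = 40, q_num_buffers = 8;

        num_buffers = min_t(unsigned int, num_buffers,
                            VB2_MAX_FRAME - q_num_buffers);
        printf("%u\n", num_buffers);   /* 24: stays within the array */
        return 0;
    }
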
2314 diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
2315 index 070f5da06fd2..5bedf4b7f0f7 100644
2316 --- a/drivers/mmc/host/sdhci-pci-core.c
2317 +++ b/drivers/mmc/host/sdhci-pci-core.c
2318 @@ -806,6 +806,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
2319 slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
2320 break;
2321 case INTEL_MRFLD_SDIO:
2322 + /* Advertise 2.0v for compatibility with the SDIO card's OCR */
2323 + slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
2324 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
2325 MMC_CAP_POWER_OFF_CARD;
2326 break;
2327 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2328 index 90cc1977b792..d35deb79965d 100644
2329 --- a/drivers/mmc/host/sdhci.c
2330 +++ b/drivers/mmc/host/sdhci.c
2331 @@ -1470,6 +1470,13 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
2332 if (mode != MMC_POWER_OFF) {
2333 switch (1 << vdd) {
2334 case MMC_VDD_165_195:
2335 + /*
2336 + * Without a regulator, SDHCI does not support 2.0v
2337 + * so we only get here if the driver deliberately
2338 + * added the 2.0v range to ocr_avail. Map it to 1.8v
2339 + * for the purpose of turning on the power.
2340 + */
2341 + case MMC_VDD_20_21:
2342 pwr = SDHCI_POWER_180;
2343 break;
2344 case MMC_VDD_29_30:
2345 diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
2346 index 1cb3f7758fb6..766b2c385682 100644
2347 --- a/drivers/mtd/tests/oobtest.c
2348 +++ b/drivers/mtd/tests/oobtest.c
2349 @@ -193,6 +193,9 @@ static int verify_eraseblock(int ebnum)
2350 ops.datbuf = NULL;
2351 ops.oobbuf = readbuf;
2352 err = mtd_read_oob(mtd, addr, &ops);
2353 + if (mtd_is_bitflip(err))
2354 + err = 0;
2355 +
2356 if (err || ops.oobretlen != use_len) {
2357 pr_err("error: readoob failed at %#llx\n",
2358 (long long)addr);
2359 @@ -227,6 +230,9 @@ static int verify_eraseblock(int ebnum)
2360 ops.datbuf = NULL;
2361 ops.oobbuf = readbuf;
2362 err = mtd_read_oob(mtd, addr, &ops);
2363 + if (mtd_is_bitflip(err))
2364 + err = 0;
2365 +
2366 if (err || ops.oobretlen != mtd->oobavail) {
2367 pr_err("error: readoob failed at %#llx\n",
2368 (long long)addr);
2369 @@ -286,6 +292,9 @@ static int verify_eraseblock_in_one_go(int ebnum)
2370
2371 /* read entire block's OOB at one go */
2372 err = mtd_read_oob(mtd, addr, &ops);
2373 + if (mtd_is_bitflip(err))
2374 + err = 0;
2375 +
2376 if (err || ops.oobretlen != len) {
2377 pr_err("error: readoob failed at %#llx\n",
2378 (long long)addr);
2379 @@ -527,6 +536,9 @@ static int __init mtd_oobtest_init(void)
2380 pr_info("attempting to start read past end of OOB\n");
2381 pr_info("an error is expected...\n");
2382 err = mtd_read_oob(mtd, addr0, &ops);
2383 + if (mtd_is_bitflip(err))
2384 + err = 0;
2385 +
2386 if (err) {
2387 pr_info("error occurred as expected\n");
2388 err = 0;
2389 @@ -571,6 +583,9 @@ static int __init mtd_oobtest_init(void)
2390 pr_info("attempting to read past end of device\n");
2391 pr_info("an error is expected...\n");
2392 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
2393 + if (mtd_is_bitflip(err))
2394 + err = 0;
2395 +
2396 if (err) {
2397 pr_info("error occurred as expected\n");
2398 err = 0;
2399 @@ -615,6 +630,9 @@ static int __init mtd_oobtest_init(void)
2400 pr_info("attempting to read past end of device\n");
2401 pr_info("an error is expected...\n");
2402 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
2403 + if (mtd_is_bitflip(err))
2404 + err = 0;
2405 +
2406 if (err) {
2407 pr_info("error occurred as expected\n");
2408 err = 0;
2409 @@ -684,6 +702,9 @@ static int __init mtd_oobtest_init(void)
2410 ops.datbuf = NULL;
2411 ops.oobbuf = readbuf;
2412 err = mtd_read_oob(mtd, addr, &ops);
2413 + if (mtd_is_bitflip(err))
2414 + err = 0;
2415 +
2416 if (err)
2417 goto out;
2418 if (memcmpshow(addr, readbuf, writebuf,
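
All six oobtest hunks above apply the same rule: mtd_read_oob() returning -EUCLEAN means bitflips were corrected by ECC, which the test should count as a successful read. The pattern as a stand-alone sketch (error values hardcoded here for illustration):

    #include <stdio.h>

    #define EUCLEAN 117   /* Linux value on most architectures */

    static int mtd_is_bitflip(int err)
    {
        return err == -EUCLEAN;
    }

    static int read_oob_checked(int raw_err)
    {
        /* corrected bitflips are a health warning, not a read failure */
        if (mtd_is_bitflip(raw_err))
            return 0;
        return raw_err;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               read_oob_checked(0),          /* clean read       -> 0  */
               read_oob_checked(-EUCLEAN),   /* fixed bitflip    -> 0  */
               read_oob_checked(-5));        /* -EIO, real fail  -> -5 */
        return 0;
    }
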
2419 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2420 index b2db581131b2..82f28ffccddf 100644
2421 --- a/drivers/net/bonding/bond_main.c
2422 +++ b/drivers/net/bonding/bond_main.c
2423 @@ -1524,39 +1524,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
2424 goto err_close;
2425 }
2426
2427 - /* If the mode uses primary, then the following is handled by
2428 - * bond_change_active_slave().
2429 - */
2430 - if (!bond_uses_primary(bond)) {
2431 - /* set promiscuity level to new slave */
2432 - if (bond_dev->flags & IFF_PROMISC) {
2433 - res = dev_set_promiscuity(slave_dev, 1);
2434 - if (res)
2435 - goto err_close;
2436 - }
2437 -
2438 - /* set allmulti level to new slave */
2439 - if (bond_dev->flags & IFF_ALLMULTI) {
2440 - res = dev_set_allmulti(slave_dev, 1);
2441 - if (res)
2442 - goto err_close;
2443 - }
2444 -
2445 - netif_addr_lock_bh(bond_dev);
2446 -
2447 - dev_mc_sync_multiple(slave_dev, bond_dev);
2448 - dev_uc_sync_multiple(slave_dev, bond_dev);
2449 -
2450 - netif_addr_unlock_bh(bond_dev);
2451 - }
2452 -
2453 - if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2454 - /* add lacpdu mc addr to mc list */
2455 - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
2456 -
2457 - dev_mc_add(slave_dev, lacpdu_multicast);
2458 - }
2459 -
2460 res = vlan_vids_add_by_dev(slave_dev, bond_dev);
2461 if (res) {
2462 netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
2463 @@ -1721,6 +1688,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
2464 goto err_upper_unlink;
2465 }
2466
2467 + /* If the mode uses primary, then the following is handled by
2468 + * bond_change_active_slave().
2469 + */
2470 + if (!bond_uses_primary(bond)) {
2471 + /* set promiscuity level to new slave */
2472 + if (bond_dev->flags & IFF_PROMISC) {
2473 + res = dev_set_promiscuity(slave_dev, 1);
2474 + if (res)
2475 + goto err_sysfs_del;
2476 + }
2477 +
2478 + /* set allmulti level to new slave */
2479 + if (bond_dev->flags & IFF_ALLMULTI) {
2480 + res = dev_set_allmulti(slave_dev, 1);
2481 + if (res) {
2482 + if (bond_dev->flags & IFF_PROMISC)
2483 + dev_set_promiscuity(slave_dev, -1);
2484 + goto err_sysfs_del;
2485 + }
2486 + }
2487 +
2488 + netif_addr_lock_bh(bond_dev);
2489 + dev_mc_sync_multiple(slave_dev, bond_dev);
2490 + dev_uc_sync_multiple(slave_dev, bond_dev);
2491 + netif_addr_unlock_bh(bond_dev);
2492 +
2493 + if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2494 + /* add lacpdu mc addr to mc list */
2495 + u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
2496 +
2497 + dev_mc_add(slave_dev, lacpdu_multicast);
2498 + }
2499 + }
2500 +
2501 bond->slave_cnt++;
2502 bond_compute_features(bond);
2503 bond_set_carrier(bond);
2504 @@ -1744,6 +1745,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
2505 return 0;
2506
2507 /* Undo stages on error */
2508 +err_sysfs_del:
2509 + bond_sysfs_slave_del(new_slave);
2510 +
2511 err_upper_unlink:
2512 bond_upper_dev_unlink(bond, new_slave);
2513
2514 @@ -1751,9 +1755,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
2515 netdev_rx_handler_unregister(slave_dev);
2516
2517 err_detach:
2518 - if (!bond_uses_primary(bond))
2519 - bond_hw_addr_flush(bond_dev, slave_dev);
2520 -
2521 vlan_vids_del_by_dev(slave_dev, bond_dev);
2522 if (rcu_access_pointer(bond->primary_slave) == new_slave)
2523 RCU_INIT_POINTER(bond->primary_slave, NULL);
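
The bond_enslave() reorder above moves the promiscuity/allmulti/multicast setup after sysfs registration so the error unwinding stays strictly LIFO; note the new err_sysfs_del label and the allmulti path that now rolls back promiscuity itself. The goto-unwind idiom in miniature (step names are invented):

    #include <stdio.h>

    static int do_a(void) { return 0; }
    static int do_b(void) { return 0; }
    static int do_c(void) { return -1; }   /* pretend the last step fails */
    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    int main(void)
    {
        int err;

        if ((err = do_a()))
            goto out;
        if ((err = do_b()))
            goto err_a;
        if ((err = do_c()))
            goto err_b;     /* unwind strictly in reverse order */
        return 0;

    err_b:
        undo_b();
    err_a:
        undo_a();
    out:
        return err;
    }
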
2524 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2525 index 05498e7f2840..6246003f9922 100644
2526 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2527 +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2528 @@ -2619,8 +2619,8 @@ void t4vf_sge_stop(struct adapter *adapter)
2529 int t4vf_sge_init(struct adapter *adapter)
2530 {
2531 struct sge_params *sge_params = &adapter->params.sge;
2532 - u32 fl0 = sge_params->sge_fl_buffer_size[0];
2533 - u32 fl1 = sge_params->sge_fl_buffer_size[1];
2534 + u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
2535 + u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
2536 struct sge *s = &adapter->sge;
2537
2538 /*
2539 @@ -2628,9 +2628,20 @@ int t4vf_sge_init(struct adapter *adapter)
2540 * the Physical Function Driver. Ideally we should be able to deal
2541 * with _any_ configuration. Practice is different ...
2542 */
2543 - if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
2544 +
2545 + /* We only bother using the Large Page logic if the Large Page Buffer
2546 + * is larger than our Page Size Buffer.
2547 + */
2548 + if (fl_large_pg <= fl_small_pg)
2549 + fl_large_pg = 0;
2550 +
2551 + /* The Page Size Buffer must be exactly equal to our Page Size and the
2552 + * Large Page Size Buffer should be 0 (per above) or a power of 2.
2553 + */
2554 + if (fl_small_pg != PAGE_SIZE ||
2555 + (fl_large_pg & (fl_large_pg - 1)) != 0) {
2556 dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2557 - fl0, fl1);
2558 + fl_small_pg, fl_large_pg);
2559 return -EINVAL;
2560 }
2561 if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
2562 @@ -2642,8 +2653,8 @@ int t4vf_sge_init(struct adapter *adapter)
2563 /*
2564 * Now translate the adapter parameters into our internal forms.
2565 */
2566 - if (fl1)
2567 - s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
2568 + if (fl_large_pg)
2569 + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2570 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
2571 ? 128 : 64);
2572 s->pktshift = PKTSHIFT_G(sge_params->sge_control);
2573 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2574 index a0ef97e7f3c9..ff7a70ffafc6 100644
2575 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2576 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2577 @@ -2092,6 +2092,10 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
2578 {
2579 struct hclge_vport *vport = hclge_get_vport(handle);
2580 struct hclge_dev *hdev = vport->back;
2581 + struct phy_device *phydev = hdev->hw.mac.phydev;
2582 +
2583 + if (phydev)
2584 + return phydev->autoneg;
2585
2586 hclge_query_autoneg_result(hdev);
2587
2588 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
2589 index 186772493711..d1e4dcec5db2 100644
2590 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
2591 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
2592 @@ -1060,6 +1060,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2593 u64 rx_bytes = 0;
2594 u64 tx_pkts = 0;
2595 u64 rx_pkts = 0;
2596 + u64 tx_drop = 0;
2597 + u64 rx_drop = 0;
2598
2599 for (idx = 0; idx < queue_num; idx++) {
2600 /* fetch the tx stats */
2601 @@ -1068,6 +1070,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2602 start = u64_stats_fetch_begin_irq(&ring->syncp);
2603 tx_bytes += ring->stats.tx_bytes;
2604 tx_pkts += ring->stats.tx_pkts;
2605 + tx_drop += ring->stats.tx_busy;
2606 + tx_drop += ring->stats.sw_err_cnt;
2607 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
2608
2609 /* fetch the rx stats */
2610 @@ -1076,6 +1080,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2611 start = u64_stats_fetch_begin_irq(&ring->syncp);
2612 rx_bytes += ring->stats.rx_bytes;
2613 rx_pkts += ring->stats.rx_pkts;
2614 + rx_drop += ring->stats.non_vld_descs;
2615 + rx_drop += ring->stats.err_pkt_len;
2616 + rx_drop += ring->stats.l2_err;
2617 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
2618 }
2619
2620 @@ -1091,8 +1098,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2621 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
2622
2623 stats->tx_errors = netdev->stats.tx_errors;
2624 - stats->rx_dropped = netdev->stats.rx_dropped;
2625 - stats->tx_dropped = netdev->stats.tx_dropped;
2626 + stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
2627 + stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
2628 stats->collisions = netdev->stats.collisions;
2629 stats->rx_over_errors = netdev->stats.rx_over_errors;
2630 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
2631 @@ -1306,6 +1313,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
2632 return ret;
2633 }
2634
2635 + netdev->mtu = new_mtu;
2636 +
2637 /* if the netdev was running earlier, bring it up again */
2638 if (if_running && hns3_nic_net_open(netdev))
2639 ret = -EINVAL;
2640 @@ -2687,8 +2696,12 @@ static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2641 h->ae_algo->ops->reset_queue(h, i);
2642
2643 hns3_fini_ring(priv->ring_data[i].ring);
2644 + devm_kfree(priv->dev, priv->ring_data[i].ring);
2645 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2646 + devm_kfree(priv->dev,
2647 + priv->ring_data[i + h->kinfo.num_tqps].ring);
2648 }
2649 + devm_kfree(priv->dev, priv->ring_data);
2650
2651 return 0;
2652 }
2653 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
2654 index e590d96e434a..a64a5a413d4d 100644
2655 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
2656 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
2657 @@ -22,7 +22,8 @@ struct hns3_stats {
2658 #define HNS3_TQP_STAT(_string, _member) { \
2659 .stats_string = _string, \
2660 .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \
2661 - .stats_offset = offsetof(struct hns3_enet_ring, stats), \
2662 + .stats_offset = offsetof(struct hns3_enet_ring, stats) +\
2663 + offsetof(struct ring_stats, _member), \
2664 } \
2665
2666 static const struct hns3_stats hns3_txq_stats[] = {
2667 @@ -189,13 +190,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
2668 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
2669 struct hns3_enet_ring *ring;
2670 u8 *stat;
2671 - u32 i;
2672 + int i, j;
2673
2674 /* get stats for Tx */
2675 for (i = 0; i < kinfo->num_tqps; i++) {
2676 ring = nic_priv->ring_data[i].ring;
2677 - for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) {
2678 - stat = (u8 *)ring + hns3_txq_stats[i].stats_offset;
2679 + for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
2680 + stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
2681 *data++ = *(u64 *)stat;
2682 }
2683 }
2684 @@ -203,8 +204,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
2685 /* get stats for Rx */
2686 for (i = 0; i < kinfo->num_tqps; i++) {
2687 ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
2688 - for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) {
2689 - stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset;
2690 + for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
2691 + stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
2692 *data++ = *(u64 *)stat;
2693 }
2694 }
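
Two distinct bugs are fixed in the ethtool hunk above: stats_offset previously omitted the per-member offsetof(struct ring_stats, _member), so every stat read the same field, and the inner stats loop reused i, cutting the outer per-queue loop short. The loop-index bug reduced to a few lines:

    #include <stdio.h>

    int main(void)
    {
        int num_tqps = 4, stats_count = 3;
        int buggy = 0, fixed = 0;

        for (int i = 0; i < num_tqps; i++)
            for (int j = 0; j < stats_count; j++)
                fixed++;

        for (int i = 0; i < num_tqps; i++)
            for (i = 0; i < stats_count; i++)   /* clobbers the outer i */
                buggy++;

        printf("fixed=%d buggy=%d\n", fixed, buggy); /* 12 vs 3 */
        return 0;
    }
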
2695 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
2696 index 3b0db01ead1f..3ae02b0620bc 100644
2697 --- a/drivers/net/ethernet/ibm/ibmvnic.c
2698 +++ b/drivers/net/ethernet/ibm/ibmvnic.c
2699 @@ -2209,6 +2209,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2700 struct ibmvnic_sub_crq_queue *scrq = instance;
2701 struct ibmvnic_adapter *adapter = scrq->adapter;
2702
2703 + /* When booting a kdump kernel we can hit pending interrupts
2704 + * prior to completing driver initialization.
2705 + */
2706 + if (unlikely(adapter->state != VNIC_OPEN))
2707 + return IRQ_NONE;
2708 +
2709 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2710
2711 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2712 diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
2713 index 1ccad6f30ebf..4eb6ff60e8fc 100644
2714 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
2715 +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
2716 @@ -1775,7 +1775,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
2717
2718 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
2719
2720 - if (netif_running(adapter->netdev)) {
2721 + /* We don't use netif_running() because it may be true prior to
2722 + * ndo_open() returning, so we can't assume it means all our open
2723 + * tasks have finished, since we're not holding the rtnl_lock here.
2724 + */
2725 + if (adapter->state == __I40EVF_RUNNING) {
2726 set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
2727 netif_carrier_off(adapter->netdev);
2728 netif_tx_disable(adapter->netdev);
2729 @@ -1833,6 +1837,7 @@ static void i40evf_reset_task(struct work_struct *work)
2730 struct i40evf_mac_filter *f;
2731 u32 reg_val;
2732 int i = 0, err;
2733 + bool running;
2734
2735 while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
2736 &adapter->crit_section))
2737 @@ -1892,7 +1897,13 @@ static void i40evf_reset_task(struct work_struct *work)
2738 }
2739
2740 continue_reset:
2741 - if (netif_running(netdev)) {
2742 + /* We don't use netif_running() because it may be true prior to
2743 + * ndo_open() returning, so we can't assume it means all our open
2744 + * tasks have finished, since we're not holding the rtnl_lock here.
2745 + */
2746 + running = (adapter->state == __I40EVF_RUNNING);
2747 +
2748 + if (running) {
2749 netif_carrier_off(netdev);
2750 netif_tx_stop_all_queues(netdev);
2751 adapter->link_up = false;
2752 @@ -1936,7 +1947,10 @@ static void i40evf_reset_task(struct work_struct *work)
2753
2754 mod_timer(&adapter->watchdog_timer, jiffies + 2);
2755
2756 - if (netif_running(adapter->netdev)) {
2757 + /* We were running when the reset started, so we need to restore some
2758 + * state here.
2759 + */
2760 + if (running) {
2761 /* allocate transmit descriptors */
2762 err = i40evf_setup_all_tx_resources(adapter);
2763 if (err)
2764 diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
2765 index 1145cde2274a..b12e3a4f9439 100644
2766 --- a/drivers/net/ethernet/marvell/sky2.c
2767 +++ b/drivers/net/ethernet/marvell/sky2.c
2768 @@ -5087,7 +5087,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2769 INIT_WORK(&hw->restart_work, sky2_restart);
2770
2771 pci_set_drvdata(pdev, hw);
2772 - pdev->d3_delay = 150;
2773 + pdev->d3_delay = 200;
2774
2775 return 0;
2776
2777 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
2778 index 5f41dc92aa68..752a72499b4f 100644
2779 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
2780 +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
2781 @@ -156,57 +156,63 @@ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
2782 static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
2783 {
2784 struct mlx4_en_priv *priv = netdev_priv(netdev);
2785 + struct mlx4_en_port_profile *prof = priv->prof;
2786 struct mlx4_en_dev *mdev = priv->mdev;
2787 + u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
2788
2789 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
2790 return 1;
2791
2792 if (priv->cee_config.pfc_state) {
2793 int tc;
2794 + rx_ppp = prof->rx_ppp;
2795 + tx_ppp = prof->tx_ppp;
2796
2797 - priv->prof->rx_pause = 0;
2798 - priv->prof->tx_pause = 0;
2799 for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
2800 u8 tc_mask = 1 << tc;
2801
2802 switch (priv->cee_config.dcb_pfc[tc]) {
2803 case pfc_disabled:
2804 - priv->prof->tx_ppp &= ~tc_mask;
2805 - priv->prof->rx_ppp &= ~tc_mask;
2806 + tx_ppp &= ~tc_mask;
2807 + rx_ppp &= ~tc_mask;
2808 break;
2809 case pfc_enabled_full:
2810 - priv->prof->tx_ppp |= tc_mask;
2811 - priv->prof->rx_ppp |= tc_mask;
2812 + tx_ppp |= tc_mask;
2813 + rx_ppp |= tc_mask;
2814 break;
2815 case pfc_enabled_tx:
2816 - priv->prof->tx_ppp |= tc_mask;
2817 - priv->prof->rx_ppp &= ~tc_mask;
2818 + tx_ppp |= tc_mask;
2819 + rx_ppp &= ~tc_mask;
2820 break;
2821 case pfc_enabled_rx:
2822 - priv->prof->tx_ppp &= ~tc_mask;
2823 - priv->prof->rx_ppp |= tc_mask;
2824 + tx_ppp &= ~tc_mask;
2825 + rx_ppp |= tc_mask;
2826 break;
2827 default:
2828 break;
2829 }
2830 }
2831 - en_dbg(DRV, priv, "Set pfc on\n");
2832 + rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
2833 + tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
2834 } else {
2835 - priv->prof->rx_pause = 1;
2836 - priv->prof->tx_pause = 1;
2837 - en_dbg(DRV, priv, "Set pfc off\n");
2838 + rx_ppp = 0;
2839 + tx_ppp = 0;
2840 + rx_pause = prof->rx_pause;
2841 + tx_pause = prof->tx_pause;
2842 }
2843
2844 if (mlx4_SET_PORT_general(mdev->dev, priv->port,
2845 priv->rx_skb_size + ETH_FCS_LEN,
2846 - priv->prof->tx_pause,
2847 - priv->prof->tx_ppp,
2848 - priv->prof->rx_pause,
2849 - priv->prof->rx_ppp)) {
2850 + tx_pause, tx_ppp, rx_pause, rx_ppp)) {
2851 en_err(priv, "Failed setting pause params\n");
2852 return 1;
2853 }
2854
2855 + prof->tx_ppp = tx_ppp;
2856 + prof->rx_ppp = rx_ppp;
2857 + prof->tx_pause = tx_pause;
2858 + prof->rx_pause = rx_pause;
2859 +
2860 return 0;
2861 }
2862
2863 @@ -310,6 +316,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
2864 }
2865
2866 switch (ets->tc_tsa[i]) {
2867 + case IEEE_8021QAZ_TSA_VENDOR:
2868 case IEEE_8021QAZ_TSA_STRICT:
2869 break;
2870 case IEEE_8021QAZ_TSA_ETS:
2871 @@ -347,6 +354,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
2872 /* higher TC means higher priority => lower pg */
2873 for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
2874 switch (ets->tc_tsa[i]) {
2875 + case IEEE_8021QAZ_TSA_VENDOR:
2876 + pg[i] = MLX4_EN_TC_VENDOR;
2877 + tc_tx_bw[i] = MLX4_EN_BW_MAX;
2878 + break;
2879 case IEEE_8021QAZ_TSA_STRICT:
2880 pg[i] = num_strict++;
2881 tc_tx_bw[i] = MLX4_EN_BW_MAX;
2882 @@ -403,6 +414,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
2883 struct mlx4_en_priv *priv = netdev_priv(dev);
2884 struct mlx4_en_port_profile *prof = priv->prof;
2885 struct mlx4_en_dev *mdev = priv->mdev;
2886 + u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
2887 int err;
2888
2889 en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
2890 @@ -411,23 +423,26 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
2891 pfc->mbc,
2892 pfc->delay);
2893
2894 - prof->rx_pause = !pfc->pfc_en;
2895 - prof->tx_pause = !pfc->pfc_en;
2896 - prof->rx_ppp = pfc->pfc_en;
2897 - prof->tx_ppp = pfc->pfc_en;
2898 + rx_pause = prof->rx_pause && !pfc->pfc_en;
2899 + tx_pause = prof->tx_pause && !pfc->pfc_en;
2900 + rx_ppp = pfc->pfc_en;
2901 + tx_ppp = pfc->pfc_en;
2902
2903 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
2904 priv->rx_skb_size + ETH_FCS_LEN,
2905 - prof->tx_pause,
2906 - prof->tx_ppp,
2907 - prof->rx_pause,
2908 - prof->rx_ppp);
2909 - if (err)
2910 + tx_pause, tx_ppp, rx_pause, rx_ppp);
2911 + if (err) {
2912 en_err(priv, "Failed setting pause params\n");
2913 - else
2914 - mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
2915 - prof->rx_ppp, prof->rx_pause,
2916 - prof->tx_ppp, prof->tx_pause);
2917 + return err;
2918 + }
2919 +
2920 + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
2921 + rx_ppp, rx_pause, tx_ppp, tx_pause);
2922 +
2923 + prof->tx_ppp = tx_ppp;
2924 + prof->rx_ppp = rx_ppp;
2925 + prof->rx_pause = rx_pause;
2926 + prof->tx_pause = tx_pause;
2927
2928 return err;
2929 }
2930 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
2931 index 3d4e4a5d00d1..67f74fcb265e 100644
2932 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
2933 +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
2934 @@ -1046,27 +1046,32 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
2935 {
2936 struct mlx4_en_priv *priv = netdev_priv(dev);
2937 struct mlx4_en_dev *mdev = priv->mdev;
2938 + u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
2939 int err;
2940
2941 if (pause->autoneg)
2942 return -EINVAL;
2943
2944 - priv->prof->tx_pause = pause->tx_pause != 0;
2945 - priv->prof->rx_pause = pause->rx_pause != 0;
2946 + tx_pause = !!(pause->tx_pause);
2947 + rx_pause = !!(pause->rx_pause);
2948 + rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
2949 + tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
2950 +
2951 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
2952 priv->rx_skb_size + ETH_FCS_LEN,
2953 - priv->prof->tx_pause,
2954 - priv->prof->tx_ppp,
2955 - priv->prof->rx_pause,
2956 - priv->prof->rx_ppp);
2957 - if (err)
2958 - en_err(priv, "Failed setting pause params\n");
2959 - else
2960 - mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
2961 - priv->prof->rx_ppp,
2962 - priv->prof->rx_pause,
2963 - priv->prof->tx_ppp,
2964 - priv->prof->tx_pause);
2965 + tx_pause, tx_ppp, rx_pause, rx_ppp);
2966 + if (err) {
2967 + en_err(priv, "Failed setting pause params, err = %d\n", err);
2968 + return err;
2969 + }
2970 +
2971 + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
2972 + rx_ppp, rx_pause, tx_ppp, tx_pause);
2973 +
2974 + priv->prof->tx_pause = tx_pause;
2975 + priv->prof->rx_pause = rx_pause;
2976 + priv->prof->tx_ppp = tx_ppp;
2977 + priv->prof->rx_ppp = rx_ppp;
2978
2979 return err;
2980 }
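
The common thread of the mlx4 pause/PFC hunks above: global pause and per-priority flow control are mutually exclusive, the new values are computed in locals, and the cached profile is only updated once the firmware accepted them. A compact sketch of that invariant (the firmware call is elided, so the commit step here always runs):

    #include <stdio.h>
    #include <stdbool.h>

    struct profile { bool tx_pause, rx_pause; unsigned char tx_ppp, rx_ppp; };

    static int set_port(struct profile *prof, unsigned char pfc_en)
    {
        bool rx_pause = prof->rx_pause && !pfc_en;
        bool tx_pause = prof->tx_pause && !pfc_en;
        unsigned char rx_ppp = pfc_en, tx_ppp = pfc_en;

        /* imagine a firmware call here; commit only on success */
        prof->tx_pause = tx_pause; prof->rx_pause = rx_pause;
        prof->tx_ppp = tx_ppp; prof->rx_ppp = rx_ppp;
        return 0;
    }

    int main(void)
    {
        struct profile p = { true, true, 0, 0 };
        set_port(&p, 0x0f);   /* enable PFC on priorities 0-3 */
        printf("pause=%d/%d ppp=%#x/%#x\n", p.tx_pause, p.rx_pause,
               (unsigned)p.tx_ppp, (unsigned)p.rx_ppp);
        return 0;
    }
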
2981 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
2982 index 686e18de9a97..6b2f7122b3ab 100644
2983 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
2984 +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
2985 @@ -163,9 +163,9 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
2986 params->udp_rss = 0;
2987 }
2988 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
2989 - params->prof[i].rx_pause = 1;
2990 + params->prof[i].rx_pause = !(pfcrx || pfctx);
2991 params->prof[i].rx_ppp = pfcrx;
2992 - params->prof[i].tx_pause = 1;
2993 + params->prof[i].tx_pause = !(pfcrx || pfctx);
2994 params->prof[i].tx_ppp = pfctx;
2995 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
2996 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
2997 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2998 index 9c218f1cfc6c..c097eef41a9c 100644
2999 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
3000 +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
3001 @@ -3335,6 +3335,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3002 priv->msg_enable = MLX4_EN_MSG_LEVEL;
3003 #ifdef CONFIG_MLX4_EN_DCB
3004 if (!mlx4_is_slave(priv->mdev->dev)) {
3005 + u8 prio;
3006 +
3007 + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
3008 + priv->ets.prio_tc[prio] = prio;
3009 + priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
3010 + }
3011 +
3012 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
3013 DCB_CAP_DCBX_VER_IEEE;
3014 priv->flags |= MLX4_EN_DCB_ENABLED;
3015 diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
3016 index fdb3ad0cbe54..2c1a5ff6acfa 100644
3017 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
3018 +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
3019 @@ -476,6 +476,7 @@ struct mlx4_en_frag_info {
3020 #define MLX4_EN_BW_MIN 1
3021 #define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
3022
3023 +#define MLX4_EN_TC_VENDOR 0
3024 #define MLX4_EN_TC_ETS 7
3025
3026 enum dcb_pfc_type {
3027 diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
3028 index fabb53379727..a069fcc823c3 100644
3029 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
3030 +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
3031 @@ -5089,6 +5089,7 @@ static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3032 &tracker->res_tree[RES_FS_RULE]);
3033 list_del(&fs_rule->com.list);
3034 spin_unlock_irq(mlx4_tlock(dev));
3035 + kfree(fs_rule->mirr_mbox);
3036 kfree(fs_rule);
3037 state = 0;
3038 break;
3039 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3040 index a863572882b2..225b2ad3e15f 100644
3041 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3042 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3043 @@ -2718,6 +2718,9 @@ int mlx5e_open(struct net_device *netdev)
3044 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
3045 mutex_unlock(&priv->state_lock);
3046
3047 + if (mlx5e_vxlan_allowed(priv->mdev))
3048 + udp_tunnel_get_rx_info(netdev);
3049 +
3050 return err;
3051 }
3052
3053 @@ -4276,13 +4279,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
3054 if (netdev->reg_state != NETREG_REGISTERED)
3055 return;
3056
3057 - /* Device already registered: sync netdev system state */
3058 - if (mlx5e_vxlan_allowed(mdev)) {
3059 - rtnl_lock();
3060 - udp_tunnel_get_rx_info(netdev);
3061 - rtnl_unlock();
3062 - }
3063 -
3064 queue_work(priv->wq, &priv->set_rx_mode_work);
3065
3066 rtnl_lock();
3067 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
3068 index 45e03c427faf..5ffd1db4e797 100644
3069 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
3070 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
3071 @@ -43,6 +43,11 @@
3072 #include "en_tc.h"
3073 #include "fs_core.h"
3074
3075 +#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
3076 + max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
3077 +#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
3078 + max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)
3079 +
3080 static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
3081
3082 static void mlx5e_rep_get_drvinfo(struct net_device *dev,
3083 @@ -230,7 +235,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
3084 static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
3085 {
3086 #if IS_ENABLED(CONFIG_IPV6)
3087 - unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms,
3088 + unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
3089 DELAY_PROBE_TIME);
3090 #else
3091 unsigned long ipv6_interval = ~0UL;
3092 @@ -366,7 +371,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
3093 case NETEVENT_NEIGH_UPDATE:
3094 n = ptr;
3095 #if IS_ENABLED(CONFIG_IPV6)
3096 - if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
3097 + if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
3098 #else
3099 if (n->tbl != &arp_tbl)
3100 #endif
3101 @@ -414,7 +419,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
3102 * done per device delay prob time parameter.
3103 */
3104 #if IS_ENABLED(CONFIG_IPV6)
3105 - if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
3106 + if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
3107 #else
3108 if (!p->dev || p->tbl != &arp_tbl)
3109 #endif
3110 @@ -610,7 +615,6 @@ static int mlx5e_rep_open(struct net_device *dev)
3111 struct mlx5e_priv *priv = netdev_priv(dev);
3112 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3113 struct mlx5_eswitch_rep *rep = rpriv->rep;
3114 - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3115 int err;
3116
3117 mutex_lock(&priv->state_lock);
3118 @@ -618,8 +622,9 @@ static int mlx5e_rep_open(struct net_device *dev)
3119 if (err)
3120 goto unlock;
3121
3122 - if (!mlx5_eswitch_set_vport_state(esw, rep->vport,
3123 - MLX5_ESW_VPORT_ADMIN_STATE_UP))
3124 + if (!mlx5_modify_vport_admin_state(priv->mdev,
3125 + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
3126 + rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
3127 netif_carrier_on(dev);
3128
3129 unlock:
3130 @@ -632,11 +637,12 @@ static int mlx5e_rep_close(struct net_device *dev)
3131 struct mlx5e_priv *priv = netdev_priv(dev);
3132 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3133 struct mlx5_eswitch_rep *rep = rpriv->rep;
3134 - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3135 int ret;
3136
3137 mutex_lock(&priv->state_lock);
3138 - (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
3139 + mlx5_modify_vport_admin_state(priv->mdev,
3140 + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
3141 + rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
3142 ret = mlx5e_close_locked(dev);
3143 mutex_unlock(&priv->state_lock);
3144 return ret;
3145 @@ -797,9 +803,9 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
3146 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
3147 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
3148
3149 - params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
3150 + params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
3151 params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
3152 - params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
3153 + params->log_rq_size = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
3154
3155 params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
3156 mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
3157 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
3158 index 9ba1f72060aa..42bab73a9f40 100644
3159 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
3160 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
3161 @@ -484,7 +484,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
3162 tbl = &arp_tbl;
3163 #if IS_ENABLED(CONFIG_IPV6)
3164 else if (m_neigh->family == AF_INET6)
3165 - tbl = ipv6_stub->nd_tbl;
3166 + tbl = &nd_tbl;
3167 #endif
3168 else
3169 return;
3170 @@ -2091,19 +2091,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
3171 if (err != -EAGAIN)
3172 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
3173
3174 + if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
3175 + !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
3176 + kvfree(parse_attr);
3177 +
3178 err = rhashtable_insert_fast(&tc->ht, &flow->node,
3179 tc->ht_params);
3180 - if (err)
3181 - goto err_del_rule;
3182 + if (err) {
3183 + mlx5e_tc_del_flow(priv, flow);
3184 + kfree(flow);
3185 + }
3186
3187 - if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
3188 - !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
3189 - kvfree(parse_attr);
3190 return err;
3191
3192 -err_del_rule:
3193 - mlx5e_tc_del_flow(priv, flow);
3194 -
3195 err_free:
3196 kvfree(parse_attr);
3197 kfree(flow);
3198 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
3199 index a1296a62497d..71153c0f1605 100644
3200 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
3201 +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
3202 @@ -36,6 +36,9 @@
3203 #include <linux/mlx5/vport.h>
3204 #include "mlx5_core.h"
3205
3206 +/* Mutex to hold while enabling or disabling RoCE */
3207 +static DEFINE_MUTEX(mlx5_roce_en_lock);
3208 +
3209 static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
3210 u16 vport, u32 *out, int outlen)
3211 {
3212 @@ -998,17 +1001,35 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
3213
3214 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
3215 {
3216 - if (atomic_inc_return(&mdev->roce.roce_en) != 1)
3217 - return 0;
3218 - return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
3219 + int err = 0;
3220 +
3221 + mutex_lock(&mlx5_roce_en_lock);
3222 + if (!mdev->roce.roce_en)
3223 + err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
3224 +
3225 + if (!err)
3226 + mdev->roce.roce_en++;
3227 + mutex_unlock(&mlx5_roce_en_lock);
3228 +
3229 + return err;
3230 }
3231 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
3232
3233 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
3234 {
3235 - if (atomic_dec_return(&mdev->roce.roce_en) != 0)
3236 - return 0;
3237 - return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
3238 + int err = 0;
3239 +
3240 + mutex_lock(&mlx5_roce_en_lock);
3241 + if (mdev->roce.roce_en) {
3242 + mdev->roce.roce_en--;
3243 + if (mdev->roce.roce_en == 0)
3244 + err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
3245 +
3246 + if (err)
3247 + mdev->roce.roce_en++;
3248 + }
3249 + mutex_unlock(&mlx5_roce_en_lock);
3250 + return err;
3251 }
3252 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
3253
3254 diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
3255 index 37364555c42b..f88ff3f4b661 100644
3256 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
3257 +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
3258 @@ -68,10 +68,11 @@
3259 /* CPP address to retrieve the data from */
3260 #define NSP_BUFFER 0x10
3261 #define NSP_BUFFER_CPP GENMASK_ULL(63, 40)
3262 -#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38)
3263 -#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0)
3264 +#define NSP_BUFFER_ADDRESS GENMASK_ULL(39, 0)
3265
3266 #define NSP_DFLT_BUFFER 0x18
3267 +#define NSP_DFLT_BUFFER_CPP GENMASK_ULL(63, 40)
3268 +#define NSP_DFLT_BUFFER_ADDRESS GENMASK_ULL(39, 0)
3269
3270 #define NSP_DFLT_BUFFER_CONFIG 0x20
3271 #define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0)
3272 @@ -412,8 +413,8 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
3273 if (err < 0)
3274 return err;
3275
3276 - cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8;
3277 - cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg);
3278 + cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8;
3279 + cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg);
3280
3281 if (in_buf && in_size) {
3282 err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
3283 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
3284 index 619a1b7281a0..db553d4e8d22 100644
3285 --- a/drivers/net/ethernet/realtek/r8169.c
3286 +++ b/drivers/net/ethernet/realtek/r8169.c
3287 @@ -8466,12 +8466,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3288 goto err_out_msi_5;
3289 }
3290
3291 + pci_set_drvdata(pdev, dev);
3292 +
3293 rc = register_netdev(dev);
3294 if (rc < 0)
3295 goto err_out_cnt_6;
3296
3297 - pci_set_drvdata(pdev, dev);
3298 -
3299 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
3300 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
3301 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
3302 diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
3303 index 6dde9a0cfe76..9b70a3af678e 100644
3304 --- a/drivers/net/ppp/pptp.c
3305 +++ b/drivers/net/ppp/pptp.c
3306 @@ -464,7 +464,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
3307 po->chan.mtu = dst_mtu(&rt->dst);
3308 if (!po->chan.mtu)
3309 po->chan.mtu = PPP_MRU;
3310 - ip_rt_put(rt);
3311 po->chan.mtu -= PPTP_HEADER_OVERHEAD;
3312
3313 po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
3314 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
3315 index 23cd41c82210..2a366554c503 100644
3316 --- a/drivers/net/team/team.c
3317 +++ b/drivers/net/team/team.c
3318 @@ -1197,11 +1197,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
3319 goto err_dev_open;
3320 }
3321
3322 - netif_addr_lock_bh(dev);
3323 - dev_uc_sync_multiple(port_dev, dev);
3324 - dev_mc_sync_multiple(port_dev, dev);
3325 - netif_addr_unlock_bh(dev);
3326 -
3327 err = vlan_vids_add_by_dev(port_dev, dev);
3328 if (err) {
3329 netdev_err(dev, "Failed to add vlan ids to device %s\n",
3330 @@ -1241,6 +1236,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
3331 goto err_option_port_add;
3332 }
3333
3334 + netif_addr_lock_bh(dev);
3335 + dev_uc_sync_multiple(port_dev, dev);
3336 + dev_mc_sync_multiple(port_dev, dev);
3337 + netif_addr_unlock_bh(dev);
3338 +
3339 port->index = -1;
3340 list_add_tail_rcu(&port->list, &team->port_list);
3341 team_port_enable(team, port);
3342 @@ -1265,8 +1265,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
3343 vlan_vids_del_by_dev(port_dev, dev);
3344
3345 err_vids_add:
3346 - dev_uc_unsync(port_dev, dev);
3347 - dev_mc_unsync(port_dev, dev);
3348 dev_close(port_dev);
3349
3350 err_dev_open:
3351 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
3352 index a8dd1c7a08cb..89d82c4ee8df 100644
3353 --- a/drivers/net/usb/lan78xx.c
3354 +++ b/drivers/net/usb/lan78xx.c
3355 @@ -2863,8 +2863,7 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3356 if (ret < 0) {
3357 netdev_warn(dev->net,
3358 "lan78xx_setup_irq_domain() failed : %d", ret);
3359 - kfree(pdata);
3360 - return ret;
3361 + goto out1;
3362 }
3363
3364 dev->net->hard_header_len += TX_OVERHEAD;
3365 @@ -2872,14 +2871,32 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3366
3367 /* Init all registers */
3368 ret = lan78xx_reset(dev);
3369 + if (ret) {
3370 + netdev_warn(dev->net, "Registers INIT FAILED....");
3371 + goto out2;
3372 + }
3373
3374 ret = lan78xx_mdio_init(dev);
3375 + if (ret) {
3376 + netdev_warn(dev->net, "MDIO INIT FAILED.....");
3377 + goto out2;
3378 + }
3379
3380 dev->net->flags |= IFF_MULTICAST;
3381
3382 pdata->wol = WAKE_MAGIC;
3383
3384 return ret;
3385 +
3386 +out2:
3387 + lan78xx_remove_irq_domain(dev);
3388 +
3389 +out1:
3390 + netdev_warn(dev->net, "Bind routine FAILED");
3391 + cancel_work_sync(&pdata->set_multicast);
3392 + cancel_work_sync(&pdata->set_vlan);
3393 + kfree(pdata);
3394 + return ret;
3395 }
3396
3397 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3398 @@ -2891,6 +2908,8 @@ static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3399 lan78xx_remove_mdio(dev);
3400
3401 if (pdata) {
3402 + cancel_work_sync(&pdata->set_multicast);
3403 + cancel_work_sync(&pdata->set_vlan);
3404 netif_dbg(dev, ifdown, dev->net, "free pdata");
3405 kfree(pdata);
3406 pdata = NULL;
3407 diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
3408 index 67ecf2425b88..5c6a8ef54aec 100644
3409 --- a/drivers/net/vrf.c
3410 +++ b/drivers/net/vrf.c
3411 @@ -579,12 +579,13 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
3412 if (!IS_ERR(neigh)) {
3413 sock_confirm_neigh(skb, neigh);
3414 ret = neigh_output(neigh, skb);
3415 + rcu_read_unlock_bh();
3416 + return ret;
3417 }
3418
3419 rcu_read_unlock_bh();
3420 err:
3421 - if (unlikely(ret < 0))
3422 - vrf_tx_error(skb->dev, skb);
3423 + vrf_tx_error(skb->dev, skb);
3424 return ret;
3425 }
3426
3427 diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
3428 index ecc96312a370..6fe0c6abe0d6 100644
3429 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
3430 +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
3431 @@ -142,15 +142,25 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
3432 if (!rt2x00dev->ops->hw->set_rts_threshold &&
3433 (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
3434 IEEE80211_TX_RC_USE_CTS_PROTECT))) {
3435 - if (rt2x00queue_available(queue) <= 1)
3436 - goto exit_fail;
3437 + if (rt2x00queue_available(queue) <= 1) {
3438 + /*
3439 + * Recheck for full queue under lock to avoid race
3440 + * conditions with rt2x00lib_txdone().
3441 + */
3442 + spin_lock(&queue->tx_lock);
3443 + if (rt2x00queue_threshold(queue))
3444 + rt2x00queue_pause_queue(queue);
3445 + spin_unlock(&queue->tx_lock);
3446 +
3447 + goto exit_free_skb;
3448 + }
3449
3450 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
3451 - goto exit_fail;
3452 + goto exit_free_skb;
3453 }
3454
3455 if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
3456 - goto exit_fail;
3457 + goto exit_free_skb;
3458
3459 /*
3460 * Pausing queue has to be serialized with rt2x00lib_txdone(). Note
3461 @@ -164,10 +174,6 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
3462
3463 return;
3464
3465 - exit_fail:
3466 - spin_lock(&queue->tx_lock);
3467 - rt2x00queue_pause_queue(queue);
3468 - spin_unlock(&queue->tx_lock);
3469 exit_free_skb:
3470 ieee80211_free_txskb(hw, skb);
3471 }
3472 diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
3473 index 6d02c660b4ab..037defd10b91 100644
3474 --- a/drivers/net/wireless/ti/wl1251/main.c
3475 +++ b/drivers/net/wireless/ti/wl1251/main.c
3476 @@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
3477 WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
3478
3479 enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
3480 - wl1251_acx_arp_ip_filter(wl, enable, addr);
3481 -
3482 + ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
3483 if (ret < 0)
3484 goto out_sleep;
3485 }
3486 diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
3487 index 7b75d9de55ab..c0080f6ab2f5 100644
3488 --- a/drivers/nvme/target/fcloop.c
3489 +++ b/drivers/nvme/target/fcloop.c
3490 @@ -204,6 +204,10 @@ struct fcloop_lport {
3491 struct completion unreg_done;
3492 };
3493
3494 +struct fcloop_lport_priv {
3495 + struct fcloop_lport *lport;
3496 +};
3497 +
3498 struct fcloop_rport {
3499 struct nvme_fc_remote_port *remoteport;
3500 struct nvmet_fc_target_port *targetport;
3501 @@ -370,6 +374,7 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
3502
3503 spin_lock(&tfcp_req->reqlock);
3504 fcpreq = tfcp_req->fcpreq;
3505 + tfcp_req->fcpreq = NULL;
3506 spin_unlock(&tfcp_req->reqlock);
3507
3508 if (tport->remoteport && fcpreq) {
3509 @@ -611,11 +616,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
3510
3511 if (!tfcp_req)
3512 /* abort has already been called */
3513 - return;
3514 -
3515 - if (rport->targetport)
3516 - nvmet_fc_rcv_fcp_abort(rport->targetport,
3517 - &tfcp_req->tgt_fcp_req);
3518 + goto finish;
3519
3520 /* break initiator/target relationship for io */
3521 spin_lock(&tfcp_req->reqlock);
3522 @@ -623,6 +624,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
3523 tfcp_req->fcpreq = NULL;
3524 spin_unlock(&tfcp_req->reqlock);
3525
3526 + if (rport->targetport)
3527 + nvmet_fc_rcv_fcp_abort(rport->targetport,
3528 + &tfcp_req->tgt_fcp_req);
3529 +
3530 +finish:
3531 /* post the aborted io completion */
3532 fcpreq->status = -ECANCELED;
3533 schedule_work(&inireq->iniwork);
3534 @@ -657,7 +663,8 @@ fcloop_nport_get(struct fcloop_nport *nport)
3535 static void
3536 fcloop_localport_delete(struct nvme_fc_local_port *localport)
3537 {
3538 - struct fcloop_lport *lport = localport->private;
3539 + struct fcloop_lport_priv *lport_priv = localport->private;
3540 + struct fcloop_lport *lport = lport_priv->lport;
3541
3542 /* release any threads waiting for the unreg to complete */
3543 complete(&lport->unreg_done);
3544 @@ -697,7 +704,7 @@ static struct nvme_fc_port_template fctemplate = {
3545 .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
3546 .dma_boundary = FCLOOP_DMABOUND_4G,
3547 /* sizes of additional private data for data structures */
3548 - .local_priv_sz = sizeof(struct fcloop_lport),
3549 + .local_priv_sz = sizeof(struct fcloop_lport_priv),
3550 .remote_priv_sz = sizeof(struct fcloop_rport),
3551 .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
3552 .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
3553 @@ -728,11 +735,17 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
3554 struct fcloop_ctrl_options *opts;
3555 struct nvme_fc_local_port *localport;
3556 struct fcloop_lport *lport;
3557 - int ret;
3558 + struct fcloop_lport_priv *lport_priv;
3559 + unsigned long flags;
3560 + int ret = -ENOMEM;
3561 +
3562 + lport = kzalloc(sizeof(*lport), GFP_KERNEL);
3563 + if (!lport)
3564 + return -ENOMEM;
3565
3566 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
3567 if (!opts)
3568 - return -ENOMEM;
3569 + goto out_free_lport;
3570
3571 ret = fcloop_parse_options(opts, buf);
3572 if (ret)
3573 @@ -752,23 +765,25 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
3574
3575 ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
3576 if (!ret) {
3577 - unsigned long flags;
3578 -
3579 /* success */
3580 - lport = localport->private;
3581 + lport_priv = localport->private;
3582 + lport_priv->lport = lport;
3583 +
3584 lport->localport = localport;
3585 INIT_LIST_HEAD(&lport->lport_list);
3586
3587 spin_lock_irqsave(&fcloop_lock, flags);
3588 list_add_tail(&lport->lport_list, &fcloop_lports);
3589 spin_unlock_irqrestore(&fcloop_lock, flags);
3590 -
3591 - /* mark all of the input buffer consumed */
3592 - ret = count;
3593 }
3594
3595 out_free_opts:
3596 kfree(opts);
3597 +out_free_lport:
3598 + /* free only if we're going to fail */
3599 + if (ret)
3600 + kfree(lport);
3601 +
3602 return ret ? ret : count;
3603 }
3604
3605 @@ -790,6 +805,8 @@ __wait_localport_unreg(struct fcloop_lport *lport)
3606
3607 wait_for_completion(&lport->unreg_done);
3608
3609 + kfree(lport);
3610 +
3611 return ret;
3612 }
3613
3614 diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
3615 index 0f3a02495aeb..beeb7cbb5015 100644
3616 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c
3617 +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
3618 @@ -46,6 +46,9 @@
3619 #define BYT_TRIG_POS BIT(25)
3620 #define BYT_TRIG_LVL BIT(24)
3621 #define BYT_DEBOUNCE_EN BIT(20)
3622 +#define BYT_GLITCH_FILTER_EN BIT(19)
3623 +#define BYT_GLITCH_F_SLOW_CLK BIT(17)
3624 +#define BYT_GLITCH_F_FAST_CLK BIT(16)
3625 #define BYT_PULL_STR_SHIFT 9
3626 #define BYT_PULL_STR_MASK (3 << BYT_PULL_STR_SHIFT)
3627 #define BYT_PULL_STR_2K (0 << BYT_PULL_STR_SHIFT)
3628 @@ -1579,6 +1582,9 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
3629 */
3630 value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
3631 BYT_TRIG_LVL);
3632 + /* Enable glitch filtering */
3633 + value |= BYT_GLITCH_FILTER_EN | BYT_GLITCH_F_SLOW_CLK |
3634 + BYT_GLITCH_F_FAST_CLK;
3635
3636 writel(value, reg);
3637
3638 diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
3639 index d51ebd1da65e..9dc7590e07cb 100644
3640 --- a/drivers/power/supply/axp288_charger.c
3641 +++ b/drivers/power/supply/axp288_charger.c
3642 @@ -785,6 +785,14 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
3643 return 0;
3644 }
3645
3646 +static void axp288_charger_cancel_work(void *data)
3647 +{
3648 + struct axp288_chrg_info *info = data;
3649 +
3650 + cancel_work_sync(&info->otg.work);
3651 + cancel_work_sync(&info->cable.work);
3652 +}
3653 +
3654 static int axp288_charger_probe(struct platform_device *pdev)
3655 {
3656 int ret, i, pirq;
3657 @@ -836,6 +844,11 @@ static int axp288_charger_probe(struct platform_device *pdev)
3658 return ret;
3659 }
3660
3661 + /* Cancel our work on cleanup, register this before the notifiers */
3662 + ret = devm_add_action(dev, axp288_charger_cancel_work, info);
3663 + if (ret)
3664 + return ret;
3665 +
3666 /* Register for extcon notification */
3667 INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
3668 info->cable.nb[0].notifier_call = axp288_charger_handle_cable0_evt;
3669 diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c
3670 index 0e358d4b6738..8ff9dc3fe5bf 100644
3671 --- a/drivers/rtc/rtc-ac100.c
3672 +++ b/drivers/rtc/rtc-ac100.c
3673 @@ -137,13 +137,15 @@ static unsigned long ac100_clkout_recalc_rate(struct clk_hw *hw,
3674 div = (reg >> AC100_CLKOUT_PRE_DIV_SHIFT) &
3675 ((1 << AC100_CLKOUT_PRE_DIV_WIDTH) - 1);
3676 prate = divider_recalc_rate(hw, prate, div,
3677 - ac100_clkout_prediv, 0);
3678 + ac100_clkout_prediv, 0,
3679 + AC100_CLKOUT_PRE_DIV_WIDTH);
3680 }
3681
3682 div = (reg >> AC100_CLKOUT_DIV_SHIFT) &
3683 (BIT(AC100_CLKOUT_DIV_WIDTH) - 1);
3684 return divider_recalc_rate(hw, prate, div, NULL,
3685 - CLK_DIVIDER_POWER_OF_TWO);
3686 + CLK_DIVIDER_POWER_OF_TWO,
3687 + AC100_CLKOUT_DIV_WIDTH);
3688 }
3689
3690 static long ac100_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
3691 diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
3692 index f8dc1601efd5..bddbe2da5283 100644
3693 --- a/drivers/scsi/libiscsi.c
3694 +++ b/drivers/scsi/libiscsi.c
3695 @@ -1696,6 +1696,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
3696 */
3697 switch (session->state) {
3698 case ISCSI_STATE_FAILED:
3699 + /*
3700 + * cmds should fail during shutdown, if the session
3701 + * state is bad, allowing completion to happen
3702 + */
3703 + if (unlikely(system_state != SYSTEM_RUNNING)) {
3704 + reason = FAILURE_SESSION_FAILED;
3705 + sc->result = DID_NO_CONNECT << 16;
3706 + break;
3707 + }
3708 case ISCSI_STATE_IN_RECOVERY:
3709 reason = FAILURE_SESSION_IN_RECOVERY;
3710 sc->result = DID_IMM_RETRY << 16;
3711 @@ -1980,6 +1989,19 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
3712 }
3713
3714 if (session->state != ISCSI_STATE_LOGGED_IN) {
3715 + /*
3716 + * During shutdown, if session is prematurely disconnected,
3717 + * recovery won't happen and there will be hung cmds. Not
3718 + * handling cmds would trigger EH, also bad in this case.
3719 + * Instead, handle cmd, allow completion to happen and let
3720 + * upper layer to deal with the result.
3721 + */
3722 + if (unlikely(system_state != SYSTEM_RUNNING)) {
3723 + sc->result = DID_NO_CONNECT << 16;
3724 + ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
3725 + rc = BLK_EH_HANDLED;
3726 + goto done;
3727 + }
3728 /*
3729 * We are probably in the middle of iscsi recovery so let
3730 * that complete and handle the error.
3731 @@ -2084,7 +2106,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
3732 task->last_timeout = jiffies;
3733 spin_unlock(&session->frwd_lock);
3734 ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
3735 - "timer reset" : "nh");
3736 + "timer reset" : "shutdown or nh");
3737 return rc;
3738 }
3739 EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
3740 diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
3741 index 324d8d8c62de..e2ea389fbec3 100644
3742 --- a/drivers/scsi/libsas/sas_expander.c
3743 +++ b/drivers/scsi/libsas/sas_expander.c
3744 @@ -293,6 +293,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
3745 phy->phy->minimum_linkrate = dr->pmin_linkrate;
3746 phy->phy->maximum_linkrate = dr->pmax_linkrate;
3747 phy->phy->negotiated_linkrate = phy->linkrate;
3748 + phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
3749
3750 skip:
3751 if (new_phy)
3752 @@ -686,7 +687,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
3753 res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
3754 resp, RPEL_RESP_SIZE);
3755
3756 - if (!res)
3757 + if (res)
3758 goto out;
3759
3760 phy->invalid_dword_count = scsi_to_u32(&resp[12]);
3761 @@ -695,6 +696,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
3762 phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
3763
3764 out:
3765 + kfree(req);
3766 kfree(resp);
3767 return res;
3768
3769 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
3770 index e518dadc8161..4beb4dd2bee8 100644
3771 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
3772 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
3773 @@ -6605,7 +6605,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
3774 u32 pd_seq_map_sz;
3775
3776 instance = pci_get_drvdata(pdev);
3777 - instance->unload = 1;
3778 host = instance->host;
3779 fusion = instance->ctrl_context;
3780
3781 @@ -6616,6 +6615,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
3782 if (instance->fw_crash_state != UNAVAILABLE)
3783 megasas_free_host_crash_buffer(instance);
3784 scsi_remove_host(instance->host);
3785 + instance->unload = 1;
3786
3787 if (megasas_wait_for_adapter_operational(instance))
3788 goto skip_firing_dcmds;
3789 diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
3790 index ecc699a65bac..08945142b9f8 100644
3791 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c
3792 +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
3793 @@ -168,7 +168,7 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
3794 /*
3795 * This function will Populate Driver Map using firmware raid map
3796 */
3797 -void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
3798 +static int MR_PopulateDrvRaidMap(struct megasas_instance *instance)
3799 {
3800 struct fusion_context *fusion = instance->ctrl_context;
3801 struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
3802 @@ -259,7 +259,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
3803 ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
3804 if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
3805 dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n");
3806 - return;
3807 + return 1;
3808 }
3809
3810 pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
3811 @@ -285,6 +285,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
3812 fusion->ld_map[(instance->map_id & 1)];
3813 pFwRaidMap = &fw_map_old->raidMap;
3814 ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
3815 + if (ld_count > MAX_LOGICAL_DRIVES) {
3816 + dev_dbg(&instance->pdev->dev,
3817 + "LD count exposed in RAID map in not valid\n");
3818 + return 1;
3819 + }
3820 +
3821 pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
3822 pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
3823 pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
3824 @@ -300,6 +306,8 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
3825 sizeof(struct MR_DEV_HANDLE_INFO) *
3826 MAX_RAIDMAP_PHYSICAL_DEVICES);
3827 }
3828 +
3829 + return 0;
3830 }
3831
3832 /*
3833 @@ -317,8 +325,8 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
3834 u16 ld;
3835 u32 expected_size;
3836
3837 -
3838 - MR_PopulateDrvRaidMap(instance);
3839 + if (MR_PopulateDrvRaidMap(instance))
3840 + return 0;
3841
3842 fusion = instance->ctrl_context;
3843 drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
3844 diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
3845 index beb4bf8fe9b0..139219c994e9 100644
3846 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
3847 +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
3848 @@ -4106,19 +4106,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3849 return 0;
3850 }
3851
3852 - /*
3853 - * Bug work around for firmware SATL handling. The loop
3854 - * is based on atomic operations and ensures consistency
3855 - * since we're lockless at this point
3856 - */
3857 - do {
3858 - if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
3859 - scmd->result = SAM_STAT_BUSY;
3860 - scmd->scsi_done(scmd);
3861 - return 0;
3862 - }
3863 - } while (_scsih_set_satl_pending(scmd, true));
3864 -
3865 sas_target_priv_data = sas_device_priv_data->sas_target;
3866
3867 /* invalid device handle */
3868 @@ -4144,6 +4131,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3869 sas_device_priv_data->block)
3870 return SCSI_MLQUEUE_DEVICE_BUSY;
3871
3872 + /*
3873 + * Bug work around for firmware SATL handling. The loop
3874 + * is based on atomic operations and ensures consistency
3875 + * since we're lockless at this point
3876 + */
3877 + do {
3878 + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
3879 + scmd->result = SAM_STAT_BUSY;
3880 + scmd->scsi_done(scmd);
3881 + return 0;
3882 + }
3883 + } while (_scsih_set_satl_pending(scmd, true));
3884 +
3885 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
3886 mpi_control = MPI2_SCSIIO_CONTROL_READ;
3887 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
3888 @@ -4170,6 +4170,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3889 if (!smid) {
3890 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3891 ioc->name, __func__);
3892 + _scsih_set_satl_pending(scmd, false);
3893 goto out;
3894 }
3895 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3896 @@ -4200,6 +4201,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3897 if (mpi_request->DataLength) {
3898 if (ioc->build_sg_scmd(ioc, scmd, smid)) {
3899 mpt3sas_base_free_smid(ioc, smid);
3900 + _scsih_set_satl_pending(scmd, false);
3901 goto out;
3902 }
3903 } else
3904 diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
3905 index 092a5fc85b9a..2770fbd4ce49 100644
3906 --- a/drivers/spi/spi-sh-msiof.c
3907 +++ b/drivers/spi/spi-sh-msiof.c
3908 @@ -797,11 +797,21 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
3909 goto stop_dma;
3910 }
3911
3912 - /* wait for tx fifo to be emptied / rx fifo to be filled */
3913 + /* wait for tx/rx DMA completion */
3914 ret = sh_msiof_wait_for_completion(p);
3915 if (ret)
3916 goto stop_reset;
3917
3918 + if (!rx) {
3919 + reinit_completion(&p->done);
3920 + sh_msiof_write(p, IER, IER_TEOFE);
3921 +
3922 + /* wait for tx fifo to be emptied */
3923 + ret = sh_msiof_wait_for_completion(p);
3924 + if (ret)
3925 + goto stop_reset;
3926 + }
3927 +
3928 /* clear status bits */
3929 sh_msiof_reset_str(p);
3930
3931 diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
3932 index 2da051c0d251..a4bb93b440a5 100644
3933 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
3934 +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
3935 @@ -528,19 +528,20 @@ EXPORT_SYMBOL(cfs_cpt_spread_node);
3936 int
3937 cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
3938 {
3939 - int cpu = smp_processor_id();
3940 - int cpt = cptab->ctb_cpu2cpt[cpu];
3941 + int cpu;
3942 + int cpt;
3943
3944 - if (cpt < 0) {
3945 - if (!remap)
3946 - return cpt;
3947 + preempt_disable();
3948 + cpu = smp_processor_id();
3949 + cpt = cptab->ctb_cpu2cpt[cpu];
3950
3951 + if (cpt < 0 && remap) {
3952 /* don't return negative value for safety of upper layer,
3953 * instead we shadow the unknown cpu to a valid partition ID
3954 */
3955 cpt = cpu % cptab->ctb_nparts;
3956 }
3957 -
3958 + preempt_enable();
3959 return cpt;
3960 }
3961 EXPORT_SYMBOL(cfs_cpt_current);
3962 diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
3963 index 942d094269fb..c4a5fb6f038f 100644
3964 --- a/drivers/target/target_core_user.c
3965 +++ b/drivers/target/target_core_user.c
3966 @@ -796,6 +796,13 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
3967 int ret;
3968 DEFINE_WAIT(__wait);
3969
3970 + /*
3971 + * Don't leave commands partially setup because the unmap
3972 + * thread might need the blocks to make forward progress.
3973 + */
3974 + tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
3975 + tcmu_cmd_reset_dbi_cur(tcmu_cmd);
3976 +
3977 prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);
3978
3979 pr_debug("sleeping for ring space\n");
3980 diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
3981 index 8ee38f55c7f3..43b90fd577e4 100644
3982 --- a/drivers/thermal/int340x_thermal/int3400_thermal.c
3983 +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
3984 @@ -319,17 +319,21 @@ static int int3400_thermal_probe(struct platform_device *pdev)
3985
3986 result = sysfs_create_group(&pdev->dev.kobj, &uuid_attribute_group);
3987 if (result)
3988 - goto free_zone;
3989 + goto free_rel_misc;
3990
3991 result = acpi_install_notify_handler(
3992 priv->adev->handle, ACPI_DEVICE_NOTIFY, int3400_notify,
3993 (void *)priv);
3994 if (result)
3995 - goto free_zone;
3996 + goto free_sysfs;
3997
3998 return 0;
3999
4000 -free_zone:
4001 +free_sysfs:
4002 + sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
4003 +free_rel_misc:
4004 + if (!priv->rel_misc_dev_res)
4005 + acpi_thermal_rel_misc_device_remove(priv->adev->handle);
4006 thermal_zone_device_unregister(priv->thermal);
4007 free_art_trt:
4008 kfree(priv->trts);
4009 diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
4010 index b4d3116cfdaf..3055f9a12a17 100644
4011 --- a/drivers/thermal/power_allocator.c
4012 +++ b/drivers/thermal/power_allocator.c
4013 @@ -523,6 +523,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
4014 struct thermal_instance *instance;
4015 struct power_allocator_params *params = tz->governor_data;
4016
4017 + mutex_lock(&tz->lock);
4018 list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
4019 if ((instance->trip != params->trip_max_desired_temperature) ||
4020 (!cdev_is_power_actor(instance->cdev)))
4021 @@ -534,6 +535,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
4022 mutex_unlock(&instance->cdev->lock);
4023 thermal_cdev_update(instance->cdev);
4024 }
4025 + mutex_unlock(&tz->lock);
4026 }
4027
4028 /**
4029 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
4030 index 0a3c9665e015..7253e8d2c6d9 100644
4031 --- a/drivers/tty/n_gsm.c
4032 +++ b/drivers/tty/n_gsm.c
4033 @@ -1463,6 +1463,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
4034 * in which case an opening port goes back to closed and a closing port
4035 * is simply put into closed state (any further frames from the other
4036 * end will get a DM response)
4037 + *
4038 + * Some control dlci can stay in ADM mode with other dlci working just
4039 + * fine. In that case we can just keep the control dlci open after the
4040 + * DLCI_OPENING retries time out.
4041 */
4042
4043 static void gsm_dlci_t1(unsigned long data)
4044 @@ -1476,8 +1480,15 @@ static void gsm_dlci_t1(unsigned long data)
4045 if (dlci->retries) {
4046 gsm_command(dlci->gsm, dlci->addr, SABM|PF);
4047 mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
4048 - } else
4049 + } else if (!dlci->addr && gsm->control == (DM | PF)) {
4050 + if (debug & 8)
4051 + pr_info("DLCI %d opening in ADM mode.\n",
4052 + dlci->addr);
4053 + gsm_dlci_open(dlci);
4054 + } else {
4055 gsm_dlci_close(dlci);
4056 + }
4057 +
4058 break;
4059 case DLCI_CLOSING:
4060 dlci->retries--;
4061 @@ -1495,8 +1506,8 @@ static void gsm_dlci_t1(unsigned long data)
4062 * @dlci: DLCI to open
4063 *
4064 * Commence opening a DLCI from the Linux side. We issue SABM messages
4065 - * to the modem which should then reply with a UA, at which point we
4066 - * will move into open state. Opening is done asynchronously with retry
4067 + * to the modem which should then reply with a UA or ADM, at which point
4068 + * we will move into open state. Opening is done asynchronously with retry
4069 * running off timers and the responses.
4070 */
4071
4072 diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
4073 index 48d5327d38d4..fe5cdda80b2c 100644
4074 --- a/drivers/uio/uio_hv_generic.c
4075 +++ b/drivers/uio/uio_hv_generic.c
4076 @@ -124,6 +124,13 @@ hv_uio_probe(struct hv_device *dev,
4077 if (ret)
4078 goto fail;
4079
4080 + /* Communicating with host has to be via shared memory not hypercall */
4081 + if (!dev->channel->offermsg.monitor_allocated) {
4082 + dev_err(&dev->device, "vmbus channel requires hypercall\n");
4083 + ret = -ENOTSUPP;
4084 + goto fail_close;
4085 + }
4086 +
4087 dev->channel->inbound.ring_buffer->interrupt_mask = 1;
4088 set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
4089
4090 diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
4091 index 082891dffd9d..b0d606b2d06c 100644
4092 --- a/drivers/vhost/net.c
4093 +++ b/drivers/vhost/net.c
4094 @@ -622,7 +622,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
4095
4096 if (!len && vq->busyloop_timeout) {
4097 /* Both tx vq and rx socket were polled here */
4098 - mutex_lock(&vq->mutex);
4099 + mutex_lock_nested(&vq->mutex, 1);
4100 vhost_disable_notify(&net->dev, vq);
4101
4102 preempt_disable();
4103 @@ -755,7 +755,7 @@ static void handle_rx(struct vhost_net *net)
4104 struct iov_iter fixup;
4105 __virtio16 num_buffers;
4106
4107 - mutex_lock(&vq->mutex);
4108 + mutex_lock_nested(&vq->mutex, 0);
4109 sock = vq->private_data;
4110 if (!sock)
4111 goto out;
4112 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
4113 index a827c1a684a9..c692e0b13242 100644
4114 --- a/drivers/vhost/vhost.c
4115 +++ b/drivers/vhost/vhost.c
4116 @@ -213,8 +213,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
4117 if (mask)
4118 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
4119 if (mask & POLLERR) {
4120 - if (poll->wqh)
4121 - remove_wait_queue(poll->wqh, &poll->wait);
4122 + vhost_poll_stop(poll);
4123 ret = -EINVAL;
4124 }
4125
4126 @@ -1253,14 +1252,12 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
4127 /* Caller should have vq mutex and device mutex */
4128 int vhost_vq_access_ok(struct vhost_virtqueue *vq)
4129 {
4130 - if (vq->iotlb) {
4131 - /* When device IOTLB was used, the access validation
4132 - * will be validated during prefetching.
4133 - */
4134 - return 1;
4135 - }
4136 - return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
4137 - vq_log_access_ok(vq, vq->log_base);
4138 + int ret = vq_log_access_ok(vq, vq->log_base);
4139 +
4140 + if (ret || vq->iotlb)
4141 + return ret;
4142 +
4143 + return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
4144 }
4145 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
4146
4147 diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
4148 index d7c239ea3d09..f5574060f9c8 100644
4149 --- a/drivers/video/backlight/corgi_lcd.c
4150 +++ b/drivers/video/backlight/corgi_lcd.c
4151 @@ -177,7 +177,7 @@ static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data)
4152 struct spi_message msg;
4153 struct spi_transfer xfer = {
4154 .len = 1,
4155 - .cs_change = 1,
4156 + .cs_change = 0,
4157 .tx_buf = lcd->buf,
4158 };
4159
4160 diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
4161 index eab1f842f9c0..e4bd63e9db6b 100644
4162 --- a/drivers/video/backlight/tdo24m.c
4163 +++ b/drivers/video/backlight/tdo24m.c
4164 @@ -369,7 +369,7 @@ static int tdo24m_probe(struct spi_device *spi)
4165
4166 spi_message_init(m);
4167
4168 - x->cs_change = 1;
4169 + x->cs_change = 0;
4170 x->tx_buf = &lcd->buf[0];
4171 spi_message_add_tail(x, m);
4172
4173 diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
4174 index 6a41ea92737a..4dc5ee8debeb 100644
4175 --- a/drivers/video/backlight/tosa_lcd.c
4176 +++ b/drivers/video/backlight/tosa_lcd.c
4177 @@ -49,7 +49,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
4178 struct spi_message msg;
4179 struct spi_transfer xfer = {
4180 .len = 1,
4181 - .cs_change = 1,
4182 + .cs_change = 0,
4183 .tx_buf = buf,
4184 };
4185
4186 diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
4187 index da653a080394..54127905bfe7 100644
4188 --- a/drivers/video/fbdev/vfb.c
4189 +++ b/drivers/video/fbdev/vfb.c
4190 @@ -239,8 +239,23 @@ static int vfb_check_var(struct fb_var_screeninfo *var,
4191 */
4192 static int vfb_set_par(struct fb_info *info)
4193 {
4194 + switch (info->var.bits_per_pixel) {
4195 + case 1:
4196 + info->fix.visual = FB_VISUAL_MONO01;
4197 + break;
4198 + case 8:
4199 + info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
4200 + break;
4201 + case 16:
4202 + case 24:
4203 + case 32:
4204 + info->fix.visual = FB_VISUAL_TRUECOLOR;
4205 + break;
4206 + }
4207 +
4208 info->fix.line_length = get_line_length(info->var.xres_virtual,
4209 info->var.bits_per_pixel);
4210 +
4211 return 0;
4212 }
4213
4214 @@ -450,6 +465,8 @@ static int vfb_probe(struct platform_device *dev)
4215 goto err2;
4216 platform_set_drvdata(dev, info);
4217
4218 + vfb_set_par(info);
4219 +
4220 fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n",
4221 videomemorysize >> 10);
4222 return 0;
4223 diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
4224 index 36be987ff9ef..c2f4ff516230 100644
4225 --- a/drivers/watchdog/dw_wdt.c
4226 +++ b/drivers/watchdog/dw_wdt.c
4227 @@ -127,14 +127,27 @@ static int dw_wdt_start(struct watchdog_device *wdd)
4228
4229 dw_wdt_set_timeout(wdd, wdd->timeout);
4230
4231 - set_bit(WDOG_HW_RUNNING, &wdd->status);
4232 -
4233 writel(WDOG_CONTROL_REG_WDT_EN_MASK,
4234 dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
4235
4236 return 0;
4237 }
4238
4239 +static int dw_wdt_stop(struct watchdog_device *wdd)
4240 +{
4241 + struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
4242 +
4243 + if (!dw_wdt->rst) {
4244 + set_bit(WDOG_HW_RUNNING, &wdd->status);
4245 + return 0;
4246 + }
4247 +
4248 + reset_control_assert(dw_wdt->rst);
4249 + reset_control_deassert(dw_wdt->rst);
4250 +
4251 + return 0;
4252 +}
4253 +
4254 static int dw_wdt_restart(struct watchdog_device *wdd,
4255 unsigned long action, void *data)
4256 {
4257 @@ -173,6 +186,7 @@ static const struct watchdog_info dw_wdt_ident = {
4258 static const struct watchdog_ops dw_wdt_ops = {
4259 .owner = THIS_MODULE,
4260 .start = dw_wdt_start,
4261 + .stop = dw_wdt_stop,
4262 .ping = dw_wdt_ping,
4263 .set_timeout = dw_wdt_set_timeout,
4264 .get_timeleft = dw_wdt_get_timeleft,
4265 diff --git a/fs/dcache.c b/fs/dcache.c
4266 index a1417787e269..c28b9c91b5cb 100644
4267 --- a/fs/dcache.c
4268 +++ b/fs/dcache.c
4269 @@ -468,9 +468,11 @@ static void dentry_lru_add(struct dentry *dentry)
4270 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
4271 * reason (NFS timeouts or autofs deletes).
4272 *
4273 - * __d_drop requires dentry->d_lock.
4274 + * __d_drop requires dentry->d_lock
4275 + * ___d_drop doesn't mark dentry as "unhashed"
4276 + * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
4277 */
4278 -void __d_drop(struct dentry *dentry)
4279 +static void ___d_drop(struct dentry *dentry)
4280 {
4281 if (!d_unhashed(dentry)) {
4282 struct hlist_bl_head *b;
4283 @@ -486,12 +488,17 @@ void __d_drop(struct dentry *dentry)
4284
4285 hlist_bl_lock(b);
4286 __hlist_bl_del(&dentry->d_hash);
4287 - dentry->d_hash.pprev = NULL;
4288 hlist_bl_unlock(b);
4289 /* After this call, in-progress rcu-walk path lookup will fail. */
4290 write_seqcount_invalidate(&dentry->d_seq);
4291 }
4292 }
4293 +
4294 +void __d_drop(struct dentry *dentry)
4295 +{
4296 + ___d_drop(dentry);
4297 + dentry->d_hash.pprev = NULL;
4298 +}
4299 EXPORT_SYMBOL(__d_drop);
4300
4301 void d_drop(struct dentry *dentry)
4302 @@ -2386,7 +2393,7 @@ EXPORT_SYMBOL(d_delete);
4303 static void __d_rehash(struct dentry *entry)
4304 {
4305 struct hlist_bl_head *b = d_hash(entry->d_name.hash);
4306 - BUG_ON(!d_unhashed(entry));
4307 +
4308 hlist_bl_lock(b);
4309 hlist_bl_add_head_rcu(&entry->d_hash, b);
4310 hlist_bl_unlock(b);
4311 @@ -2821,9 +2828,9 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
4312 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
4313
4314 /* unhash both */
4315 - /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
4316 - __d_drop(dentry);
4317 - __d_drop(target);
4318 + /* ___d_drop does write_seqcount_barrier, but they're OK to nest. */
4319 + ___d_drop(dentry);
4320 + ___d_drop(target);
4321
4322 /* Switch the names.. */
4323 if (exchange)
4324 @@ -2835,6 +2842,8 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
4325 __d_rehash(dentry);
4326 if (exchange)
4327 __d_rehash(target);
4328 + else
4329 + target->d_hash.pprev = NULL;
4330
4331 /* ... and switch them in the tree */
4332 if (IS_ROOT(dentry)) {
4333 diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
4334 index 5100ec1b5d55..86eb33f67618 100644
4335 --- a/include/linux/clk-provider.h
4336 +++ b/include/linux/clk-provider.h
4337 @@ -412,7 +412,7 @@ extern const struct clk_ops clk_divider_ro_ops;
4338
4339 unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
4340 unsigned int val, const struct clk_div_table *table,
4341 - unsigned long flags);
4342 + unsigned long flags, unsigned long width);
4343 long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
4344 unsigned long rate, unsigned long *prate,
4345 const struct clk_div_table *table,
4346 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
4347 index bfb4a9d962a5..f2f9e957bf1b 100644
4348 --- a/include/linux/mlx5/driver.h
4349 +++ b/include/linux/mlx5/driver.h
4350 @@ -794,7 +794,7 @@ struct mlx5_core_dev {
4351 struct mlx5e_resources mlx5e_res;
4352 struct {
4353 struct mlx5_rsvd_gids reserved_gids;
4354 - atomic_t roce_en;
4355 + u32 roce_en;
4356 } roce;
4357 #ifdef CONFIG_MLX5_FPGA
4358 struct mlx5_fpga_device *fpga;
4359 diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
4360 index f7e83f6d2e64..236452ebbd9e 100644
4361 --- a/net/8021q/vlan_dev.c
4362 +++ b/net/8021q/vlan_dev.c
4363 @@ -29,6 +29,7 @@
4364 #include <linux/net_tstamp.h>
4365 #include <linux/etherdevice.h>
4366 #include <linux/ethtool.h>
4367 +#include <linux/phy.h>
4368 #include <net/arp.h>
4369 #include <net/switchdev.h>
4370
4371 @@ -665,8 +666,11 @@ static int vlan_ethtool_get_ts_info(struct net_device *dev,
4372 {
4373 const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
4374 const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
4375 + struct phy_device *phydev = vlan->real_dev->phydev;
4376
4377 - if (ops->get_ts_info) {
4378 + if (phydev && phydev->drv && phydev->drv->ts_info) {
4379 + return phydev->drv->ts_info(phydev, info);
4380 + } else if (ops->get_ts_info) {
4381 return ops->get_ts_info(vlan->real_dev, info);
4382 } else {
4383 info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
4384 diff --git a/net/core/dev.c b/net/core/dev.c
4385 index 387af3415385..4be2a4047640 100644
4386 --- a/net/core/dev.c
4387 +++ b/net/core/dev.c
4388 @@ -1025,7 +1025,7 @@ bool dev_valid_name(const char *name)
4389 {
4390 if (*name == '\0')
4391 return false;
4392 - if (strlen(name) >= IFNAMSIZ)
4393 + if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
4394 return false;
4395 if (!strcmp(name, ".") || !strcmp(name, ".."))
4396 return false;
4397 @@ -2696,7 +2696,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
4398 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
4399 return 0;
4400
4401 - eth = (struct ethhdr *)skb_mac_header(skb);
4402 + eth = (struct ethhdr *)skb->data;
4403 type = eth->h_proto;
4404 }
4405
4406 diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
4407 index a1d1f50e0e19..7d9cf26f4bb1 100644
4408 --- a/net/ipv4/arp.c
4409 +++ b/net/ipv4/arp.c
4410 @@ -437,7 +437,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
4411 /*unsigned long now; */
4412 struct net *net = dev_net(dev);
4413
4414 - rt = ip_route_output(net, sip, tip, 0, 0);
4415 + rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
4416 if (IS_ERR(rt))
4417 return 1;
4418 if (rt->dst.dev != dev) {
4419 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
4420 index 1ee6c0d8dde4..f39955913d3f 100644
4421 --- a/net/ipv4/fib_semantics.c
4422 +++ b/net/ipv4/fib_semantics.c
4423 @@ -1755,18 +1755,20 @@ void fib_select_multipath(struct fib_result *res, int hash)
4424 bool first = false;
4425
4426 for_nexthops(fi) {
4427 + if (net->ipv4.sysctl_fib_multipath_use_neigh) {
4428 + if (!fib_good_nh(nh))
4429 + continue;
4430 + if (!first) {
4431 + res->nh_sel = nhsel;
4432 + first = true;
4433 + }
4434 + }
4435 +
4436 if (hash > atomic_read(&nh->nh_upper_bound))
4437 continue;
4438
4439 - if (!net->ipv4.sysctl_fib_multipath_use_neigh ||
4440 - fib_good_nh(nh)) {
4441 - res->nh_sel = nhsel;
4442 - return;
4443 - }
4444 - if (!first) {
4445 - res->nh_sel = nhsel;
4446 - first = true;
4447 - }
4448 + res->nh_sel = nhsel;
4449 + return;
4450 } endfor_nexthops(fi);
4451 }
4452 #endif
4453 diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
4454 index 4e90082b23a6..13f7bbc0168d 100644
4455 --- a/net/ipv4/ip_tunnel.c
4456 +++ b/net/ipv4/ip_tunnel.c
4457 @@ -253,13 +253,14 @@ static struct net_device *__ip_tunnel_create(struct net *net,
4458 struct net_device *dev;
4459 char name[IFNAMSIZ];
4460
4461 - if (parms->name[0])
4462 + err = -E2BIG;
4463 + if (parms->name[0]) {
4464 + if (!dev_valid_name(parms->name))
4465 + goto failed;
4466 strlcpy(name, parms->name, IFNAMSIZ);
4467 - else {
4468 - if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
4469 - err = -E2BIG;
4470 + } else {
4471 + if (strlen(ops->kind) > (IFNAMSIZ - 3))
4472 goto failed;
4473 - }
4474 strlcpy(name, ops->kind, IFNAMSIZ);
4475 strncat(name, "%d", 2);
4476 }
4477 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
4478 index e8ab306794d8..4228f3b2f347 100644
4479 --- a/net/ipv6/ip6_gre.c
4480 +++ b/net/ipv6/ip6_gre.c
4481 @@ -319,11 +319,13 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
4482 if (t || !create)
4483 return t;
4484
4485 - if (parms->name[0])
4486 + if (parms->name[0]) {
4487 + if (!dev_valid_name(parms->name))
4488 + return NULL;
4489 strlcpy(name, parms->name, IFNAMSIZ);
4490 - else
4491 + } else {
4492 strcpy(name, "ip6gre%d");
4493 -
4494 + }
4495 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
4496 ip6gre_tunnel_setup);
4497 if (!dev)
4498 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
4499 index 3763dc01e374..ffbb81609016 100644
4500 --- a/net/ipv6/ip6_output.c
4501 +++ b/net/ipv6/ip6_output.c
4502 @@ -138,6 +138,14 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
4503 return ret;
4504 }
4505
4506 +#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
4507 + /* Policy lookup after SNAT yielded a new policy */
4508 + if (skb_dst(skb)->xfrm) {
4509 + IPCB(skb)->flags |= IPSKB_REROUTED;
4510 + return dst_output(net, sk, skb);
4511 + }
4512 +#endif
4513 +
4514 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
4515 dst_allfrag(skb_dst(skb)) ||
4516 (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
4517 @@ -367,6 +375,11 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
4518 static inline int ip6_forward_finish(struct net *net, struct sock *sk,
4519 struct sk_buff *skb)
4520 {
4521 + struct dst_entry *dst = skb_dst(skb);
4522 +
4523 + __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
4524 + __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
4525 +
4526 return dst_output(net, sk, skb);
4527 }
4528
4529 @@ -560,8 +573,6 @@ int ip6_forward(struct sk_buff *skb)
4530
4531 hdr->hop_limit--;
4532
4533 - __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
4534 - __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
4535 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
4536 net, NULL, skb, skb->dev, dst->dev,
4537 ip6_forward_finish);
4538 @@ -1237,7 +1248,7 @@ static int __ip6_append_data(struct sock *sk,
4539 const struct sockcm_cookie *sockc)
4540 {
4541 struct sk_buff *skb, *skb_prev = NULL;
4542 - unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
4543 + unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
4544 int exthdrlen = 0;
4545 int dst_exthdrlen = 0;
4546 int hh_len;
4547 @@ -1273,6 +1284,12 @@ static int __ip6_append_data(struct sock *sk,
4548 sizeof(struct frag_hdr) : 0) +
4549 rt->rt6i_nfheader_len;
4550
4551 + /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
4552 + * the first fragment
4553 + */
4554 + if (headersize + transhdrlen > mtu)
4555 + goto emsgsize;
4556 +
4557 if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
4558 (sk->sk_protocol == IPPROTO_UDP ||
4559 sk->sk_protocol == IPPROTO_RAW)) {
4560 @@ -1288,9 +1305,8 @@ static int __ip6_append_data(struct sock *sk,
4561
4562 if (cork->length + length > maxnonfragsize - headersize) {
4563 emsgsize:
4564 - ipv6_local_error(sk, EMSGSIZE, fl6,
4565 - mtu - headersize +
4566 - sizeof(struct ipv6hdr));
4567 + pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
4568 + ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
4569 return -EMSGSIZE;
4570 }
4571
4572 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
4573 index 1161fd5630c1..7e11f6a811f5 100644
4574 --- a/net/ipv6/ip6_tunnel.c
4575 +++ b/net/ipv6/ip6_tunnel.c
4576 @@ -297,13 +297,16 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
4577 struct net_device *dev;
4578 struct ip6_tnl *t;
4579 char name[IFNAMSIZ];
4580 - int err = -ENOMEM;
4581 + int err = -E2BIG;
4582
4583 - if (p->name[0])
4584 + if (p->name[0]) {
4585 + if (!dev_valid_name(p->name))
4586 + goto failed;
4587 strlcpy(name, p->name, IFNAMSIZ);
4588 - else
4589 + } else {
4590 sprintf(name, "ip6tnl%%d");
4591 -
4592 + }
4593 + err = -ENOMEM;
4594 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
4595 ip6_tnl_dev_setup);
4596 if (!dev)
4597 diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
4598 index bcdc2d557de1..7c0f647b5195 100644
4599 --- a/net/ipv6/ip6_vti.c
4600 +++ b/net/ipv6/ip6_vti.c
4601 @@ -212,10 +212,13 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
4602 char name[IFNAMSIZ];
4603 int err;
4604
4605 - if (p->name[0])
4606 + if (p->name[0]) {
4607 + if (!dev_valid_name(p->name))
4608 + goto failed;
4609 strlcpy(name, p->name, IFNAMSIZ);
4610 - else
4611 + } else {
4612 sprintf(name, "ip6_vti%%d");
4613 + }
4614
4615 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
4616 if (!dev)
4617 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4618 index a4a865c8a23c..0126d9bfa670 100644
4619 --- a/net/ipv6/route.c
4620 +++ b/net/ipv6/route.c
4621 @@ -871,6 +871,9 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net,
4622 struct fib6_node *fn;
4623 struct rt6_info *rt;
4624
4625 + if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
4626 + flags &= ~RT6_LOOKUP_F_IFACE;
4627 +
4628 read_lock_bh(&table->tb6_lock);
4629 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
4630 restart:
4631 diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
4632 index 7a78dcfda68a..f343e6f0fc95 100644
4633 --- a/net/ipv6/seg6_iptunnel.c
4634 +++ b/net/ipv6/seg6_iptunnel.c
4635 @@ -16,6 +16,7 @@
4636 #include <linux/net.h>
4637 #include <linux/module.h>
4638 #include <net/ip.h>
4639 +#include <net/ip_tunnels.h>
4640 #include <net/lwtunnel.h>
4641 #include <net/netevent.h>
4642 #include <net/netns/generic.h>
4643 @@ -211,11 +212,6 @@ static int seg6_do_srh(struct sk_buff *skb)
4644
4645 tinfo = seg6_encap_lwtunnel(dst->lwtstate);
4646
4647 - if (likely(!skb->encapsulation)) {
4648 - skb_reset_inner_headers(skb);
4649 - skb->encapsulation = 1;
4650 - }
4651 -
4652 switch (tinfo->mode) {
4653 case SEG6_IPTUN_MODE_INLINE:
4654 if (skb->protocol != htons(ETH_P_IPV6))
4655 @@ -224,10 +220,12 @@ static int seg6_do_srh(struct sk_buff *skb)
4656 err = seg6_do_srh_inline(skb, tinfo->srh);
4657 if (err)
4658 return err;
4659 -
4660 - skb_reset_inner_headers(skb);
4661 break;
4662 case SEG6_IPTUN_MODE_ENCAP:
4663 + err = iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6);
4664 + if (err)
4665 + return err;
4666 +
4667 if (skb->protocol == htons(ETH_P_IPV6))
4668 proto = IPPROTO_IPV6;
4669 else if (skb->protocol == htons(ETH_P_IP))
4670 @@ -239,6 +237,8 @@ static int seg6_do_srh(struct sk_buff *skb)
4671 if (err)
4672 return err;
4673
4674 + skb_set_inner_transport_header(skb, skb_transport_offset(skb));
4675 + skb_set_inner_protocol(skb, skb->protocol);
4676 skb->protocol = htons(ETH_P_IPV6);
4677 break;
4678 case SEG6_IPTUN_MODE_L2ENCAP:
4679 @@ -262,8 +262,6 @@ static int seg6_do_srh(struct sk_buff *skb)
4680 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
4681 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
4682
4683 - skb_set_inner_protocol(skb, skb->protocol);
4684 -
4685 return 0;
4686 }
4687
4688 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
4689 index cac815cc8600..f03c1a562135 100644
4690 --- a/net/ipv6/sit.c
4691 +++ b/net/ipv6/sit.c
4692 @@ -244,11 +244,13 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
4693 if (!create)
4694 goto failed;
4695
4696 - if (parms->name[0])
4697 + if (parms->name[0]) {
4698 + if (!dev_valid_name(parms->name))
4699 + goto failed;
4700 strlcpy(name, parms->name, IFNAMSIZ);
4701 - else
4702 + } else {
4703 strcpy(name, "sit%d");
4704 -
4705 + }
4706 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
4707 ipip6_tunnel_setup);
4708 if (!dev)
4709 diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
4710 index c28223d8092b..fca69c3771f5 100644
4711 --- a/net/l2tp/l2tp_netlink.c
4712 +++ b/net/l2tp/l2tp_netlink.c
4713 @@ -765,6 +765,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
4714
4715 if ((session->ifname[0] &&
4716 nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
4717 + (session->offset &&
4718 + nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) ||
4719 (session->cookie_len &&
4720 nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
4721 &session->cookie[0])) ||
4722 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
4723 index 84f757c5d91a..288640471c2f 100644
4724 --- a/net/mac80211/cfg.c
4725 +++ b/net/mac80211/cfg.c
4726 @@ -2373,10 +2373,17 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
4727 struct ieee80211_sub_if_data *sdata;
4728 enum nl80211_tx_power_setting txp_type = type;
4729 bool update_txp_type = false;
4730 + bool has_monitor = false;
4731
4732 if (wdev) {
4733 sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
4734
4735 + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
4736 + sdata = rtnl_dereference(local->monitor_sdata);
4737 + if (!sdata)
4738 + return -EOPNOTSUPP;
4739 + }
4740 +
4741 switch (type) {
4742 case NL80211_TX_POWER_AUTOMATIC:
4743 sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
4744 @@ -2415,15 +2422,34 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
4745
4746 mutex_lock(&local->iflist_mtx);
4747 list_for_each_entry(sdata, &local->interfaces, list) {
4748 + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
4749 + has_monitor = true;
4750 + continue;
4751 + }
4752 sdata->user_power_level = local->user_power_level;
4753 if (txp_type != sdata->vif.bss_conf.txpower_type)
4754 update_txp_type = true;
4755 sdata->vif.bss_conf.txpower_type = txp_type;
4756 }
4757 - list_for_each_entry(sdata, &local->interfaces, list)
4758 + list_for_each_entry(sdata, &local->interfaces, list) {
4759 + if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
4760 + continue;
4761 ieee80211_recalc_txpower(sdata, update_txp_type);
4762 + }
4763 mutex_unlock(&local->iflist_mtx);
4764
4765 + if (has_monitor) {
4766 + sdata = rtnl_dereference(local->monitor_sdata);
4767 + if (sdata) {
4768 + sdata->user_power_level = local->user_power_level;
4769 + if (txp_type != sdata->vif.bss_conf.txpower_type)
4770 + update_txp_type = true;
4771 + sdata->vif.bss_conf.txpower_type = txp_type;
4772 +
4773 + ieee80211_recalc_txpower(sdata, update_txp_type);
4774 + }
4775 + }
4776 +
4777 return 0;
4778 }
4779
4780 diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
4781 index c7f93fd9ca7a..4d82fe7d627c 100644
4782 --- a/net/mac80211/driver-ops.h
4783 +++ b/net/mac80211/driver-ops.h
4784 @@ -165,7 +165,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
4785 if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
4786 sdata->vif.type == NL80211_IFTYPE_NAN ||
4787 (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
4788 - !sdata->vif.mu_mimo_owner)))
4789 + !sdata->vif.mu_mimo_owner &&
4790 + !(changed & BSS_CHANGED_TXPOWER))))
4791 return;
4792
4793 if (!check_sdata_in_driver(sdata))
4794 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
4795 index 9219bc134109..1b86eccf94b6 100644
4796 --- a/net/netlink/af_netlink.c
4797 +++ b/net/netlink/af_netlink.c
4798 @@ -1053,6 +1053,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
4799 if (addr->sa_family != AF_NETLINK)
4800 return -EINVAL;
4801
4802 + if (alen < sizeof(struct sockaddr_nl))
4803 + return -EINVAL;
4804 +
4805 if ((nladdr->nl_groups || nladdr->nl_pid) &&
4806 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
4807 return -EPERM;
4808 diff --git a/net/rds/bind.c b/net/rds/bind.c
4809 index 75d43dc8e96b..5aa3a64aa4f0 100644
4810 --- a/net/rds/bind.c
4811 +++ b/net/rds/bind.c
4812 @@ -114,6 +114,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
4813 rs, &addr, (int)ntohs(*port));
4814 break;
4815 } else {
4816 + rs->rs_bound_addr = 0;
4817 rds_sock_put(rs);
4818 ret = -ENOMEM;
4819 break;
4820 diff --git a/net/sched/act_api.c b/net/sched/act_api.c
4821 index 8f2c63514956..4444d7e755e6 100644
4822 --- a/net/sched/act_api.c
4823 +++ b/net/sched/act_api.c
4824 @@ -133,8 +133,10 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
4825 continue;
4826
4827 nest = nla_nest_start(skb, n_i);
4828 - if (!nest)
4829 + if (!nest) {
4830 + index--;
4831 goto nla_put_failure;
4832 + }
4833 err = tcf_action_dump_1(skb, p, 0, 0);
4834 if (err < 0) {
4835 index--;
4836 diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
4837 index c0c707eb2c96..2b087623fb1d 100644
4838 --- a/net/sched/act_bpf.c
4839 +++ b/net/sched/act_bpf.c
4840 @@ -248,10 +248,14 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
4841
4842 static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
4843 {
4844 - if (cfg->is_ebpf)
4845 - bpf_prog_put(cfg->filter);
4846 - else
4847 - bpf_prog_destroy(cfg->filter);
4848 + struct bpf_prog *filter = cfg->filter;
4849 +
4850 + if (filter) {
4851 + if (cfg->is_ebpf)
4852 + bpf_prog_put(filter);
4853 + else
4854 + bpf_prog_destroy(filter);
4855 + }
4856
4857 kfree(cfg->bpf_ops);
4858 kfree(cfg->bpf_name);
4859 diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
4860 index b642ad3d39dd..6d10b3af479b 100644
4861 --- a/net/sched/act_skbmod.c
4862 +++ b/net/sched/act_skbmod.c
4863 @@ -190,7 +190,8 @@ static void tcf_skbmod_cleanup(struct tc_action *a, int bind)
4864 struct tcf_skbmod_params *p;
4865
4866 p = rcu_dereference_protected(d->skbmod_p, 1);
4867 - kfree_rcu(p, rcu);
4868 + if (p)
4869 + kfree_rcu(p, rcu);
4870 }
4871
4872 static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
4873 diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
4874 index 22bf1a376b91..7cb63616805d 100644
4875 --- a/net/sched/act_tunnel_key.c
4876 +++ b/net/sched/act_tunnel_key.c
4877 @@ -208,11 +208,12 @@ static void tunnel_key_release(struct tc_action *a, int bind)
4878 struct tcf_tunnel_key_params *params;
4879
4880 params = rcu_dereference_protected(t->params, 1);
4881 + if (params) {
4882 + if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
4883 + dst_release(&params->tcft_enc_metadata->dst);
4884
4885 - if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
4886 - dst_release(&params->tcft_enc_metadata->dst);
4887 -
4888 - kfree_rcu(params, rcu);
4889 + kfree_rcu(params, rcu);
4890 + }
4891 }
4892
4893 static int tunnel_key_dump_addresses(struct sk_buff *skb,
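The act_bpf, act_skbmod and act_tunnel_key hunks above share one idea: a cleanup routine can run on a half-initialized action, so the pointer it releases may still be NULL and must be guarded (kfree_rcu() derives an rcu_head address from its argument, so a NULL base is not safe the way kfree(NULL) is). A generic sketch of the idiom, with hypothetical userspace names:

    #include <stdlib.h>

    /*
     * Hypothetical sketch: cleanup callable on a half-initialized
     * object must tolerate a still-NULL member.
     */
    struct params { int data; };

    struct action {
        struct params *p;   /* may be NULL if init failed early */
    };

    static void action_cleanup(struct action *a)
    {
        struct params *p = a->p;

        if (p)          /* the guard each of the three hunks adds */
            free(p);    /* kfree_rcu()/bpf_prog_put() in the kernel */
    }

    int main(void)
    {
        struct action a = { 0 };

        action_cleanup(&a);  /* safe even before p was allocated */
        return 0;
    }
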
4894 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
4895 index f27a9718554c..08b5705e7381 100644
4896 --- a/net/sctp/ipv6.c
4897 +++ b/net/sctp/ipv6.c
4898 @@ -728,8 +728,10 @@ static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
4899 sctp_v6_map_v4(addr);
4900 }
4901
4902 - if (addr->sa.sa_family == AF_INET)
4903 + if (addr->sa.sa_family == AF_INET) {
4904 + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
4905 return sizeof(struct sockaddr_in);
4906 + }
4907 return sizeof(struct sockaddr_in6);
4908 }
4909
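The sctp_v6_addr_to_user() fix above matters because a struct sockaddr_in is rewritten in place inside a buffer that previously held a larger sockaddr_in6; without clearing sin_zero, stale bytes from the bigger layout leak to userspace. A small standalone illustration:

    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>

    /*
     * Hypothetical analog: when a larger sockaddr_in6 buffer is
     * reused as a sockaddr_in, sin_zero must be cleared or stale
     * bytes escape with the shorter address.
     */
    int main(void)
    {
        union {
            struct sockaddr_in  v4;
            struct sockaddr_in6 v6;
        } addr;

        memset(&addr, 0xAA, sizeof(addr));   /* simulate stale contents */
        addr.v4.sin_family = AF_INET;
        addr.v4.sin_port = htons(7);
        addr.v4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

        printf("before clear: %#x\n", (unsigned char)addr.v4.sin_zero[0]);
        memset(addr.v4.sin_zero, 0, sizeof(addr.v4.sin_zero));
        printf("after clear:  %#x\n", (unsigned char)addr.v4.sin_zero[0]);
        return 0;
    }
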
4910 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
4911 index 6b3a862706de..2d6f612f32c3 100644
4912 --- a/net/sctp/socket.c
4913 +++ b/net/sctp/socket.c
4914 @@ -337,11 +337,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
4915 if (!opt->pf->af_supported(addr->sa.sa_family, opt))
4916 return NULL;
4917
4918 - /* V4 mapped address are really of AF_INET family */
4919 - if (addr->sa.sa_family == AF_INET6 &&
4920 - ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
4921 - !opt->pf->af_supported(AF_INET, opt))
4922 - return NULL;
4923 + if (addr->sa.sa_family == AF_INET6) {
4924 + if (len < SIN6_LEN_RFC2133)
4925 + return NULL;
4926 + /* V4-mapped addresses are really of AF_INET family */
4927 + if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
4928 + !opt->pf->af_supported(AF_INET, opt))
4929 + return NULL;
4930 + }
4931
4932 /* If we get this far, af is valid. */
4933 af = sctp_get_af_specific(addr->sa.sa_family);
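sctp_sockaddr_af() above now verifies the caller supplied at least SIN6_LEN_RFC2133 (24) bytes before ipv6_addr_v4mapped() dereferences sin6_addr. A minimal validation sketch along the same lines (hypothetical helper, not the kernel function):

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef SIN6_LEN_RFC2133
    #define SIN6_LEN_RFC2133 24   /* original RFC 2133 sockaddr_in6 size */
    #endif

    /* Hypothetical sketch: validate length before touching v6 fields. */
    static int addr_ok(const struct sockaddr *sa, size_t len)
    {
        if (len < sizeof(sa->sa_family))
            return 0;
        if (sa->sa_family == AF_INET6 && len < SIN6_LEN_RFC2133)
            return 0;   /* too short to contain sin6_addr */
        return 1;
    }

    int main(void)
    {
        struct sockaddr_in6 a6 = { .sin6_family = AF_INET6 };

        printf("full: %d short: %d\n",
               addr_ok((struct sockaddr *)&a6, sizeof(a6)),
               addr_ok((struct sockaddr *)&a6, 8));  /* 8 < 24: rejected */
        return 0;
    }
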
4934 diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
4935 index c5fda15ba319..4a3a3f1331ee 100644
4936 --- a/net/strparser/strparser.c
4937 +++ b/net/strparser/strparser.c
4938 @@ -60,7 +60,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
4939 struct sock *sk = strp->sk;
4940
4941 /* Report an error on the lower socket */
4942 - sk->sk_err = err;
4943 + sk->sk_err = -err;
4944 sk->sk_error_report(sk);
4945 }
4946 }
4947 @@ -458,7 +458,7 @@ static void strp_msg_timeout(struct work_struct *w)
4948 /* Message assembly timed out */
4949 STRP_STATS_INCR(strp->stats.msg_timeouts);
4950 strp->cb.lock(strp);
4951 - strp->cb.abort_parser(strp, ETIMEDOUT);
4952 + strp->cb.abort_parser(strp, -ETIMEDOUT);
4953 strp->cb.unlock(strp);
4954 }
4955
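The two strparser hunks above align one sign convention: internal callbacks carry negative errno values, while sk_err is stored positive for userspace; the abort path therefore negates err and the timeout now passes -ETIMEDOUT. A tiny sketch of the convention:

    #include <assert.h>
    #include <errno.h>

    /*
     * Hypothetical sketch of the convention the patch aligns:
     * internal paths carry negative errno; sk_err holds it positive.
     */
    static int sk_err;

    static void abort_parser(int err)
    {
        assert(err < 0);     /* callers pass -ETIMEDOUT etc. */
        sk_err = -err;       /* stored positive, as userspace sees it */
    }

    int main(void)
    {
        abort_parser(-ETIMEDOUT);
        assert(sk_err == ETIMEDOUT);
        return 0;
    }
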
4956 diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
4957 index 83d8dda15233..4eeb9afdc89f 100644
4958 --- a/sound/soc/intel/atom/sst/sst_stream.c
4959 +++ b/sound/soc/intel/atom/sst/sst_stream.c
4960 @@ -221,7 +221,7 @@ int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx,
4961 sst_free_block(sst_drv_ctx, block);
4962 out:
4963 test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id);
4964 - return 0;
4965 + return ret;
4966 }
4967
4968 /*
4969 diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
4970 index 5bcde01d15e6..fbfb76ee2346 100644
4971 --- a/sound/soc/intel/boards/cht_bsw_rt5645.c
4972 +++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
4973 @@ -133,6 +133,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
4974 SND_SOC_DAPM_HP("Headphone", NULL),
4975 SND_SOC_DAPM_MIC("Headset Mic", NULL),
4976 SND_SOC_DAPM_MIC("Int Mic", NULL),
4977 + SND_SOC_DAPM_MIC("Int Analog Mic", NULL),
4978 SND_SOC_DAPM_SPK("Ext Spk", NULL),
4979 SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
4980 platform_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
4981 @@ -143,6 +144,8 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
4982 {"IN1N", NULL, "Headset Mic"},
4983 {"DMIC L1", NULL, "Int Mic"},
4984 {"DMIC R1", NULL, "Int Mic"},
4985 + {"IN2P", NULL, "Int Analog Mic"},
4986 + {"IN2N", NULL, "Int Analog Mic"},
4987 {"Headphone", NULL, "HPOL"},
4988 {"Headphone", NULL, "HPOR"},
4989 {"Ext Spk", NULL, "SPOL"},
4990 @@ -150,6 +153,9 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
4991 {"Headphone", NULL, "Platform Clock"},
4992 {"Headset Mic", NULL, "Platform Clock"},
4993 {"Int Mic", NULL, "Platform Clock"},
4994 + {"Int Analog Mic", NULL, "Platform Clock"},
4995 + {"Int Analog Mic", NULL, "micbias1"},
4996 + {"Int Analog Mic", NULL, "micbias2"},
4997 {"Ext Spk", NULL, "Platform Clock"},
4998 };
4999
5000 @@ -204,6 +210,7 @@ static const struct snd_kcontrol_new cht_mc_controls[] = {
5001 SOC_DAPM_PIN_SWITCH("Headphone"),
5002 SOC_DAPM_PIN_SWITCH("Headset Mic"),
5003 SOC_DAPM_PIN_SWITCH("Int Mic"),
5004 + SOC_DAPM_PIN_SWITCH("Int Analog Mic"),
5005 SOC_DAPM_PIN_SWITCH("Ext Spk"),
5006 };
5007
5008 diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
5009 index 89f70133c8e4..b74a6040cd96 100644
5010 --- a/sound/soc/intel/skylake/skl-messages.c
5011 +++ b/sound/soc/intel/skylake/skl-messages.c
5012 @@ -404,7 +404,11 @@ int skl_resume_dsp(struct skl *skl)
5013 if (skl->skl_sst->is_first_boot == true)
5014 return 0;
5015
5016 + /* disable dynamic clock gating during fw and lib download */
5017 + ctx->enable_miscbdcge(ctx->dev, false);
5018 +
5019 ret = skl_dsp_wake(ctx->dsp);
5020 + ctx->enable_miscbdcge(ctx->dev, true);
5021 if (ret < 0)
5022 return ret;
5023
5024 diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
5025 index 2b1e513b1680..7fe1e8f273a0 100644
5026 --- a/sound/soc/intel/skylake/skl-pcm.c
5027 +++ b/sound/soc/intel/skylake/skl-pcm.c
5028 @@ -1332,7 +1332,11 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
5029 return -EIO;
5030 }
5031
5032 + /* disable dynamic clock gating during fw and lib download */
5033 + skl->skl_sst->enable_miscbdcge(platform->dev, false);
5034 +
5035 ret = ops->init_fw(platform->dev, skl->skl_sst);
5036 + skl->skl_sst->enable_miscbdcge(platform->dev, true);
5037 if (ret < 0) {
5038 dev_err(platform->dev, "Failed to boot first fw: %d\n", ret);
5039 return ret;
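Both Skylake hunks above bracket firmware/library download with enable_miscbdcge(dev, false) ... enable_miscbdcge(dev, true), and re-enable gating before ret is checked, so an error return cannot leave dynamic clock gating disabled. A schematic sketch of that bracketing, with hypothetical stand-ins for the driver calls:

    #include <stdio.h>

    /*
     * Hypothetical sketch: the "enable" side runs before ret is
     * inspected, so no early return can leave gating off.
     */
    static void enable_miscbdcge(int enable)
    {
        printf("clock gating %s\n", enable ? "on" : "off");
    }

    static int download_firmware(void)
    {
        return 0;   /* pretend success */
    }

    int main(void)
    {
        int ret;

        enable_miscbdcge(0);        /* off for the whole download */
        ret = download_firmware();
        enable_miscbdcge(1);        /* back on, even if ret < 0 */

        return ret < 0 ? 1 : 0;
    }
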
5040 diff --git a/tools/objtool/check.c b/tools/objtool/check.c
5041 index 9d01d0b1084e..c8b8b7101c6f 100644
5042 --- a/tools/objtool/check.c
5043 +++ b/tools/objtool/check.c
5044 @@ -1385,6 +1385,17 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
5045 state->vals[op->dest.reg].offset = -state->stack_size;
5046 }
5047
5048 + else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
5049 + cfa->base == CFI_BP) {
5050 +
5051 + /*
5052 + * mov %rbp, %rsp
5053 + *
5054 + * Restore the original stack pointer (Clang).
5055 + */
5056 + state->stack_size = -state->regs[CFI_BP].offset;
5057 + }
5058 +
5059 else if (op->dest.reg == cfa->base) {
5060
5061 /* mov %reg, %rsp */
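The objtool hunk above teaches update_insn_state() that mov %rbp, %rsp restores the stack pointer from the frame pointer, a sequence Clang emits for frames with a dynamic stack footprint. A C function that plausibly produces such an epilogue when compiled with clang -fno-omit-frame-pointer (illustrative; exact codegen depends on the compiler):

    #include <alloca.h>
    #include <string.h>

    /*
     * Hypothetical example: a dynamic alloca() forces the compiler
     * to restore %rsp from the frame pointer on exit, i.e. the
     * "mov %rbp, %rsp" pattern objtool learns to track here.
     */
    int sum_bytes(int n)
    {
        unsigned char *buf = alloca(n);
        int i, s = 0;

        memset(buf, 1, n);
        for (i = 0; i < n; i++)
            s += buf[i];
        return s;
    }

    int main(void)
    {
        return sum_bytes(16) == 16 ? 0 : 1;
    }
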
5062 diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
5063 index 9c4e23d8c8ce..53d83d7e6a09 100644
5064 --- a/tools/perf/arch/powerpc/util/sym-handling.c
5065 +++ b/tools/perf/arch/powerpc/util/sym-handling.c
5066 @@ -64,6 +64,14 @@ int arch__compare_symbol_names_n(const char *namea, const char *nameb,
5067
5068 return strncmp(namea, nameb, n);
5069 }
5070 +
5071 +const char *arch__normalize_symbol_name(const char *name)
5072 +{
5073 + /* Skip over initial dot */
5074 + if (name && *name == '.')
5075 + name++;
5076 + return name;
5077 +}
5078 #endif
5079
5080 #if defined(_CALL_ELF) && _CALL_ELF == 2
5081 diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
5082 index 0c95ffefb6cc..1957abc1c8cf 100644
5083 --- a/tools/perf/builtin-record.c
5084 +++ b/tools/perf/builtin-record.c
5085 @@ -1856,8 +1856,8 @@ int cmd_record(int argc, const char **argv)
5086 goto out;
5087 }
5088
5089 - /* Enable ignoring missing threads when -u option is defined. */
5090 - rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;
5091 + /* Enable ignoring missing threads when -u/-p option is defined. */
5092 + rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
5093
5094 err = -ENOMEM;
5095 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
5096 diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
5097 index fae4b0340750..183c3ed56e08 100644
5098 --- a/tools/perf/builtin-report.c
5099 +++ b/tools/perf/builtin-report.c
5100 @@ -162,12 +162,28 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
5101 struct hist_entry *he = iter->he;
5102 struct report *rep = arg;
5103 struct branch_info *bi;
5104 + struct perf_sample *sample = iter->sample;
5105 + struct perf_evsel *evsel = iter->evsel;
5106 + int err;
5107 +
5108 + if (!ui__has_annotation())
5109 + return 0;
5110 +
5111 + hist__account_cycles(sample->branch_stack, al, sample,
5112 + rep->nonany_branch_mode);
5113
5114 bi = he->branch_info;
5115 + err = addr_map_symbol__inc_samples(&bi->from, sample, evsel->idx);
5116 + if (err)
5117 + goto out;
5118 +
5119 + err = addr_map_symbol__inc_samples(&bi->to, sample, evsel->idx);
5120 +
5121 branch_type_count(&rep->brtype_stat, &bi->flags,
5122 bi->from.addr, bi->to.addr);
5123
5124 - return 0;
5125 +out:
5126 + return err;
5127 }
5128
5129 static int process_sample_event(struct perf_tool *tool,
5130 diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
5131 index 1f6beb3d0c68..ac19130c14d8 100644
5132 --- a/tools/perf/util/evsel.c
5133 +++ b/tools/perf/util/evsel.c
5134 @@ -1591,10 +1591,46 @@ static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
5135 return fprintf(fp, " %-32s %s\n", name, val);
5136 }
5137
5138 +static void perf_evsel__remove_fd(struct perf_evsel *pos,
5139 + int nr_cpus, int nr_threads,
5140 + int thread_idx)
5141 +{
5142 + for (int cpu = 0; cpu < nr_cpus; cpu++)
5143 + for (int thread = thread_idx; thread < nr_threads - 1; thread++)
5144 + FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
5145 +}
5146 +
5147 +static int update_fds(struct perf_evsel *evsel,
5148 + int nr_cpus, int cpu_idx,
5149 + int nr_threads, int thread_idx)
5150 +{
5151 + struct perf_evsel *pos;
5152 +
5153 + if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
5154 + return -EINVAL;
5155 +
5156 + evlist__for_each_entry(evsel->evlist, pos) {
5157 + nr_cpus = pos != evsel ? nr_cpus : cpu_idx;
5158 +
5159 + perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
5160 +
5161 + /*
5162 + * Since the fds for the next evsel have not been created,
5163 + * there is no need to iterate over the whole event list.
5164 + */
5165 + if (pos == evsel)
5166 + break;
5167 + }
5168 + return 0;
5169 +}
5170 +
5171 static bool ignore_missing_thread(struct perf_evsel *evsel,
5172 + int nr_cpus, int cpu,
5173 struct thread_map *threads,
5174 int thread, int err)
5175 {
5176 + pid_t ignore_pid = thread_map__pid(threads, thread);
5177 +
5178 if (!evsel->ignore_missing_thread)
5179 return false;
5180
5181 @@ -1610,11 +1646,18 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
5182 if (threads->nr == 1)
5183 return false;
5184
5185 + /*
5186 + * We must remove the fd for the missing thread first,
5187 + * because thread_map__remove() will decrease threads->nr.
5188 + */
5189 + if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
5190 + return false;
5191 +
5192 if (thread_map__remove(threads, thread))
5193 return false;
5194
5195 pr_warning("WARNING: Ignored open failure for pid %d\n",
5196 - thread_map__pid(threads, thread));
5197 + ignore_pid);
5198 return true;
5199 }
5200
5201 @@ -1719,7 +1762,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
5202 if (fd < 0) {
5203 err = -errno;
5204
5205 - if (ignore_missing_thread(evsel, threads, thread, err)) {
5206 + if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
5207 /*
5208 * We just removed 1 thread, so take a step
5209 * back on thread index and lower the upper
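In the evsel change above, update_fds() must run before thread_map__remove() because removal renumbers the threads: every per-thread fd slot after the failed thread is shifted left, for each cpu, so fd indices keep matching thread indices. A standalone sketch of the compaction:

    #include <stdio.h>

    /*
     * Hypothetical sketch of the fd-table compaction update_fds()
     * performs: drop column thread_idx, shifting later columns left.
     */
    #define NR_CPUS    2
    #define NR_THREADS 4

    static void remove_thread_fds(int fds[NR_CPUS][NR_THREADS],
                                  int nr_threads, int thread_idx)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            for (int t = thread_idx; t < nr_threads - 1; t++)
                fds[cpu][t] = fds[cpu][t + 1];
    }

    int main(void)
    {
        int fds[NR_CPUS][NR_THREADS] = {
            { 10, 11, 12, 13 },
            { 20, 21, 22, 23 },
        };

        remove_thread_fds(fds, NR_THREADS, 1);   /* thread 1 failed */
        /* prints "10 12 13"; the caller then drops threads->nr by one */
        printf("%d %d %d\n", fds[0][0], fds[0][1], fds[0][2]);
        return 0;
    }
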
5210 diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
5211 index b7aaf9b2294d..68786bb7790e 100644
5212 --- a/tools/perf/util/probe-event.c
5213 +++ b/tools/perf/util/probe-event.c
5214 @@ -2625,6 +2625,14 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
5215
5216 out:
5217 free(nbase);
5218 +
5219 + /* Final validation */
5220 + if (ret >= 0 && !is_c_func_name(buf)) {
5221 + pr_warning("Internal error: \"%s\" is an invalid event name.\n",
5222 + buf);
5223 + ret = -EINVAL;
5224 + }
5225 +
5226 return ret;
5227 }
5228
5229 @@ -2792,16 +2800,32 @@ static int find_probe_functions(struct map *map, char *name,
5230 int found = 0;
5231 struct symbol *sym;
5232 struct rb_node *tmp;
5233 + const char *norm, *ver;
5234 + char *buf = NULL;
5235
5236 if (map__load(map) < 0)
5237 return 0;
5238
5239 map__for_each_symbol(map, sym, tmp) {
5240 - if (strglobmatch(sym->name, name)) {
5241 + norm = arch__normalize_symbol_name(sym->name);
5242 + if (!norm)
5243 + continue;
5244 +
5245 + /* We don't care whether this is the default symbol version */
5246 + ver = strchr(norm, '@');
5247 + if (ver) {
5248 + buf = strndup(norm, ver - norm);
5249 + if (!buf)
5250 + return -ENOMEM;
5251 + norm = buf;
5252 + }
5253 + if (strglobmatch(norm, name)) {
5254 found++;
5255 if (syms && found < probe_conf.max_probes)
5256 syms[found - 1] = sym;
5257 }
5258 + if (buf)
5259 + zfree(&buf);
5260 }
5261
5262 return found;
5263 @@ -2847,7 +2871,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
5264 * same name but different addresses, this lists all the symbols.
5265 */
5266 num_matched_functions = find_probe_functions(map, pp->function, syms);
5267 - if (num_matched_functions == 0) {
5268 + if (num_matched_functions <= 0) {
5269 pr_err("Failed to find symbol %s in %s\n", pp->function,
5270 pev->target ? : "kernel");
5271 ret = -ENOENT;
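find_probe_functions() above now globs against a normalized symbol name: arch__normalize_symbol_name() drops powerpc's leading dot, and any "@version" suffix is cut off with strndup() before matching. A standalone sketch of the same normalization (hypothetical helper name):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Hypothetical sketch of the normalization applied before glob
     * matching: drop a leading '.', cut at '@' (symbol versioning).
     */
    static char *normalize_sym(const char *name)
    {
        const char *ver;

        if (*name == '.')          /* ppc64 function descriptors */
            name++;
        ver = strchr(name, '@');
        if (ver)
            return strndup(name, (size_t)(ver - name));
        return strdup(name);
    }

    int main(void)
    {
        char *n = normalize_sym(".memcpy@GLIBC_2.2.5");

        printf("%s\n", n);         /* -> memcpy */
        free(n);
        return 0;
    }
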
5272 diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
5273 index 6492ef38b090..4e8dd5fd45fd 100644
5274 --- a/tools/perf/util/symbol.c
5275 +++ b/tools/perf/util/symbol.c
5276 @@ -93,6 +93,11 @@ static int prefix_underscores_count(const char *str)
5277 return tail - str;
5278 }
5279
5280 +const char * __weak arch__normalize_symbol_name(const char *name)
5281 +{
5282 + return name;
5283 +}
5284 +
5285 int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
5286 {
5287 return strcmp(namea, nameb);
5288 diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
5289 index 6352022593c6..698c65e603a8 100644
5290 --- a/tools/perf/util/symbol.h
5291 +++ b/tools/perf/util/symbol.h
5292 @@ -347,6 +347,7 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
5293 void arch__sym_update(struct symbol *s, GElf_Sym *sym);
5294 #endif
5295
5296 +const char *arch__normalize_symbol_name(const char *name);
5297 #define SYMBOL_A 0
5298 #define SYMBOL_B 1
5299
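The __weak default above returns the name unchanged; an architecture (powerpc, earlier in this patch) links in a strong definition that wins at link time. A minimal sketch of the weak-symbol pattern (GCC/Clang attribute, hypothetical function name):

    #include <stdio.h>

    /*
     * Hypothetical sketch: a weak default that any strong definition
     * in another object file overrides at link time, as the powerpc
     * arch__normalize_symbol_name() does in perf.
     */
    __attribute__((weak)) const char *arch_normalize(const char *name)
    {
        return name;   /* generic fallback: identity */
    }

    int main(void)
    {
        /* With no strong override linked in, the weak body runs. */
        printf("%s\n", arch_normalize(".schedule"));  /* -> .schedule */
        return 0;
    }
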
5300 diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
5301 index 3687b720327a..cc57c246eade 100644
5302 --- a/tools/perf/util/util.c
5303 +++ b/tools/perf/util/util.c
5304 @@ -196,7 +196,7 @@ int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
5305
5306 size -= ret;
5307 off_in += ret;
5308 - off_out -= ret;
5309 + off_out += ret;
5310 }
5311 munmap(ptr, off_in + size);
5312
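The copyfile_offset() fix above is a one-character sign error: the loop advanced off_in but moved off_out backwards, so every chunk after the first landed at the wrong output offset. A userspace copy loop showing both offsets advancing (a sketch, not the perf function):

    #include <unistd.h>
    #include <sys/types.h>

    /*
     * Hypothetical sketch of the fixed loop: both file offsets
     * advance by the bytes actually copied (the bug was
     * "off_out -= ret").
     */
    static int copy_range(int ifd, off_t off_in, int ofd, off_t off_out,
                          size_t size)
    {
        char buf[4096];

        while (size > 0) {
            size_t n = size < sizeof(buf) ? size : sizeof(buf);
            ssize_t r = pread(ifd, buf, n, off_in);

            if (r <= 0)
                return -1;
            if (pwrite(ofd, buf, (size_t)r, off_out) != r)
                return -1;
            size -= (size_t)r;
            off_in += r;
            off_out += r;   /* the one-character fix */
        }
        return 0;
    }

    int main(void) { return 0; }  /* compile check only */
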
5313 diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
5314 index 3ab6ec403905..e11fe84de0fd 100644
5315 --- a/tools/testing/selftests/net/msg_zerocopy.c
5316 +++ b/tools/testing/selftests/net/msg_zerocopy.c
5317 @@ -259,22 +259,28 @@ static int setup_ip6h(struct ipv6hdr *ip6h, uint16_t payload_len)
5318 return sizeof(*ip6h);
5319 }
5320
5321 -static void setup_sockaddr(int domain, const char *str_addr, void *sockaddr)
5322 +
5323 +static void setup_sockaddr(int domain, const char *str_addr,
5324 + struct sockaddr_storage *sockaddr)
5325 {
5326 struct sockaddr_in6 *addr6 = (void *) sockaddr;
5327 struct sockaddr_in *addr4 = (void *) sockaddr;
5328
5329 switch (domain) {
5330 case PF_INET:
5331 + memset(addr4, 0, sizeof(*addr4));
5332 addr4->sin_family = AF_INET;
5333 addr4->sin_port = htons(cfg_port);
5334 - if (inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
5335 + if (str_addr &&
5336 + inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
5337 error(1, 0, "ipv4 parse error: %s", str_addr);
5338 break;
5339 case PF_INET6:
5340 + memset(addr6, 0, sizeof(*addr6));
5341 addr6->sin6_family = AF_INET6;
5342 addr6->sin6_port = htons(cfg_port);
5343 - if (inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
5344 + if (str_addr &&
5345 + inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
5346 error(1, 0, "ipv6 parse error: %s", str_addr);
5347 break;
5348 default:
5349 @@ -603,6 +609,7 @@ static void parse_opts(int argc, char **argv)
5350 sizeof(struct tcphdr) -
5351 40 /* max tcp options */;
5352 int c;
5353 + char *daddr = NULL, *saddr = NULL;
5354
5355 cfg_payload_len = max_payload_len;
5356
5357 @@ -627,7 +634,7 @@ static void parse_opts(int argc, char **argv)
5358 cfg_cpu = strtol(optarg, NULL, 0);
5359 break;
5360 case 'D':
5361 - setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
5362 + daddr = optarg;
5363 break;
5364 case 'i':
5365 cfg_ifindex = if_nametoindex(optarg);
5366 @@ -638,7 +645,7 @@ static void parse_opts(int argc, char **argv)
5367 cfg_cork_mixed = true;
5368 break;
5369 case 'p':
5370 - cfg_port = htons(strtoul(optarg, NULL, 0));
5371 + cfg_port = strtoul(optarg, NULL, 0);
5372 break;
5373 case 'r':
5374 cfg_rx = true;
5375 @@ -647,7 +654,7 @@ static void parse_opts(int argc, char **argv)
5376 cfg_payload_len = strtoul(optarg, NULL, 0);
5377 break;
5378 case 'S':
5379 - setup_sockaddr(cfg_family, optarg, &cfg_src_addr);
5380 + saddr = optarg;
5381 break;
5382 case 't':
5383 cfg_runtime_ms = 200 + strtoul(optarg, NULL, 10) * 1000;
5384 @@ -660,6 +667,8 @@ static void parse_opts(int argc, char **argv)
5385 break;
5386 }
5387 }
5388 + setup_sockaddr(cfg_family, daddr, &cfg_dst_addr);
5389 + setup_sockaddr(cfg_family, saddr, &cfg_src_addr);
5390
5391 if (cfg_payload_len > max_payload_len)
5392 error(1, 0, "-s: payload exceeds max (%d)", max_payload_len);
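The selftest changes above defer address parsing until all options have been read (so -p given after -D/-S still takes effect, and rx mode may omit addresses entirely) and keep cfg_port in host order: the old code applied htons() at parse time and again at use time, which cancels out on little-endian machines and put the wrong port on the wire. A tiny demo of that double conversion:

    #include <stdio.h>
    #include <arpa/inet.h>

    /*
     * Hypothetical demo of the double-conversion bug the selftest
     * fix removes: swapping twice undoes the swap, so on a
     * little-endian host the raw host-order value ends up in the
     * wire-order sin_port field.
     */
    int main(void)
    {
        unsigned short port = 4242;

        printf("correct wire value: %#x\n", htons(port));
        printf("double htons:       %#x\n", htons(htons(port)));
        return 0;
    }
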