Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0220-4.9.121-all-fixes.patch

Parent Directory | Revision Log


Revision 3213 - (show annotations) (download)
Tue Aug 21 12:02:37 2018 UTC (5 years, 8 months ago) by niro
File size: 36860 byte(s)
-linux-4.9.121
1 diff --git a/Documentation/Changes b/Documentation/Changes
2 index 22797a15dc24..76d6dc0d3227 100644
3 --- a/Documentation/Changes
4 +++ b/Documentation/Changes
5 @@ -33,7 +33,7 @@ GNU C 3.2 gcc --version
6 GNU make 3.80 make --version
7 binutils 2.12 ld -v
8 util-linux 2.10o fdformat --version
9 -module-init-tools 0.9.10 depmod -V
10 +kmod 13 depmod -V
11 e2fsprogs 1.41.4 e2fsck -V
12 jfsutils 1.1.3 fsck.jfs -V
13 reiserfsprogs 3.6.3 reiserfsck -V
14 @@ -143,12 +143,6 @@ is not build with ``CONFIG_KALLSYMS`` and you have no way to rebuild and
15 reproduce the Oops with that option, then you can still decode that Oops
16 with ksymoops.
17
18 -Module-Init-Tools
19 ------------------
20 -
21 -A new module loader is now in the kernel that requires ``module-init-tools``
22 -to use. It is backward compatible with the 2.4.x series kernels.
23 -
24 Mkinitrd
25 --------
26
27 @@ -363,16 +357,17 @@ Util-linux
28
29 - <ftp://ftp.kernel.org/pub/linux/utils/util-linux/>
30
31 +Kmod
32 +----
33 +
34 +- <https://www.kernel.org/pub/linux/utils/kernel/kmod/>
35 +- <https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git>
36 +
37 Ksymoops
38 --------
39
40 - <ftp://ftp.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
41
42 -Module-Init-Tools
43 ------------------
44 -
45 -- <ftp://ftp.kernel.org/pub/linux/kernel/people/rusty/modules/>
46 -
47 Mkinitrd
48 --------
49
50 diff --git a/Makefile b/Makefile
51 index fea2fe577185..e54a126841a9 100644
52 --- a/Makefile
53 +++ b/Makefile
54 @@ -1,6 +1,6 @@
55 VERSION = 4
56 PATCHLEVEL = 9
57 -SUBLEVEL = 120
58 +SUBLEVEL = 121
59 EXTRAVERSION =
60 NAME = Roaring Lionus
61
62 @@ -417,7 +417,8 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
63 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
64
65 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
66 -export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
67 +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
68 +export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
69 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
70 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
71 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
72 diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
73 index 4cd4862845cd..0a56898f8410 100644
74 --- a/arch/arm64/mm/mmu.c
75 +++ b/arch/arm64/mm/mmu.c
76 @@ -804,12 +804,12 @@ int pmd_clear_huge(pmd_t *pmd)
77 return 1;
78 }
79
80 -int pud_free_pmd_page(pud_t *pud)
81 +int pud_free_pmd_page(pud_t *pud, unsigned long addr)
82 {
83 return pud_none(*pud);
84 }
85
86 -int pmd_free_pte_page(pmd_t *pmd)
87 +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
88 {
89 return pmd_none(*pmd);
90 }
91 diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
92 index ec9bee661d50..b7f50427a3ef 100644
93 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
94 +++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
95 @@ -265,7 +265,7 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
96 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
97 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
98 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
99 - vmovd _args_digest(state , idx, 4) , %xmm0
100 + vmovd _args_digest+4*32(state, idx, 4), %xmm1
101 vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
102 vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
103 vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
104 diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
105 index bb078786a323..be6492c0deae 100644
106 --- a/arch/x86/include/asm/i8259.h
107 +++ b/arch/x86/include/asm/i8259.h
108 @@ -2,6 +2,7 @@
109 #define _ASM_X86_I8259_H
110
111 #include <linux/delay.h>
112 +#include <asm/io.h>
113
114 extern unsigned int cached_irq_mask;
115
116 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
117 index 5229eaf73828..ac67a76550bd 100644
118 --- a/arch/x86/kernel/cpu/bugs.c
119 +++ b/arch/x86/kernel/cpu/bugs.c
120 @@ -647,10 +647,9 @@ void x86_spec_ctrl_setup_ap(void)
121 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
122 #if IS_ENABLED(CONFIG_KVM_INTEL)
123 EXPORT_SYMBOL_GPL(l1tf_mitigation);
124 -
125 +#endif
126 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
127 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
128 -#endif
129
130 static void __init l1tf_select_mitigation(void)
131 {
132 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
133 index a3b63e5a527c..e30baa8ad94f 100644
134 --- a/arch/x86/mm/pgtable.c
135 +++ b/arch/x86/mm/pgtable.c
136 @@ -653,28 +653,50 @@ int pmd_clear_huge(pmd_t *pmd)
137 return 0;
138 }
139
140 +#ifdef CONFIG_X86_64
141 /**
142 * pud_free_pmd_page - Clear pud entry and free pmd page.
143 * @pud: Pointer to a PUD.
144 + * @addr: Virtual address associated with pud.
145 *
146 - * Context: The pud range has been unmaped and TLB purged.
147 + * Context: The pud range has been unmapped and TLB purged.
148 * Return: 1 if clearing the entry succeeded. 0 otherwise.
149 + *
150 + * NOTE: Callers must allow a single page allocation.
151 */
152 -int pud_free_pmd_page(pud_t *pud)
153 +int pud_free_pmd_page(pud_t *pud, unsigned long addr)
154 {
155 - pmd_t *pmd;
156 + pmd_t *pmd, *pmd_sv;
157 + pte_t *pte;
158 int i;
159
160 if (pud_none(*pud))
161 return 1;
162
163 pmd = (pmd_t *)pud_page_vaddr(*pud);
164 + pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
165 + if (!pmd_sv)
166 + return 0;
167
168 - for (i = 0; i < PTRS_PER_PMD; i++)
169 - if (!pmd_free_pte_page(&pmd[i]))
170 - return 0;
171 + for (i = 0; i < PTRS_PER_PMD; i++) {
172 + pmd_sv[i] = pmd[i];
173 + if (!pmd_none(pmd[i]))
174 + pmd_clear(&pmd[i]);
175 + }
176
177 pud_clear(pud);
178 +
179 + /* INVLPG to clear all paging-structure caches */
180 + flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
181 +
182 + for (i = 0; i < PTRS_PER_PMD; i++) {
183 + if (!pmd_none(pmd_sv[i])) {
184 + pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
185 + free_page((unsigned long)pte);
186 + }
187 + }
188 +
189 + free_page((unsigned long)pmd_sv);
190 free_page((unsigned long)pmd);
191
192 return 1;
193 @@ -683,11 +705,12 @@ int pud_free_pmd_page(pud_t *pud)
194 /**
195 * pmd_free_pte_page - Clear pmd entry and free pte page.
196 * @pmd: Pointer to a PMD.
197 + * @addr: Virtual address associated with pmd.
198 *
199 - * Context: The pmd range has been unmaped and TLB purged.
200 + * Context: The pmd range has been unmapped and TLB purged.
201 * Return: 1 if clearing the entry succeeded. 0 otherwise.
202 */
203 -int pmd_free_pte_page(pmd_t *pmd)
204 +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
205 {
206 pte_t *pte;
207
208 @@ -696,8 +719,30 @@ int pmd_free_pte_page(pmd_t *pmd)
209
210 pte = (pte_t *)pmd_page_vaddr(*pmd);
211 pmd_clear(pmd);
212 +
213 + /* INVLPG to clear all paging-structure caches */
214 + flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
215 +
216 free_page((unsigned long)pte);
217
218 return 1;
219 }
220 +
221 +#else /* !CONFIG_X86_64 */
222 +
223 +int pud_free_pmd_page(pud_t *pud, unsigned long addr)
224 +{
225 + return pud_none(*pud);
226 +}
227 +
228 +/*
229 + * Disable free page handling on x86-PAE. This assures that ioremap()
230 + * does not update sync'd pmd entries. See vmalloc_sync_one().
231 + */
232 +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
233 +{
234 + return pmd_none(*pmd);
235 +}
236 +
237 +#endif /* CONFIG_X86_64 */
238 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
239 diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
240 index d676fc59521a..860c9e5dfd7a 100644
241 --- a/crypto/ablkcipher.c
242 +++ b/crypto/ablkcipher.c
243 @@ -70,11 +70,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
244 return max(start, end_page);
245 }
246
247 -static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
248 - unsigned int bsize)
249 +static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
250 + unsigned int n)
251 {
252 - unsigned int n = bsize;
253 -
254 for (;;) {
255 unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
256
257 @@ -86,17 +84,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
258 n -= len_this_page;
259 scatterwalk_start(&walk->out, sg_next(walk->out.sg));
260 }
261 -
262 - return bsize;
263 }
264
265 -static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
266 - unsigned int n)
267 +static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
268 + unsigned int n)
269 {
270 scatterwalk_advance(&walk->in, n);
271 scatterwalk_advance(&walk->out, n);
272 -
273 - return n;
274 }
275
276 static int ablkcipher_walk_next(struct ablkcipher_request *req,
277 @@ -106,39 +100,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
278 struct ablkcipher_walk *walk, int err)
279 {
280 struct crypto_tfm *tfm = req->base.tfm;
281 - unsigned int nbytes = 0;
282 + unsigned int n; /* bytes processed */
283 + bool more;
284
285 - if (likely(err >= 0)) {
286 - unsigned int n = walk->nbytes - err;
287 + if (unlikely(err < 0))
288 + goto finish;
289
290 - if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
291 - n = ablkcipher_done_fast(walk, n);
292 - else if (WARN_ON(err)) {
293 - err = -EINVAL;
294 - goto err;
295 - } else
296 - n = ablkcipher_done_slow(walk, n);
297 + n = walk->nbytes - err;
298 + walk->total -= n;
299 + more = (walk->total != 0);
300
301 - nbytes = walk->total - n;
302 - err = 0;
303 + if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
304 + ablkcipher_done_fast(walk, n);
305 + } else {
306 + if (WARN_ON(err)) {
307 + /* unexpected case; didn't process all bytes */
308 + err = -EINVAL;
309 + goto finish;
310 + }
311 + ablkcipher_done_slow(walk, n);
312 }
313
314 - scatterwalk_done(&walk->in, 0, nbytes);
315 - scatterwalk_done(&walk->out, 1, nbytes);
316 -
317 -err:
318 - walk->total = nbytes;
319 - walk->nbytes = nbytes;
320 + scatterwalk_done(&walk->in, 0, more);
321 + scatterwalk_done(&walk->out, 1, more);
322
323 - if (nbytes) {
324 + if (more) {
325 crypto_yield(req->base.flags);
326 return ablkcipher_walk_next(req, walk);
327 }
328 -
329 + err = 0;
330 +finish:
331 + walk->nbytes = 0;
332 if (walk->iv != req->info)
333 memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
334 kfree(walk->iv_buffer);
335 -
336 return err;
337 }
338 EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
339 diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
340 index a832426820e8..27f98666763a 100644
341 --- a/crypto/blkcipher.c
342 +++ b/crypto/blkcipher.c
343 @@ -70,19 +70,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
344 return max(start, end_page);
345 }
346
347 -static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
348 - unsigned int bsize)
349 +static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
350 + unsigned int bsize)
351 {
352 u8 *addr;
353
354 addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
355 addr = blkcipher_get_spot(addr, bsize);
356 scatterwalk_copychunks(addr, &walk->out, bsize, 1);
357 - return bsize;
358 }
359
360 -static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
361 - unsigned int n)
362 +static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
363 + unsigned int n)
364 {
365 if (walk->flags & BLKCIPHER_WALK_COPY) {
366 blkcipher_map_dst(walk);
367 @@ -96,49 +95,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
368
369 scatterwalk_advance(&walk->in, n);
370 scatterwalk_advance(&walk->out, n);
371 -
372 - return n;
373 }
374
375 int blkcipher_walk_done(struct blkcipher_desc *desc,
376 struct blkcipher_walk *walk, int err)
377 {
378 - unsigned int nbytes = 0;
379 + unsigned int n; /* bytes processed */
380 + bool more;
381
382 - if (likely(err >= 0)) {
383 - unsigned int n = walk->nbytes - err;
384 + if (unlikely(err < 0))
385 + goto finish;
386
387 - if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
388 - n = blkcipher_done_fast(walk, n);
389 - else if (WARN_ON(err)) {
390 - err = -EINVAL;
391 - goto err;
392 - } else
393 - n = blkcipher_done_slow(walk, n);
394 + n = walk->nbytes - err;
395 + walk->total -= n;
396 + more = (walk->total != 0);
397
398 - nbytes = walk->total - n;
399 - err = 0;
400 + if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
401 + blkcipher_done_fast(walk, n);
402 + } else {
403 + if (WARN_ON(err)) {
404 + /* unexpected case; didn't process all bytes */
405 + err = -EINVAL;
406 + goto finish;
407 + }
408 + blkcipher_done_slow(walk, n);
409 }
410
411 - scatterwalk_done(&walk->in, 0, nbytes);
412 - scatterwalk_done(&walk->out, 1, nbytes);
413 + scatterwalk_done(&walk->in, 0, more);
414 + scatterwalk_done(&walk->out, 1, more);
415
416 -err:
417 - walk->total = nbytes;
418 - walk->nbytes = nbytes;
419 -
420 - if (nbytes) {
421 + if (more) {
422 crypto_yield(desc->flags);
423 return blkcipher_walk_next(desc, walk);
424 }
425 -
426 + err = 0;
427 +finish:
428 + walk->nbytes = 0;
429 if (walk->iv != desc->info)
430 memcpy(desc->info, walk->iv, walk->ivsize);
431 if (walk->buffer != walk->page)
432 kfree(walk->buffer);
433 if (walk->page)
434 free_page((unsigned long)walk->page);
435 -
436 return err;
437 }
438 EXPORT_SYMBOL_GPL(blkcipher_walk_done);
439 diff --git a/crypto/vmac.c b/crypto/vmac.c
440 index df76a816cfb2..bb2fc787d615 100644
441 --- a/crypto/vmac.c
442 +++ b/crypto/vmac.c
443 @@ -1,6 +1,10 @@
444 /*
445 - * Modified to interface to the Linux kernel
446 + * VMAC: Message Authentication Code using Universal Hashing
447 + *
448 + * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
449 + *
450 * Copyright (c) 2009, Intel Corporation.
451 + * Copyright (c) 2018, Google Inc.
452 *
453 * This program is free software; you can redistribute it and/or modify it
454 * under the terms and conditions of the GNU General Public License,
455 @@ -16,14 +20,15 @@
456 * Place - Suite 330, Boston, MA 02111-1307 USA.
457 */
458
459 -/* --------------------------------------------------------------------------
460 - * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
461 - * This implementation is herby placed in the public domain.
462 - * The authors offers no warranty. Use at your own risk.
463 - * Please send bug reports to the authors.
464 - * Last modified: 17 APR 08, 1700 PDT
465 - * ----------------------------------------------------------------------- */
466 +/*
467 + * Derived from:
468 + * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
469 + * This implementation is herby placed in the public domain.
470 + * The authors offers no warranty. Use at your own risk.
471 + * Last modified: 17 APR 08, 1700 PDT
472 + */
473
474 +#include <asm/unaligned.h>
475 #include <linux/init.h>
476 #include <linux/types.h>
477 #include <linux/crypto.h>
478 @@ -31,9 +36,35 @@
479 #include <linux/scatterlist.h>
480 #include <asm/byteorder.h>
481 #include <crypto/scatterwalk.h>
482 -#include <crypto/vmac.h>
483 #include <crypto/internal/hash.h>
484
485 +/*
486 + * User definable settings.
487 + */
488 +#define VMAC_TAG_LEN 64
489 +#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
490 +#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
491 +#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
492 +
493 +/* per-transform (per-key) context */
494 +struct vmac_tfm_ctx {
495 + struct crypto_cipher *cipher;
496 + u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
497 + u64 polykey[2*VMAC_TAG_LEN/64];
498 + u64 l3key[2*VMAC_TAG_LEN/64];
499 +};
500 +
501 +/* per-request context */
502 +struct vmac_desc_ctx {
503 + union {
504 + u8 partial[VMAC_NHBYTES]; /* partial block */
505 + __le64 partial_words[VMAC_NHBYTES / 8];
506 + };
507 + unsigned int partial_size; /* size of the partial block */
508 + bool first_block_processed;
509 + u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
510 +};
511 +
512 /*
513 * Constants and masks
514 */
515 @@ -318,13 +349,6 @@ static void poly_step_func(u64 *ahi, u64 *alo,
516 } while (0)
517 #endif
518
519 -static void vhash_abort(struct vmac_ctx *ctx)
520 -{
521 - ctx->polytmp[0] = ctx->polykey[0] ;
522 - ctx->polytmp[1] = ctx->polykey[1] ;
523 - ctx->first_block_processed = 0;
524 -}
525 -
526 static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
527 {
528 u64 rh, rl, t, z = 0;
529 @@ -364,280 +388,209 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
530 return rl;
531 }
532
533 -static void vhash_update(const unsigned char *m,
534 - unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
535 - struct vmac_ctx *ctx)
536 +/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
537 +static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
538 + struct vmac_desc_ctx *dctx,
539 + const __le64 *mptr, unsigned int blocks)
540 {
541 - u64 rh, rl, *mptr;
542 - const u64 *kptr = (u64 *)ctx->nhkey;
543 - int i;
544 - u64 ch, cl;
545 - u64 pkh = ctx->polykey[0];
546 - u64 pkl = ctx->polykey[1];
547 -
548 - if (!mbytes)
549 - return;
550 -
551 - BUG_ON(mbytes % VMAC_NHBYTES);
552 -
553 - mptr = (u64 *)m;
554 - i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
555 -
556 - ch = ctx->polytmp[0];
557 - cl = ctx->polytmp[1];
558 -
559 - if (!ctx->first_block_processed) {
560 - ctx->first_block_processed = 1;
561 + const u64 *kptr = tctx->nhkey;
562 + const u64 pkh = tctx->polykey[0];
563 + const u64 pkl = tctx->polykey[1];
564 + u64 ch = dctx->polytmp[0];
565 + u64 cl = dctx->polytmp[1];
566 + u64 rh, rl;
567 +
568 + if (!dctx->first_block_processed) {
569 + dctx->first_block_processed = true;
570 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
571 rh &= m62;
572 ADD128(ch, cl, rh, rl);
573 mptr += (VMAC_NHBYTES/sizeof(u64));
574 - i--;
575 + blocks--;
576 }
577
578 - while (i--) {
579 + while (blocks--) {
580 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
581 rh &= m62;
582 poly_step(ch, cl, pkh, pkl, rh, rl);
583 mptr += (VMAC_NHBYTES/sizeof(u64));
584 }
585
586 - ctx->polytmp[0] = ch;
587 - ctx->polytmp[1] = cl;
588 + dctx->polytmp[0] = ch;
589 + dctx->polytmp[1] = cl;
590 }
591
592 -static u64 vhash(unsigned char m[], unsigned int mbytes,
593 - u64 *tagl, struct vmac_ctx *ctx)
594 +static int vmac_setkey(struct crypto_shash *tfm,
595 + const u8 *key, unsigned int keylen)
596 {
597 - u64 rh, rl, *mptr;
598 - const u64 *kptr = (u64 *)ctx->nhkey;
599 - int i, remaining;
600 - u64 ch, cl;
601 - u64 pkh = ctx->polykey[0];
602 - u64 pkl = ctx->polykey[1];
603 -
604 - mptr = (u64 *)m;
605 - i = mbytes / VMAC_NHBYTES;
606 - remaining = mbytes % VMAC_NHBYTES;
607 -
608 - if (ctx->first_block_processed) {
609 - ch = ctx->polytmp[0];
610 - cl = ctx->polytmp[1];
611 - } else if (i) {
612 - nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
613 - ch &= m62;
614 - ADD128(ch, cl, pkh, pkl);
615 - mptr += (VMAC_NHBYTES/sizeof(u64));
616 - i--;
617 - } else if (remaining) {
618 - nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
619 - ch &= m62;
620 - ADD128(ch, cl, pkh, pkl);
621 - mptr += (VMAC_NHBYTES/sizeof(u64));
622 - goto do_l3;
623 - } else {/* Empty String */
624 - ch = pkh; cl = pkl;
625 - goto do_l3;
626 - }
627 -
628 - while (i--) {
629 - nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
630 - rh &= m62;
631 - poly_step(ch, cl, pkh, pkl, rh, rl);
632 - mptr += (VMAC_NHBYTES/sizeof(u64));
633 - }
634 - if (remaining) {
635 - nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
636 - rh &= m62;
637 - poly_step(ch, cl, pkh, pkl, rh, rl);
638 - }
639 -
640 -do_l3:
641 - vhash_abort(ctx);
642 - remaining *= 8;
643 - return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
644 -}
645 + struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
646 + __be64 out[2];
647 + u8 in[16] = { 0 };
648 + unsigned int i;
649 + int err;
650
651 -static u64 vmac(unsigned char m[], unsigned int mbytes,
652 - const unsigned char n[16], u64 *tagl,
653 - struct vmac_ctx_t *ctx)
654 -{
655 - u64 *in_n, *out_p;
656 - u64 p, h;
657 - int i;
658 -
659 - in_n = ctx->__vmac_ctx.cached_nonce;
660 - out_p = ctx->__vmac_ctx.cached_aes;
661 -
662 - i = n[15] & 1;
663 - if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
664 - in_n[0] = *(u64 *)(n);
665 - in_n[1] = *(u64 *)(n+8);
666 - ((unsigned char *)in_n)[15] &= 0xFE;
667 - crypto_cipher_encrypt_one(ctx->child,
668 - (unsigned char *)out_p, (unsigned char *)in_n);
669 -
670 - ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
671 + if (keylen != VMAC_KEY_LEN) {
672 + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
673 + return -EINVAL;
674 }
675 - p = be64_to_cpup(out_p + i);
676 - h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
677 - return le64_to_cpu(p + h);
678 -}
679
680 -static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
681 -{
682 - u64 in[2] = {0}, out[2];
683 - unsigned i;
684 - int err = 0;
685 -
686 - err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
687 + err = crypto_cipher_setkey(tctx->cipher, key, keylen);
688 if (err)
689 return err;
690
691 /* Fill nh key */
692 - ((unsigned char *)in)[0] = 0x80;
693 - for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
694 - crypto_cipher_encrypt_one(ctx->child,
695 - (unsigned char *)out, (unsigned char *)in);
696 - ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
697 - ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
698 - ((unsigned char *)in)[15] += 1;
699 + in[0] = 0x80;
700 + for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
701 + crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
702 + tctx->nhkey[i] = be64_to_cpu(out[0]);
703 + tctx->nhkey[i+1] = be64_to_cpu(out[1]);
704 + in[15]++;
705 }
706
707 /* Fill poly key */
708 - ((unsigned char *)in)[0] = 0xC0;
709 - in[1] = 0;
710 - for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
711 - crypto_cipher_encrypt_one(ctx->child,
712 - (unsigned char *)out, (unsigned char *)in);
713 - ctx->__vmac_ctx.polytmp[i] =
714 - ctx->__vmac_ctx.polykey[i] =
715 - be64_to_cpup(out) & mpoly;
716 - ctx->__vmac_ctx.polytmp[i+1] =
717 - ctx->__vmac_ctx.polykey[i+1] =
718 - be64_to_cpup(out+1) & mpoly;
719 - ((unsigned char *)in)[15] += 1;
720 + in[0] = 0xC0;
721 + in[15] = 0;
722 + for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
723 + crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
724 + tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
725 + tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
726 + in[15]++;
727 }
728
729 /* Fill ip key */
730 - ((unsigned char *)in)[0] = 0xE0;
731 - in[1] = 0;
732 - for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
733 + in[0] = 0xE0;
734 + in[15] = 0;
735 + for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
736 do {
737 - crypto_cipher_encrypt_one(ctx->child,
738 - (unsigned char *)out, (unsigned char *)in);
739 - ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
740 - ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
741 - ((unsigned char *)in)[15] += 1;
742 - } while (ctx->__vmac_ctx.l3key[i] >= p64
743 - || ctx->__vmac_ctx.l3key[i+1] >= p64);
744 + crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
745 + tctx->l3key[i] = be64_to_cpu(out[0]);
746 + tctx->l3key[i+1] = be64_to_cpu(out[1]);
747 + in[15]++;
748 + } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
749 }
750
751 - /* Invalidate nonce/aes cache and reset other elements */
752 - ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
753 - ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
754 - ctx->__vmac_ctx.first_block_processed = 0;
755 -
756 - return err;
757 + return 0;
758 }
759
760 -static int vmac_setkey(struct crypto_shash *parent,
761 - const u8 *key, unsigned int keylen)
762 +static int vmac_init(struct shash_desc *desc)
763 {
764 - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
765 + const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
766 + struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
767
768 - if (keylen != VMAC_KEY_LEN) {
769 - crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
770 - return -EINVAL;
771 - }
772 -
773 - return vmac_set_key((u8 *)key, ctx);
774 -}
775 -
776 -static int vmac_init(struct shash_desc *pdesc)
777 -{
778 + dctx->partial_size = 0;
779 + dctx->first_block_processed = false;
780 + memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
781 return 0;
782 }
783
784 -static int vmac_update(struct shash_desc *pdesc, const u8 *p,
785 - unsigned int len)
786 +static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
787 {
788 - struct crypto_shash *parent = pdesc->tfm;
789 - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
790 - int expand;
791 - int min;
792 -
793 - expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
794 - VMAC_NHBYTES - ctx->partial_size : 0;
795 -
796 - min = len < expand ? len : expand;
797 -
798 - memcpy(ctx->partial + ctx->partial_size, p, min);
799 - ctx->partial_size += min;
800 -
801 - if (len < expand)
802 - return 0;
803 -
804 - vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
805 - ctx->partial_size = 0;
806 -
807 - len -= expand;
808 - p += expand;
809 + const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
810 + struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
811 + unsigned int n;
812 +
813 + if (dctx->partial_size) {
814 + n = min(len, VMAC_NHBYTES - dctx->partial_size);
815 + memcpy(&dctx->partial[dctx->partial_size], p, n);
816 + dctx->partial_size += n;
817 + p += n;
818 + len -= n;
819 + if (dctx->partial_size == VMAC_NHBYTES) {
820 + vhash_blocks(tctx, dctx, dctx->partial_words, 1);
821 + dctx->partial_size = 0;
822 + }
823 + }
824
825 - if (len % VMAC_NHBYTES) {
826 - memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
827 - len % VMAC_NHBYTES);
828 - ctx->partial_size = len % VMAC_NHBYTES;
829 + if (len >= VMAC_NHBYTES) {
830 + n = round_down(len, VMAC_NHBYTES);
831 + /* TODO: 'p' may be misaligned here */
832 + vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
833 + p += n;
834 + len -= n;
835 }
836
837 - vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
838 + if (len) {
839 + memcpy(dctx->partial, p, len);
840 + dctx->partial_size = len;
841 + }
842
843 return 0;
844 }
845
846 -static int vmac_final(struct shash_desc *pdesc, u8 *out)
847 +static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
848 + struct vmac_desc_ctx *dctx)
849 {
850 - struct crypto_shash *parent = pdesc->tfm;
851 - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
852 - vmac_t mac;
853 - u8 nonce[16] = {};
854 -
855 - /* vmac() ends up accessing outside the array bounds that
856 - * we specify. In appears to access up to the next 2-word
857 - * boundary. We'll just be uber cautious and zero the
858 - * unwritten bytes in the buffer.
859 - */
860 - if (ctx->partial_size) {
861 - memset(ctx->partial + ctx->partial_size, 0,
862 - VMAC_NHBYTES - ctx->partial_size);
863 + unsigned int partial = dctx->partial_size;
864 + u64 ch = dctx->polytmp[0];
865 + u64 cl = dctx->polytmp[1];
866 +
867 + /* L1 and L2-hash the final block if needed */
868 + if (partial) {
869 + /* Zero-pad to next 128-bit boundary */
870 + unsigned int n = round_up(partial, 16);
871 + u64 rh, rl;
872 +
873 + memset(&dctx->partial[partial], 0, n - partial);
874 + nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
875 + rh &= m62;
876 + if (dctx->first_block_processed)
877 + poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
878 + rh, rl);
879 + else
880 + ADD128(ch, cl, rh, rl);
881 }
882 - mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
883 - memcpy(out, &mac, sizeof(vmac_t));
884 - memzero_explicit(&mac, sizeof(vmac_t));
885 - memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
886 - ctx->partial_size = 0;
887 +
888 + /* L3-hash the 128-bit output of L2-hash */
889 + return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
890 +}
891 +
892 +static int vmac_final(struct shash_desc *desc, u8 *out)
893 +{
894 + const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
895 + struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
896 + static const u8 nonce[16] = {}; /* TODO: this is insecure */
897 + union {
898 + u8 bytes[16];
899 + __be64 pads[2];
900 + } block;
901 + int index;
902 + u64 hash, pad;
903 +
904 + /* Finish calculating the VHASH of the message */
905 + hash = vhash_final(tctx, dctx);
906 +
907 + /* Generate pseudorandom pad by encrypting the nonce */
908 + memcpy(&block, nonce, 16);
909 + index = block.bytes[15] & 1;
910 + block.bytes[15] &= ~1;
911 + crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes);
912 + pad = be64_to_cpu(block.pads[index]);
913 +
914 + /* The VMAC is the sum of VHASH and the pseudorandom pad */
915 + put_unaligned_le64(hash + pad, out);
916 return 0;
917 }
918
919 static int vmac_init_tfm(struct crypto_tfm *tfm)
920 {
921 - struct crypto_cipher *cipher;
922 - struct crypto_instance *inst = (void *)tfm->__crt_alg;
923 + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
924 struct crypto_spawn *spawn = crypto_instance_ctx(inst);
925 - struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
926 + struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
927 + struct crypto_cipher *cipher;
928
929 cipher = crypto_spawn_cipher(spawn);
930 if (IS_ERR(cipher))
931 return PTR_ERR(cipher);
932
933 - ctx->child = cipher;
934 + tctx->cipher = cipher;
935 return 0;
936 }
937
938 static void vmac_exit_tfm(struct crypto_tfm *tfm)
939 {
940 - struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
941 - crypto_free_cipher(ctx->child);
942 + struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
943 +
944 + crypto_free_cipher(tctx->cipher);
945 }
946
947 static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
948 @@ -655,6 +608,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
949 if (IS_ERR(alg))
950 return PTR_ERR(alg);
951
952 + err = -EINVAL;
953 + if (alg->cra_blocksize != 16)
954 + goto out_put_alg;
955 +
956 inst = shash_alloc_instance("vmac", alg);
957 err = PTR_ERR(inst);
958 if (IS_ERR(inst))
959 @@ -670,11 +627,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
960 inst->alg.base.cra_blocksize = alg->cra_blocksize;
961 inst->alg.base.cra_alignmask = alg->cra_alignmask;
962
963 - inst->alg.digestsize = sizeof(vmac_t);
964 - inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
965 + inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
966 inst->alg.base.cra_init = vmac_init_tfm;
967 inst->alg.base.cra_exit = vmac_exit_tfm;
968
969 + inst->alg.descsize = sizeof(struct vmac_desc_ctx);
970 + inst->alg.digestsize = VMAC_TAG_LEN / 8;
971 inst->alg.init = vmac_init;
972 inst->alg.update = vmac_update;
973 inst->alg.final = vmac_final;
974 diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
975 index 7aea28815d99..b51adffa4841 100644
976 --- a/drivers/i2c/busses/i2c-ismt.c
977 +++ b/drivers/i2c/busses/i2c-ismt.c
978 @@ -589,7 +589,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
979
980 /* unmap the data buffer */
981 if (dma_size != 0)
982 - dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction);
983 + dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
984
985 if (unlikely(!time_left)) {
986 dev_err(dev, "completion wait timed out\n");
987 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
988 index a88ea9e37a25..0a4c2d4d9f8d 100644
989 --- a/include/asm-generic/pgtable.h
990 +++ b/include/asm-generic/pgtable.h
991 @@ -779,8 +779,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
992 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
993 int pud_clear_huge(pud_t *pud);
994 int pmd_clear_huge(pmd_t *pmd);
995 -int pud_free_pmd_page(pud_t *pud);
996 -int pmd_free_pte_page(pmd_t *pmd);
997 +int pud_free_pmd_page(pud_t *pud, unsigned long addr);
998 +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
999 #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
1000 static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1001 {
1002 @@ -798,11 +798,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
1003 {
1004 return 0;
1005 }
1006 -static inline int pud_free_pmd_page(pud_t *pud)
1007 +static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1008 {
1009 return 0;
1010 }
1011 -static inline int pmd_free_pte_page(pmd_t *pmd)
1012 +static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1013 {
1014 return 0;
1015 }
1016 diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
1017 deleted file mode 100644
1018 index 6b700c7b2fe1..000000000000
1019 --- a/include/crypto/vmac.h
1020 +++ /dev/null
1021 @@ -1,63 +0,0 @@
1022 -/*
1023 - * Modified to interface to the Linux kernel
1024 - * Copyright (c) 2009, Intel Corporation.
1025 - *
1026 - * This program is free software; you can redistribute it and/or modify it
1027 - * under the terms and conditions of the GNU General Public License,
1028 - * version 2, as published by the Free Software Foundation.
1029 - *
1030 - * This program is distributed in the hope it will be useful, but WITHOUT
1031 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1032 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1033 - * more details.
1034 - *
1035 - * You should have received a copy of the GNU General Public License along with
1036 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
1037 - * Place - Suite 330, Boston, MA 02111-1307 USA.
1038 - */
1039 -
1040 -#ifndef __CRYPTO_VMAC_H
1041 -#define __CRYPTO_VMAC_H
1042 -
1043 -/* --------------------------------------------------------------------------
1044 - * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
1045 - * This implementation is herby placed in the public domain.
1046 - * The authors offers no warranty. Use at your own risk.
1047 - * Please send bug reports to the authors.
1048 - * Last modified: 17 APR 08, 1700 PDT
1049 - * ----------------------------------------------------------------------- */
1050 -
1051 -/*
1052 - * User definable settings.
1053 - */
1054 -#define VMAC_TAG_LEN 64
1055 -#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
1056 -#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
1057 -#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
1058 -
1059 -/*
1060 - * This implementation uses u32 and u64 as names for unsigned 32-
1061 - * and 64-bit integer types. These are defined in C99 stdint.h. The
1062 - * following may need adaptation if you are not running a C99 or
1063 - * Microsoft C environment.
1064 - */
1065 -struct vmac_ctx {
1066 - u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
1067 - u64 polykey[2*VMAC_TAG_LEN/64];
1068 - u64 l3key[2*VMAC_TAG_LEN/64];
1069 - u64 polytmp[2*VMAC_TAG_LEN/64];
1070 - u64 cached_nonce[2];
1071 - u64 cached_aes[2];
1072 - int first_block_processed;
1073 -};
1074 -
1075 -typedef u64 vmac_t;
1076 -
1077 -struct vmac_ctx_t {
1078 - struct crypto_cipher *child;
1079 - struct vmac_ctx __vmac_ctx;
1080 - u8 partial[VMAC_NHBYTES]; /* partial block */
1081 - int partial_size; /* size of the partial block */
1082 -};
1083 -
1084 -#endif /* __CRYPTO_VMAC_H */
1085 diff --git a/lib/ioremap.c b/lib/ioremap.c
1086 index 5323b59ca393..b9462037868d 100644
1087 --- a/lib/ioremap.c
1088 +++ b/lib/ioremap.c
1089 @@ -84,7 +84,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
1090 if (ioremap_pmd_enabled() &&
1091 ((next - addr) == PMD_SIZE) &&
1092 IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
1093 - pmd_free_pte_page(pmd)) {
1094 + pmd_free_pte_page(pmd, addr)) {
1095 if (pmd_set_huge(pmd, phys_addr + addr, prot))
1096 continue;
1097 }
1098 @@ -111,7 +111,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
1099 if (ioremap_pud_enabled() &&
1100 ((next - addr) == PUD_SIZE) &&
1101 IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
1102 - pud_free_pmd_page(pud)) {
1103 + pud_free_pmd_page(pud, addr)) {
1104 if (pud_set_huge(pud, phys_addr + addr, prot))
1105 continue;
1106 }
1107 diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
1108 index 1fc076420d1e..1811f8e7ddf4 100644
1109 --- a/net/bluetooth/hidp/core.c
1110 +++ b/net/bluetooth/hidp/core.c
1111 @@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session)
1112 del_timer(&session->timer);
1113 }
1114
1115 -static void hidp_process_report(struct hidp_session *session,
1116 - int type, const u8 *data, int len, int intr)
1117 +static void hidp_process_report(struct hidp_session *session, int type,
1118 + const u8 *data, unsigned int len, int intr)
1119 {
1120 if (len > HID_MAX_BUFFER_SIZE)
1121 len = HID_MAX_BUFFER_SIZE;
1122 diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
1123 index 37323b0df374..2624d4bf9a45 100644
1124 --- a/scripts/Makefile.kasan
1125 +++ b/scripts/Makefile.kasan
1126 @@ -28,4 +28,7 @@ else
1127 CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL)
1128 endif
1129 endif
1130 +
1131 +CFLAGS_KASAN_NOSANITIZE := -fno-builtin
1132 +
1133 endif
1134 diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
1135 index ae0f9ab1a70d..c954040c3cf2 100644
1136 --- a/scripts/Makefile.lib
1137 +++ b/scripts/Makefile.lib
1138 @@ -127,7 +127,7 @@ endif
1139 ifeq ($(CONFIG_KASAN),y)
1140 _c_flags += $(if $(patsubst n%,, \
1141 $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
1142 - $(CFLAGS_KASAN))
1143 + $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
1144 endif
1145
1146 ifeq ($(CONFIG_UBSAN),y)
1147 diff --git a/scripts/depmod.sh b/scripts/depmod.sh
1148 index 122599b1c13b..ea1e96921e3b 100755
1149 --- a/scripts/depmod.sh
1150 +++ b/scripts/depmod.sh
1151 @@ -10,10 +10,16 @@ DEPMOD=$1
1152 KERNELRELEASE=$2
1153 SYMBOL_PREFIX=$3
1154
1155 -if ! test -r System.map -a -x "$DEPMOD"; then
1156 +if ! test -r System.map ; then
1157 exit 0
1158 fi
1159
1160 +if [ -z $(command -v $DEPMOD) ]; then
1161 + echo "'make modules_install' requires $DEPMOD. Please install it." >&2
1162 + echo "This is probably in the kmod package." >&2
1163 + exit 1
1164 +fi
1165 +
1166 # older versions of depmod don't support -P <symbol-prefix>
1167 # support was added in module-init-tools 3.13
1168 if test -n "$SYMBOL_PREFIX"; then
1169 diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
1170 index cdcced9f32b6..b7c1e3d74ccc 100644
1171 --- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
1172 +++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
1173 @@ -128,23 +128,19 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
1174 struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
1175 struct snd_soc_jack *jack = &ctx->jack;
1176
1177 - /**
1178 - * TI supports 4 butons headset detection
1179 - * KEY_MEDIA
1180 - * KEY_VOICECOMMAND
1181 - * KEY_VOLUMEUP
1182 - * KEY_VOLUMEDOWN
1183 - */
1184 - if (ctx->ts3a227e_present)
1185 - jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
1186 - SND_JACK_BTN_0 | SND_JACK_BTN_1 |
1187 - SND_JACK_BTN_2 | SND_JACK_BTN_3;
1188 - else
1189 - jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
1190 + if (ctx->ts3a227e_present) {
1191 + /*
1192 + * The jack has already been created in the
1193 + * cht_max98090_headset_init() function.
1194 + */
1195 + snd_soc_jack_notifier_register(jack, &cht_jack_nb);
1196 + return 0;
1197 + }
1198 +
1199 + jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
1200
1201 ret = snd_soc_card_jack_new(runtime->card, "Headset Jack",
1202 jack_type, jack, NULL, 0);
1203 -
1204 if (ret) {
1205 dev_err(runtime->dev, "Headset Jack creation failed %d\n", ret);
1206 return ret;
1207 @@ -200,6 +196,27 @@ static int cht_max98090_headset_init(struct snd_soc_component *component)
1208 {
1209 struct snd_soc_card *card = component->card;
1210 struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
1211 + struct snd_soc_jack *jack = &ctx->jack;
1212 + int jack_type;
1213 + int ret;
1214 +
1215 + /*
1216 + * TI supports 4 butons headset detection
1217 + * KEY_MEDIA
1218 + * KEY_VOICECOMMAND
1219 + * KEY_VOLUMEUP
1220 + * KEY_VOLUMEDOWN
1221 + */
1222 + jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
1223 + SND_JACK_BTN_0 | SND_JACK_BTN_1 |
1224 + SND_JACK_BTN_2 | SND_JACK_BTN_3;
1225 +
1226 + ret = snd_soc_card_jack_new(card, "Headset Jack", jack_type,
1227 + jack, NULL, 0);
1228 + if (ret) {
1229 + dev_err(card->dev, "Headset Jack creation failed %d\n", ret);
1230 + return ret;
1231 + }
1232
1233 return ts3a227e_enable_jack_detect(component, &ctx->jack);
1234 }