Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0163-4.14.64-all-fixes.patch

Parent Directory | Revision Log


Revision 3238 - (show annotations) (download)
Fri Nov 9 12:14:58 2018 UTC (5 years, 5 months ago) by niro
File size: 45437 byte(s)
-added up to patches-4.14.79
1 diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
2 index 560beaef5a7c..73fcdcd52b87 100644
3 --- a/Documentation/process/changes.rst
4 +++ b/Documentation/process/changes.rst
5 @@ -33,7 +33,7 @@ GNU C 3.2 gcc --version
6 GNU make 3.81 make --version
7 binutils 2.20 ld -v
8 util-linux 2.10o fdformat --version
9 -module-init-tools 0.9.10 depmod -V
10 +kmod 13 depmod -V
11 e2fsprogs 1.41.4 e2fsck -V
12 jfsutils 1.1.3 fsck.jfs -V
13 reiserfsprogs 3.6.3 reiserfsck -V
14 @@ -141,12 +141,6 @@ is not build with ``CONFIG_KALLSYMS`` and you have no way to rebuild and
15 reproduce the Oops with that option, then you can still decode that Oops
16 with ksymoops.
17
18 -Module-Init-Tools
19 ------------------
20 -
21 -A new module loader is now in the kernel that requires ``module-init-tools``
22 -to use. It is backward compatible with the 2.4.x series kernels.
23 -
24 Mkinitrd
25 --------
26
27 @@ -346,16 +340,17 @@ Util-linux
28
29 - <https://www.kernel.org/pub/linux/utils/util-linux/>
30
31 +Kmod
32 +----
33 +
34 +- <https://www.kernel.org/pub/linux/utils/kernel/kmod/>
35 +- <https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git>
36 +
37 Ksymoops
38 --------
39
40 - <https://www.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
41
42 -Module-Init-Tools
43 ------------------
44 -
45 -- <https://www.kernel.org/pub/linux/utils/kernel/module-init-tools/>
46 -
47 Mkinitrd
48 --------
49
50 diff --git a/Makefile b/Makefile
51 index f3bb9428b3dc..025156791e90 100644
52 --- a/Makefile
53 +++ b/Makefile
54 @@ -1,7 +1,7 @@
55 # SPDX-License-Identifier: GPL-2.0
56 VERSION = 4
57 PATCHLEVEL = 14
58 -SUBLEVEL = 63
59 +SUBLEVEL = 64
60 EXTRAVERSION =
61 NAME = Petit Gorille
62
63 diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
64 index f6b877d2726d..6ac0d32d60a5 100644
65 --- a/arch/arm64/mm/mmu.c
66 +++ b/arch/arm64/mm/mmu.c
67 @@ -938,12 +938,12 @@ int pmd_clear_huge(pmd_t *pmd)
68 return 1;
69 }
70
71 -int pud_free_pmd_page(pud_t *pud)
72 +int pud_free_pmd_page(pud_t *pud, unsigned long addr)
73 {
74 return pud_none(*pud);
75 }
76
77 -int pmd_free_pte_page(pmd_t *pmd)
78 +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
79 {
80 return pmd_none(*pmd);
81 }
82 diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
83 index 16c4ccb1f154..d2364c55bbde 100644
84 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
85 +++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
86 @@ -265,7 +265,7 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
87 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
88 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
89 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
90 - vmovd _args_digest(state , idx, 4) , %xmm0
91 + vmovd _args_digest+4*32(state, idx, 4), %xmm1
92 vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
93 vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
94 vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
95 diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
96 index 5cdcdbd4d892..89789e8c80f6 100644
97 --- a/arch/x86/include/asm/i8259.h
98 +++ b/arch/x86/include/asm/i8259.h
99 @@ -3,6 +3,7 @@
100 #define _ASM_X86_I8259_H
101
102 #include <linux/delay.h>
103 +#include <asm/io.h>
104
105 extern unsigned int cached_irq_mask;
106
107 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
108 index edfc64a8a154..d07addb99b71 100644
109 --- a/arch/x86/kernel/cpu/bugs.c
110 +++ b/arch/x86/kernel/cpu/bugs.c
111 @@ -648,10 +648,9 @@ void x86_spec_ctrl_setup_ap(void)
112 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
113 #if IS_ENABLED(CONFIG_KVM_INTEL)
114 EXPORT_SYMBOL_GPL(l1tf_mitigation);
115 -
116 +#endif
117 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
118 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
119 -#endif
120
121 static void __init l1tf_select_mitigation(void)
122 {
123 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
124 index c03c85e4fb6a..2bdb8e8a9d7c 100644
125 --- a/arch/x86/mm/pgtable.c
126 +++ b/arch/x86/mm/pgtable.c
127 @@ -712,28 +712,50 @@ int pmd_clear_huge(pmd_t *pmd)
128 return 0;
129 }
130
131 +#ifdef CONFIG_X86_64
132 /**
133 * pud_free_pmd_page - Clear pud entry and free pmd page.
134 * @pud: Pointer to a PUD.
135 + * @addr: Virtual address associated with pud.
136 *
137 - * Context: The pud range has been unmaped and TLB purged.
138 + * Context: The pud range has been unmapped and TLB purged.
139 * Return: 1 if clearing the entry succeeded. 0 otherwise.
140 + *
141 + * NOTE: Callers must allow a single page allocation.
142 */
143 -int pud_free_pmd_page(pud_t *pud)
144 +int pud_free_pmd_page(pud_t *pud, unsigned long addr)
145 {
146 - pmd_t *pmd;
147 + pmd_t *pmd, *pmd_sv;
148 + pte_t *pte;
149 int i;
150
151 if (pud_none(*pud))
152 return 1;
153
154 pmd = (pmd_t *)pud_page_vaddr(*pud);
155 + pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
156 + if (!pmd_sv)
157 + return 0;
158
159 - for (i = 0; i < PTRS_PER_PMD; i++)
160 - if (!pmd_free_pte_page(&pmd[i]))
161 - return 0;
162 + for (i = 0; i < PTRS_PER_PMD; i++) {
163 + pmd_sv[i] = pmd[i];
164 + if (!pmd_none(pmd[i]))
165 + pmd_clear(&pmd[i]);
166 + }
167
168 pud_clear(pud);
169 +
170 + /* INVLPG to clear all paging-structure caches */
171 + flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
172 +
173 + for (i = 0; i < PTRS_PER_PMD; i++) {
174 + if (!pmd_none(pmd_sv[i])) {
175 + pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
176 + free_page((unsigned long)pte);
177 + }
178 + }
179 +
180 + free_page((unsigned long)pmd_sv);
181 free_page((unsigned long)pmd);
182
183 return 1;
184 @@ -742,11 +764,12 @@ int pud_free_pmd_page(pud_t *pud)
185 /**
186 * pmd_free_pte_page - Clear pmd entry and free pte page.
187 * @pmd: Pointer to a PMD.
188 + * @addr: Virtual address associated with pmd.
189 *
190 - * Context: The pmd range has been unmaped and TLB purged.
191 + * Context: The pmd range has been unmapped and TLB purged.
192 * Return: 1 if clearing the entry succeeded. 0 otherwise.
193 */
194 -int pmd_free_pte_page(pmd_t *pmd)
195 +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
196 {
197 pte_t *pte;
198
199 @@ -755,8 +778,30 @@ int pmd_free_pte_page(pmd_t *pmd)
200
201 pte = (pte_t *)pmd_page_vaddr(*pmd);
202 pmd_clear(pmd);
203 +
204 + /* INVLPG to clear all paging-structure caches */
205 + flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
206 +
207 free_page((unsigned long)pte);
208
209 return 1;
210 }
211 +
212 +#else /* !CONFIG_X86_64 */
213 +
214 +int pud_free_pmd_page(pud_t *pud, unsigned long addr)
215 +{
216 + return pud_none(*pud);
217 +}
218 +
219 +/*
220 + * Disable free page handling on x86-PAE. This assures that ioremap()
221 + * does not update sync'd pmd entries. See vmalloc_sync_one().
222 + */
223 +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
224 +{
225 + return pmd_none(*pmd);
226 +}
227 +
228 +#endif /* CONFIG_X86_64 */
229 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
230 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
231 index 4a4b7d3c909a..3b44bd28fc45 100644
232 --- a/block/bfq-iosched.c
233 +++ b/block/bfq-iosched.c
234 @@ -1203,6 +1203,24 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
235 return dur;
236 }
237
238 +/*
239 + * Return the farthest future time instant according to jiffies
240 + * macros.
241 + */
242 +static unsigned long bfq_greatest_from_now(void)
243 +{
244 + return jiffies + MAX_JIFFY_OFFSET;
245 +}
246 +
247 +/*
248 + * Return the farthest past time instant according to jiffies
249 + * macros.
250 + */
251 +static unsigned long bfq_smallest_from_now(void)
252 +{
253 + return jiffies - MAX_JIFFY_OFFSET;
254 +}
255 +
256 static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
257 struct bfq_queue *bfqq,
258 unsigned int old_wr_coeff,
259 @@ -1217,7 +1235,19 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
260 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
261 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
262 } else {
263 - bfqq->wr_start_at_switch_to_srt = jiffies;
264 + /*
265 + * No interactive weight raising in progress
266 + * here: assign minus infinity to
267 + * wr_start_at_switch_to_srt, to make sure
268 + * that, at the end of the soft-real-time
269 + * weight raising periods that is starting
270 + * now, no interactive weight-raising period
271 + * may be wrongly considered as still in
272 + * progress (and thus actually started by
273 + * mistake).
274 + */
275 + bfqq->wr_start_at_switch_to_srt =
276 + bfq_smallest_from_now();
277 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
278 BFQ_SOFTRT_WEIGHT_FACTOR;
279 bfqq->wr_cur_max_time =
280 @@ -2896,24 +2926,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
281 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
282 }
283
284 -/*
285 - * Return the farthest future time instant according to jiffies
286 - * macros.
287 - */
288 -static unsigned long bfq_greatest_from_now(void)
289 -{
290 - return jiffies + MAX_JIFFY_OFFSET;
291 -}
292 -
293 -/*
294 - * Return the farthest past time instant according to jiffies
295 - * macros.
296 - */
297 -static unsigned long bfq_smallest_from_now(void)
298 -{
299 - return jiffies - MAX_JIFFY_OFFSET;
300 -}
301 -
302 /**
303 * bfq_bfqq_expire - expire a queue.
304 * @bfqd: device owning the queue.
305 diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
306 index d880a4897159..4ee7c041bb82 100644
307 --- a/crypto/ablkcipher.c
308 +++ b/crypto/ablkcipher.c
309 @@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
310 return max(start, end_page);
311 }
312
313 -static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
314 - unsigned int bsize)
315 +static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
316 + unsigned int n)
317 {
318 - unsigned int n = bsize;
319 -
320 for (;;) {
321 unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
322
323 @@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
324 n -= len_this_page;
325 scatterwalk_start(&walk->out, sg_next(walk->out.sg));
326 }
327 -
328 - return bsize;
329 }
330
331 -static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
332 - unsigned int n)
333 +static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
334 + unsigned int n)
335 {
336 scatterwalk_advance(&walk->in, n);
337 scatterwalk_advance(&walk->out, n);
338 -
339 - return n;
340 }
341
342 static int ablkcipher_walk_next(struct ablkcipher_request *req,
343 @@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
344 struct ablkcipher_walk *walk, int err)
345 {
346 struct crypto_tfm *tfm = req->base.tfm;
347 - unsigned int nbytes = 0;
348 + unsigned int n; /* bytes processed */
349 + bool more;
350
351 - if (likely(err >= 0)) {
352 - unsigned int n = walk->nbytes - err;
353 + if (unlikely(err < 0))
354 + goto finish;
355
356 - if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
357 - n = ablkcipher_done_fast(walk, n);
358 - else if (WARN_ON(err)) {
359 - err = -EINVAL;
360 - goto err;
361 - } else
362 - n = ablkcipher_done_slow(walk, n);
363 + n = walk->nbytes - err;
364 + walk->total -= n;
365 + more = (walk->total != 0);
366
367 - nbytes = walk->total - n;
368 - err = 0;
369 + if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
370 + ablkcipher_done_fast(walk, n);
371 + } else {
372 + if (WARN_ON(err)) {
373 + /* unexpected case; didn't process all bytes */
374 + err = -EINVAL;
375 + goto finish;
376 + }
377 + ablkcipher_done_slow(walk, n);
378 }
379
380 - scatterwalk_done(&walk->in, 0, nbytes);
381 - scatterwalk_done(&walk->out, 1, nbytes);
382 -
383 -err:
384 - walk->total = nbytes;
385 - walk->nbytes = nbytes;
386 + scatterwalk_done(&walk->in, 0, more);
387 + scatterwalk_done(&walk->out, 1, more);
388
389 - if (nbytes) {
390 + if (more) {
391 crypto_yield(req->base.flags);
392 return ablkcipher_walk_next(req, walk);
393 }
394 -
395 + err = 0;
396 +finish:
397 + walk->nbytes = 0;
398 if (walk->iv != req->info)
399 memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
400 kfree(walk->iv_buffer);
401 -
402 return err;
403 }
404 EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
405 diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
406 index 6c43a0a17a55..d84c6920ada9 100644
407 --- a/crypto/blkcipher.c
408 +++ b/crypto/blkcipher.c
409 @@ -71,19 +71,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
410 return max(start, end_page);
411 }
412
413 -static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
414 - unsigned int bsize)
415 +static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
416 + unsigned int bsize)
417 {
418 u8 *addr;
419
420 addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
421 addr = blkcipher_get_spot(addr, bsize);
422 scatterwalk_copychunks(addr, &walk->out, bsize, 1);
423 - return bsize;
424 }
425
426 -static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
427 - unsigned int n)
428 +static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
429 + unsigned int n)
430 {
431 if (walk->flags & BLKCIPHER_WALK_COPY) {
432 blkcipher_map_dst(walk);
433 @@ -97,49 +96,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
434
435 scatterwalk_advance(&walk->in, n);
436 scatterwalk_advance(&walk->out, n);
437 -
438 - return n;
439 }
440
441 int blkcipher_walk_done(struct blkcipher_desc *desc,
442 struct blkcipher_walk *walk, int err)
443 {
444 - unsigned int nbytes = 0;
445 + unsigned int n; /* bytes processed */
446 + bool more;
447
448 - if (likely(err >= 0)) {
449 - unsigned int n = walk->nbytes - err;
450 + if (unlikely(err < 0))
451 + goto finish;
452
453 - if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
454 - n = blkcipher_done_fast(walk, n);
455 - else if (WARN_ON(err)) {
456 - err = -EINVAL;
457 - goto err;
458 - } else
459 - n = blkcipher_done_slow(walk, n);
460 + n = walk->nbytes - err;
461 + walk->total -= n;
462 + more = (walk->total != 0);
463
464 - nbytes = walk->total - n;
465 - err = 0;
466 + if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
467 + blkcipher_done_fast(walk, n);
468 + } else {
469 + if (WARN_ON(err)) {
470 + /* unexpected case; didn't process all bytes */
471 + err = -EINVAL;
472 + goto finish;
473 + }
474 + blkcipher_done_slow(walk, n);
475 }
476
477 - scatterwalk_done(&walk->in, 0, nbytes);
478 - scatterwalk_done(&walk->out, 1, nbytes);
479 + scatterwalk_done(&walk->in, 0, more);
480 + scatterwalk_done(&walk->out, 1, more);
481
482 -err:
483 - walk->total = nbytes;
484 - walk->nbytes = nbytes;
485 -
486 - if (nbytes) {
487 + if (more) {
488 crypto_yield(desc->flags);
489 return blkcipher_walk_next(desc, walk);
490 }
491 -
492 + err = 0;
493 +finish:
494 + walk->nbytes = 0;
495 if (walk->iv != desc->info)
496 memcpy(desc->info, walk->iv, walk->ivsize);
497 if (walk->buffer != walk->page)
498 kfree(walk->buffer);
499 if (walk->page)
500 free_page((unsigned long)walk->page);
501 -
502 return err;
503 }
504 EXPORT_SYMBOL_GPL(blkcipher_walk_done);
505 diff --git a/crypto/skcipher.c b/crypto/skcipher.c
506 index 11af5fd6a443..e319421a32e7 100644
507 --- a/crypto/skcipher.c
508 +++ b/crypto/skcipher.c
509 @@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
510 return max(start, end_page);
511 }
512
513 -static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
514 +static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
515 {
516 u8 *addr;
517
518 @@ -103,23 +103,24 @@ static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
519 addr = skcipher_get_spot(addr, bsize);
520 scatterwalk_copychunks(addr, &walk->out, bsize,
521 (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
522 - return 0;
523 }
524
525 int skcipher_walk_done(struct skcipher_walk *walk, int err)
526 {
527 - unsigned int n = walk->nbytes - err;
528 - unsigned int nbytes;
529 + unsigned int n; /* bytes processed */
530 + bool more;
531 +
532 + if (unlikely(err < 0))
533 + goto finish;
534
535 - nbytes = walk->total - n;
536 + n = walk->nbytes - err;
537 + walk->total -= n;
538 + more = (walk->total != 0);
539
540 - if (unlikely(err < 0)) {
541 - nbytes = 0;
542 - n = 0;
543 - } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
544 - SKCIPHER_WALK_SLOW |
545 - SKCIPHER_WALK_COPY |
546 - SKCIPHER_WALK_DIFF)))) {
547 + if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
548 + SKCIPHER_WALK_SLOW |
549 + SKCIPHER_WALK_COPY |
550 + SKCIPHER_WALK_DIFF)))) {
551 unmap_src:
552 skcipher_unmap_src(walk);
553 } else if (walk->flags & SKCIPHER_WALK_DIFF) {
554 @@ -131,28 +132,28 @@ unmap_src:
555 skcipher_unmap_dst(walk);
556 } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
557 if (WARN_ON(err)) {
558 + /* unexpected case; didn't process all bytes */
559 err = -EINVAL;
560 - nbytes = 0;
561 - } else
562 - n = skcipher_done_slow(walk, n);
563 + goto finish;
564 + }
565 + skcipher_done_slow(walk, n);
566 + goto already_advanced;
567 }
568
569 - if (err > 0)
570 - err = 0;
571 -
572 - walk->total = nbytes;
573 - walk->nbytes = nbytes;
574 -
575 scatterwalk_advance(&walk->in, n);
576 scatterwalk_advance(&walk->out, n);
577 - scatterwalk_done(&walk->in, 0, nbytes);
578 - scatterwalk_done(&walk->out, 1, nbytes);
579 +already_advanced:
580 + scatterwalk_done(&walk->in, 0, more);
581 + scatterwalk_done(&walk->out, 1, more);
582
583 - if (nbytes) {
584 + if (more) {
585 crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
586 CRYPTO_TFM_REQ_MAY_SLEEP : 0);
587 return skcipher_walk_next(walk);
588 }
589 + err = 0;
590 +finish:
591 + walk->nbytes = 0;
592
593 /* Short-circuit for the common/fast path. */
594 if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
595 @@ -399,7 +400,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
596 unsigned size;
597 u8 *iv;
598
599 - aligned_bs = ALIGN(bs, alignmask);
600 + aligned_bs = ALIGN(bs, alignmask + 1);
601
602 /* Minimum size to align buffer by alignmask. */
603 size = alignmask & ~a;
604 diff --git a/crypto/vmac.c b/crypto/vmac.c
605 index df76a816cfb2..bb2fc787d615 100644
606 --- a/crypto/vmac.c
607 +++ b/crypto/vmac.c
608 @@ -1,6 +1,10 @@
609 /*
610 - * Modified to interface to the Linux kernel
611 + * VMAC: Message Authentication Code using Universal Hashing
612 + *
613 + * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
614 + *
615 * Copyright (c) 2009, Intel Corporation.
616 + * Copyright (c) 2018, Google Inc.
617 *
618 * This program is free software; you can redistribute it and/or modify it
619 * under the terms and conditions of the GNU General Public License,
620 @@ -16,14 +20,15 @@
621 * Place - Suite 330, Boston, MA 02111-1307 USA.
622 */
623
624 -/* --------------------------------------------------------------------------
625 - * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
626 - * This implementation is herby placed in the public domain.
627 - * The authors offers no warranty. Use at your own risk.
628 - * Please send bug reports to the authors.
629 - * Last modified: 17 APR 08, 1700 PDT
630 - * ----------------------------------------------------------------------- */
631 +/*
632 + * Derived from:
633 + * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
634 + * This implementation is herby placed in the public domain.
635 + * The authors offers no warranty. Use at your own risk.
636 + * Last modified: 17 APR 08, 1700 PDT
637 + */
638
639 +#include <asm/unaligned.h>
640 #include <linux/init.h>
641 #include <linux/types.h>
642 #include <linux/crypto.h>
643 @@ -31,9 +36,35 @@
644 #include <linux/scatterlist.h>
645 #include <asm/byteorder.h>
646 #include <crypto/scatterwalk.h>
647 -#include <crypto/vmac.h>
648 #include <crypto/internal/hash.h>
649
650 +/*
651 + * User definable settings.
652 + */
653 +#define VMAC_TAG_LEN 64
654 +#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
655 +#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
656 +#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
657 +
658 +/* per-transform (per-key) context */
659 +struct vmac_tfm_ctx {
660 + struct crypto_cipher *cipher;
661 + u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
662 + u64 polykey[2*VMAC_TAG_LEN/64];
663 + u64 l3key[2*VMAC_TAG_LEN/64];
664 +};
665 +
666 +/* per-request context */
667 +struct vmac_desc_ctx {
668 + union {
669 + u8 partial[VMAC_NHBYTES]; /* partial block */
670 + __le64 partial_words[VMAC_NHBYTES / 8];
671 + };
672 + unsigned int partial_size; /* size of the partial block */
673 + bool first_block_processed;
674 + u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
675 +};
676 +
677 /*
678 * Constants and masks
679 */
680 @@ -318,13 +349,6 @@ static void poly_step_func(u64 *ahi, u64 *alo,
681 } while (0)
682 #endif
683
684 -static void vhash_abort(struct vmac_ctx *ctx)
685 -{
686 - ctx->polytmp[0] = ctx->polykey[0] ;
687 - ctx->polytmp[1] = ctx->polykey[1] ;
688 - ctx->first_block_processed = 0;
689 -}
690 -
691 static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
692 {
693 u64 rh, rl, t, z = 0;
694 @@ -364,280 +388,209 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
695 return rl;
696 }
697
698 -static void vhash_update(const unsigned char *m,
699 - unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
700 - struct vmac_ctx *ctx)
701 +/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
702 +static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
703 + struct vmac_desc_ctx *dctx,
704 + const __le64 *mptr, unsigned int blocks)
705 {
706 - u64 rh, rl, *mptr;
707 - const u64 *kptr = (u64 *)ctx->nhkey;
708 - int i;
709 - u64 ch, cl;
710 - u64 pkh = ctx->polykey[0];
711 - u64 pkl = ctx->polykey[1];
712 -
713 - if (!mbytes)
714 - return;
715 -
716 - BUG_ON(mbytes % VMAC_NHBYTES);
717 -
718 - mptr = (u64 *)m;
719 - i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
720 -
721 - ch = ctx->polytmp[0];
722 - cl = ctx->polytmp[1];
723 -
724 - if (!ctx->first_block_processed) {
725 - ctx->first_block_processed = 1;
726 + const u64 *kptr = tctx->nhkey;
727 + const u64 pkh = tctx->polykey[0];
728 + const u64 pkl = tctx->polykey[1];
729 + u64 ch = dctx->polytmp[0];
730 + u64 cl = dctx->polytmp[1];
731 + u64 rh, rl;
732 +
733 + if (!dctx->first_block_processed) {
734 + dctx->first_block_processed = true;
735 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
736 rh &= m62;
737 ADD128(ch, cl, rh, rl);
738 mptr += (VMAC_NHBYTES/sizeof(u64));
739 - i--;
740 + blocks--;
741 }
742
743 - while (i--) {
744 + while (blocks--) {
745 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
746 rh &= m62;
747 poly_step(ch, cl, pkh, pkl, rh, rl);
748 mptr += (VMAC_NHBYTES/sizeof(u64));
749 }
750
751 - ctx->polytmp[0] = ch;
752 - ctx->polytmp[1] = cl;
753 + dctx->polytmp[0] = ch;
754 + dctx->polytmp[1] = cl;
755 }
756
757 -static u64 vhash(unsigned char m[], unsigned int mbytes,
758 - u64 *tagl, struct vmac_ctx *ctx)
759 +static int vmac_setkey(struct crypto_shash *tfm,
760 + const u8 *key, unsigned int keylen)
761 {
762 - u64 rh, rl, *mptr;
763 - const u64 *kptr = (u64 *)ctx->nhkey;
764 - int i, remaining;
765 - u64 ch, cl;
766 - u64 pkh = ctx->polykey[0];
767 - u64 pkl = ctx->polykey[1];
768 -
769 - mptr = (u64 *)m;
770 - i = mbytes / VMAC_NHBYTES;
771 - remaining = mbytes % VMAC_NHBYTES;
772 -
773 - if (ctx->first_block_processed) {
774 - ch = ctx->polytmp[0];
775 - cl = ctx->polytmp[1];
776 - } else if (i) {
777 - nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
778 - ch &= m62;
779 - ADD128(ch, cl, pkh, pkl);
780 - mptr += (VMAC_NHBYTES/sizeof(u64));
781 - i--;
782 - } else if (remaining) {
783 - nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
784 - ch &= m62;
785 - ADD128(ch, cl, pkh, pkl);
786 - mptr += (VMAC_NHBYTES/sizeof(u64));
787 - goto do_l3;
788 - } else {/* Empty String */
789 - ch = pkh; cl = pkl;
790 - goto do_l3;
791 - }
792 -
793 - while (i--) {
794 - nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
795 - rh &= m62;
796 - poly_step(ch, cl, pkh, pkl, rh, rl);
797 - mptr += (VMAC_NHBYTES/sizeof(u64));
798 - }
799 - if (remaining) {
800 - nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
801 - rh &= m62;
802 - poly_step(ch, cl, pkh, pkl, rh, rl);
803 - }
804 -
805 -do_l3:
806 - vhash_abort(ctx);
807 - remaining *= 8;
808 - return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
809 -}
810 + struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
811 + __be64 out[2];
812 + u8 in[16] = { 0 };
813 + unsigned int i;
814 + int err;
815
816 -static u64 vmac(unsigned char m[], unsigned int mbytes,
817 - const unsigned char n[16], u64 *tagl,
818 - struct vmac_ctx_t *ctx)
819 -{
820 - u64 *in_n, *out_p;
821 - u64 p, h;
822 - int i;
823 -
824 - in_n = ctx->__vmac_ctx.cached_nonce;
825 - out_p = ctx->__vmac_ctx.cached_aes;
826 -
827 - i = n[15] & 1;
828 - if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
829 - in_n[0] = *(u64 *)(n);
830 - in_n[1] = *(u64 *)(n+8);
831 - ((unsigned char *)in_n)[15] &= 0xFE;
832 - crypto_cipher_encrypt_one(ctx->child,
833 - (unsigned char *)out_p, (unsigned char *)in_n);
834 -
835 - ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
836 + if (keylen != VMAC_KEY_LEN) {
837 + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
838 + return -EINVAL;
839 }
840 - p = be64_to_cpup(out_p + i);
841 - h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
842 - return le64_to_cpu(p + h);
843 -}
844
845 -static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
846 -{
847 - u64 in[2] = {0}, out[2];
848 - unsigned i;
849 - int err = 0;
850 -
851 - err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
852 + err = crypto_cipher_setkey(tctx->cipher, key, keylen);
853 if (err)
854 return err;
855
856 /* Fill nh key */
857 - ((unsigned char *)in)[0] = 0x80;
858 - for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
859 - crypto_cipher_encrypt_one(ctx->child,
860 - (unsigned char *)out, (unsigned char *)in);
861 - ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
862 - ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
863 - ((unsigned char *)in)[15] += 1;
864 + in[0] = 0x80;
865 + for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
866 + crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
867 + tctx->nhkey[i] = be64_to_cpu(out[0]);
868 + tctx->nhkey[i+1] = be64_to_cpu(out[1]);
869 + in[15]++;
870 }
871
872 /* Fill poly key */
873 - ((unsigned char *)in)[0] = 0xC0;
874 - in[1] = 0;
875 - for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
876 - crypto_cipher_encrypt_one(ctx->child,
877 - (unsigned char *)out, (unsigned char *)in);
878 - ctx->__vmac_ctx.polytmp[i] =
879 - ctx->__vmac_ctx.polykey[i] =
880 - be64_to_cpup(out) & mpoly;
881 - ctx->__vmac_ctx.polytmp[i+1] =
882 - ctx->__vmac_ctx.polykey[i+1] =
883 - be64_to_cpup(out+1) & mpoly;
884 - ((unsigned char *)in)[15] += 1;
885 + in[0] = 0xC0;
886 + in[15] = 0;
887 + for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
888 + crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
889 + tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
890 + tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
891 + in[15]++;
892 }
893
894 /* Fill ip key */
895 - ((unsigned char *)in)[0] = 0xE0;
896 - in[1] = 0;
897 - for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
898 + in[0] = 0xE0;
899 + in[15] = 0;
900 + for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
901 do {
902 - crypto_cipher_encrypt_one(ctx->child,
903 - (unsigned char *)out, (unsigned char *)in);
904 - ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
905 - ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
906 - ((unsigned char *)in)[15] += 1;
907 - } while (ctx->__vmac_ctx.l3key[i] >= p64
908 - || ctx->__vmac_ctx.l3key[i+1] >= p64);
909 + crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
910 + tctx->l3key[i] = be64_to_cpu(out[0]);
911 + tctx->l3key[i+1] = be64_to_cpu(out[1]);
912 + in[15]++;
913 + } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
914 }
915
916 - /* Invalidate nonce/aes cache and reset other elements */
917 - ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
918 - ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
919 - ctx->__vmac_ctx.first_block_processed = 0;
920 -
921 - return err;
922 + return 0;
923 }
924
925 -static int vmac_setkey(struct crypto_shash *parent,
926 - const u8 *key, unsigned int keylen)
927 +static int vmac_init(struct shash_desc *desc)
928 {
929 - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
930 + const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
931 + struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
932
933 - if (keylen != VMAC_KEY_LEN) {
934 - crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
935 - return -EINVAL;
936 - }
937 -
938 - return vmac_set_key((u8 *)key, ctx);
939 -}
940 -
941 -static int vmac_init(struct shash_desc *pdesc)
942 -{
943 + dctx->partial_size = 0;
944 + dctx->first_block_processed = false;
945 + memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
946 return 0;
947 }
948
949 -static int vmac_update(struct shash_desc *pdesc, const u8 *p,
950 - unsigned int len)
951 +static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
952 {
953 - struct crypto_shash *parent = pdesc->tfm;
954 - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
955 - int expand;
956 - int min;
957 -
958 - expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
959 - VMAC_NHBYTES - ctx->partial_size : 0;
960 -
961 - min = len < expand ? len : expand;
962 -
963 - memcpy(ctx->partial + ctx->partial_size, p, min);
964 - ctx->partial_size += min;
965 -
966 - if (len < expand)
967 - return 0;
968 -
969 - vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
970 - ctx->partial_size = 0;
971 -
972 - len -= expand;
973 - p += expand;
974 + const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
975 + struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
976 + unsigned int n;
977 +
978 + if (dctx->partial_size) {
979 + n = min(len, VMAC_NHBYTES - dctx->partial_size);
980 + memcpy(&dctx->partial[dctx->partial_size], p, n);
981 + dctx->partial_size += n;
982 + p += n;
983 + len -= n;
984 + if (dctx->partial_size == VMAC_NHBYTES) {
985 + vhash_blocks(tctx, dctx, dctx->partial_words, 1);
986 + dctx->partial_size = 0;
987 + }
988 + }
989
990 - if (len % VMAC_NHBYTES) {
991 - memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
992 - len % VMAC_NHBYTES);
993 - ctx->partial_size = len % VMAC_NHBYTES;
994 + if (len >= VMAC_NHBYTES) {
995 + n = round_down(len, VMAC_NHBYTES);
996 + /* TODO: 'p' may be misaligned here */
997 + vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
998 + p += n;
999 + len -= n;
1000 }
1001
1002 - vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
1003 + if (len) {
1004 + memcpy(dctx->partial, p, len);
1005 + dctx->partial_size = len;
1006 + }
1007
1008 return 0;
1009 }
1010
1011 -static int vmac_final(struct shash_desc *pdesc, u8 *out)
1012 +static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
1013 + struct vmac_desc_ctx *dctx)
1014 {
1015 - struct crypto_shash *parent = pdesc->tfm;
1016 - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
1017 - vmac_t mac;
1018 - u8 nonce[16] = {};
1019 -
1020 - /* vmac() ends up accessing outside the array bounds that
1021 - * we specify. In appears to access up to the next 2-word
1022 - * boundary. We'll just be uber cautious and zero the
1023 - * unwritten bytes in the buffer.
1024 - */
1025 - if (ctx->partial_size) {
1026 - memset(ctx->partial + ctx->partial_size, 0,
1027 - VMAC_NHBYTES - ctx->partial_size);
1028 + unsigned int partial = dctx->partial_size;
1029 + u64 ch = dctx->polytmp[0];
1030 + u64 cl = dctx->polytmp[1];
1031 +
1032 + /* L1 and L2-hash the final block if needed */
1033 + if (partial) {
1034 + /* Zero-pad to next 128-bit boundary */
1035 + unsigned int n = round_up(partial, 16);
1036 + u64 rh, rl;
1037 +
1038 + memset(&dctx->partial[partial], 0, n - partial);
1039 + nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
1040 + rh &= m62;
1041 + if (dctx->first_block_processed)
1042 + poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
1043 + rh, rl);
1044 + else
1045 + ADD128(ch, cl, rh, rl);
1046 }
1047 - mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
1048 - memcpy(out, &mac, sizeof(vmac_t));
1049 - memzero_explicit(&mac, sizeof(vmac_t));
1050 - memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
1051 - ctx->partial_size = 0;
1052 +
1053 + /* L3-hash the 128-bit output of L2-hash */
1054 + return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
1055 +}
1056 +
1057 +static int vmac_final(struct shash_desc *desc, u8 *out)
1058 +{
1059 + const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
1060 + struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
1061 + static const u8 nonce[16] = {}; /* TODO: this is insecure */
1062 + union {
1063 + u8 bytes[16];
1064 + __be64 pads[2];
1065 + } block;
1066 + int index;
1067 + u64 hash, pad;
1068 +
1069 + /* Finish calculating the VHASH of the message */
1070 + hash = vhash_final(tctx, dctx);
1071 +
1072 + /* Generate pseudorandom pad by encrypting the nonce */
1073 + memcpy(&block, nonce, 16);
1074 + index = block.bytes[15] & 1;
1075 + block.bytes[15] &= ~1;
1076 + crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes);
1077 + pad = be64_to_cpu(block.pads[index]);
1078 +
1079 + /* The VMAC is the sum of VHASH and the pseudorandom pad */
1080 + put_unaligned_le64(hash + pad, out);
1081 return 0;
1082 }
1083
1084 static int vmac_init_tfm(struct crypto_tfm *tfm)
1085 {
1086 - struct crypto_cipher *cipher;
1087 - struct crypto_instance *inst = (void *)tfm->__crt_alg;
1088 + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
1089 struct crypto_spawn *spawn = crypto_instance_ctx(inst);
1090 - struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
1091 + struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
1092 + struct crypto_cipher *cipher;
1093
1094 cipher = crypto_spawn_cipher(spawn);
1095 if (IS_ERR(cipher))
1096 return PTR_ERR(cipher);
1097
1098 - ctx->child = cipher;
1099 + tctx->cipher = cipher;
1100 return 0;
1101 }
1102
1103 static void vmac_exit_tfm(struct crypto_tfm *tfm)
1104 {
1105 - struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
1106 - crypto_free_cipher(ctx->child);
1107 + struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
1108 +
1109 + crypto_free_cipher(tctx->cipher);
1110 }
1111
1112 static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
1113 @@ -655,6 +608,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
1114 if (IS_ERR(alg))
1115 return PTR_ERR(alg);
1116
1117 + err = -EINVAL;
1118 + if (alg->cra_blocksize != 16)
1119 + goto out_put_alg;
1120 +
1121 inst = shash_alloc_instance("vmac", alg);
1122 err = PTR_ERR(inst);
1123 if (IS_ERR(inst))
1124 @@ -670,11 +627,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
1125 inst->alg.base.cra_blocksize = alg->cra_blocksize;
1126 inst->alg.base.cra_alignmask = alg->cra_alignmask;
1127
1128 - inst->alg.digestsize = sizeof(vmac_t);
1129 - inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
1130 + inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
1131 inst->alg.base.cra_init = vmac_init_tfm;
1132 inst->alg.base.cra_exit = vmac_exit_tfm;
1133
1134 + inst->alg.descsize = sizeof(struct vmac_desc_ctx);
1135 + inst->alg.digestsize = VMAC_TAG_LEN / 8;
1136 inst->alg.init = vmac_init;
1137 inst->alg.update = vmac_update;
1138 inst->alg.final = vmac_final;
1139 diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
1140 index 572b6c7303ed..f14695e744d0 100644
1141 --- a/drivers/acpi/apei/ghes.c
1142 +++ b/drivers/acpi/apei/ghes.c
1143 @@ -114,19 +114,7 @@ static DEFINE_MUTEX(ghes_list_mutex);
1144 * from BIOS to Linux can be determined only in NMI, IRQ or timer
1145 * handler, but general ioremap can not be used in atomic context, so
1146 * the fixmap is used instead.
1147 - */
1148 -
1149 -/*
1150 - * Two virtual pages are used, one for IRQ/PROCESS context, the other for
1151 - * NMI context (optionally).
1152 - */
1153 -#define GHES_IOREMAP_PAGES 2
1154 -#define GHES_IOREMAP_IRQ_PAGE(base) (base)
1155 -#define GHES_IOREMAP_NMI_PAGE(base) ((base) + PAGE_SIZE)
1156 -
1157 -/* virtual memory area for atomic ioremap */
1158 -static struct vm_struct *ghes_ioremap_area;
1159 -/*
1160 + *
1161 * These 2 spinlocks are used to prevent the fixmap entries from being used
1162 * simultaneously.
1163 */
1164 @@ -141,23 +129,6 @@ static atomic_t ghes_estatus_cache_alloced;
1165
1166 static int ghes_panic_timeout __read_mostly = 30;
1167
1168 -static int ghes_ioremap_init(void)
1169 -{
1170 - ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
1171 - VM_IOREMAP, VMALLOC_START, VMALLOC_END);
1172 - if (!ghes_ioremap_area) {
1173 - pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
1174 - return -ENOMEM;
1175 - }
1176 -
1177 - return 0;
1178 -}
1179 -
1180 -static void ghes_ioremap_exit(void)
1181 -{
1182 - free_vm_area(ghes_ioremap_area);
1183 -}
1184 -
1185 static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
1186 {
1187 phys_addr_t paddr;
1188 @@ -1247,13 +1218,9 @@ static int __init ghes_init(void)
1189
1190 ghes_nmi_init_cxt();
1191
1192 - rc = ghes_ioremap_init();
1193 - if (rc)
1194 - goto err;
1195 -
1196 rc = ghes_estatus_pool_init();
1197 if (rc)
1198 - goto err_ioremap_exit;
1199 + goto err;
1200
1201 rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
1202 GHES_ESTATUS_CACHE_ALLOCED_MAX);
1203 @@ -1277,8 +1244,6 @@ static int __init ghes_init(void)
1204 return 0;
1205 err_pool_exit:
1206 ghes_estatus_pool_exit();
1207 -err_ioremap_exit:
1208 - ghes_ioremap_exit();
1209 err:
1210 return rc;
1211 }
1212 diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
1213 index 286b0049b7b6..a48fde191c0a 100644
1214 --- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
1215 +++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
1216 @@ -223,7 +223,7 @@ static struct ccu_mux cpu_clk = {
1217 .hw.init = CLK_HW_INIT_PARENTS("cpu",
1218 cpu_parents,
1219 &ccu_mux_ops,
1220 - CLK_IS_CRITICAL),
1221 + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
1222 }
1223 };
1224
1225 diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
1226 index 0e2011636fbb..c53c7ac992f8 100644
1227 --- a/drivers/firmware/qemu_fw_cfg.c
1228 +++ b/drivers/firmware/qemu_fw_cfg.c
1229 @@ -582,9 +582,10 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev)
1230 {
1231 pr_debug("fw_cfg: unloading.\n");
1232 fw_cfg_sysfs_cache_cleanup();
1233 + sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
1234 + fw_cfg_io_cleanup();
1235 fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
1236 fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
1237 - fw_cfg_io_cleanup();
1238 return 0;
1239 }
1240
1241 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
1242 index 46a2f5d9aa25..f00421dfacbd 100644
1243 --- a/include/asm-generic/pgtable.h
1244 +++ b/include/asm-generic/pgtable.h
1245 @@ -991,8 +991,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
1246 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
1247 int pud_clear_huge(pud_t *pud);
1248 int pmd_clear_huge(pmd_t *pmd);
1249 -int pud_free_pmd_page(pud_t *pud);
1250 -int pmd_free_pte_page(pmd_t *pmd);
1251 +int pud_free_pmd_page(pud_t *pud, unsigned long addr);
1252 +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
1253 #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
1254 static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1255 {
1256 @@ -1018,11 +1018,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
1257 {
1258 return 0;
1259 }
1260 -static inline int pud_free_pmd_page(pud_t *pud)
1261 +static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1262 {
1263 return 0;
1264 }
1265 -static inline int pmd_free_pte_page(pmd_t *pmd)
1266 +static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1267 {
1268 return 0;
1269 }
1270 diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
1271 deleted file mode 100644
1272 index 6b700c7b2fe1..000000000000
1273 --- a/include/crypto/vmac.h
1274 +++ /dev/null
1275 @@ -1,63 +0,0 @@
1276 -/*
1277 - * Modified to interface to the Linux kernel
1278 - * Copyright (c) 2009, Intel Corporation.
1279 - *
1280 - * This program is free software; you can redistribute it and/or modify it
1281 - * under the terms and conditions of the GNU General Public License,
1282 - * version 2, as published by the Free Software Foundation.
1283 - *
1284 - * This program is distributed in the hope it will be useful, but WITHOUT
1285 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1286 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1287 - * more details.
1288 - *
1289 - * You should have received a copy of the GNU General Public License along with
1290 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
1291 - * Place - Suite 330, Boston, MA 02111-1307 USA.
1292 - */
1293 -
1294 -#ifndef __CRYPTO_VMAC_H
1295 -#define __CRYPTO_VMAC_H
1296 -
1297 -/* --------------------------------------------------------------------------
1298 - * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
1299 - * This implementation is herby placed in the public domain.
1300 - * The authors offers no warranty. Use at your own risk.
1301 - * Please send bug reports to the authors.
1302 - * Last modified: 17 APR 08, 1700 PDT
1303 - * ----------------------------------------------------------------------- */
1304 -
1305 -/*
1306 - * User definable settings.
1307 - */
1308 -#define VMAC_TAG_LEN 64
1309 -#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
1310 -#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
1311 -#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
1312 -
1313 -/*
1314 - * This implementation uses u32 and u64 as names for unsigned 32-
1315 - * and 64-bit integer types. These are defined in C99 stdint.h. The
1316 - * following may need adaptation if you are not running a C99 or
1317 - * Microsoft C environment.
1318 - */
1319 -struct vmac_ctx {
1320 - u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
1321 - u64 polykey[2*VMAC_TAG_LEN/64];
1322 - u64 l3key[2*VMAC_TAG_LEN/64];
1323 - u64 polytmp[2*VMAC_TAG_LEN/64];
1324 - u64 cached_nonce[2];
1325 - u64 cached_aes[2];
1326 - int first_block_processed;
1327 -};
1328 -
1329 -typedef u64 vmac_t;
1330 -
1331 -struct vmac_ctx_t {
1332 - struct crypto_cipher *child;
1333 - struct vmac_ctx __vmac_ctx;
1334 - u8 partial[VMAC_NHBYTES]; /* partial block */
1335 - int partial_size; /* size of the partial block */
1336 -};
1337 -
1338 -#endif /* __CRYPTO_VMAC_H */
1339 diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
1340 index d447f24df970..0812cd5408c9 100644
1341 --- a/include/trace/events/sched.h
1342 +++ b/include/trace/events/sched.h
1343 @@ -116,9 +116,9 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
1344 * RUNNING (we will not have dequeued if state != RUNNING).
1345 */
1346 if (preempt)
1347 - return TASK_STATE_MAX;
1348 + return TASK_REPORT_MAX;
1349
1350 - return __get_task_state(p);
1351 + return 1 << __get_task_state(p);
1352 }
1353 #endif /* CREATE_TRACE_POINTS */
1354
1355 @@ -164,7 +164,7 @@ TRACE_EVENT(sched_switch,
1356 { 0x40, "P" }, { 0x80, "I" }) :
1357 "R",
1358
1359 - __entry->prev_state & TASK_STATE_MAX ? "+" : "",
1360 + __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
1361 __entry->next_comm, __entry->next_pid, __entry->next_prio)
1362 );
1363
1364 diff --git a/lib/ioremap.c b/lib/ioremap.c
1365 index 54e5bbaa3200..517f5853ffed 100644
1366 --- a/lib/ioremap.c
1367 +++ b/lib/ioremap.c
1368 @@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
1369 if (ioremap_pmd_enabled() &&
1370 ((next - addr) == PMD_SIZE) &&
1371 IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
1372 - pmd_free_pte_page(pmd)) {
1373 + pmd_free_pte_page(pmd, addr)) {
1374 if (pmd_set_huge(pmd, phys_addr + addr, prot))
1375 continue;
1376 }
1377 @@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
1378 if (ioremap_pud_enabled() &&
1379 ((next - addr) == PUD_SIZE) &&
1380 IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
1381 - pud_free_pmd_page(pud)) {
1382 + pud_free_pmd_page(pud, addr)) {
1383 if (pud_set_huge(pud, phys_addr + addr, prot))
1384 continue;
1385 }
1386 diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
1387 index 8112893037bd..cef3754408d4 100644
1388 --- a/net/bluetooth/hidp/core.c
1389 +++ b/net/bluetooth/hidp/core.c
1390 @@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session)
1391 del_timer(&session->timer);
1392 }
1393
1394 -static void hidp_process_report(struct hidp_session *session,
1395 - int type, const u8 *data, int len, int intr)
1396 +static void hidp_process_report(struct hidp_session *session, int type,
1397 + const u8 *data, unsigned int len, int intr)
1398 {
1399 if (len > HID_MAX_BUFFER_SIZE)
1400 len = HID_MAX_BUFFER_SIZE;
1401 diff --git a/scripts/depmod.sh b/scripts/depmod.sh
1402 index 9831cca31240..f41b0a4b575c 100755
1403 --- a/scripts/depmod.sh
1404 +++ b/scripts/depmod.sh
1405 @@ -11,10 +11,16 @@ DEPMOD=$1
1406 KERNELRELEASE=$2
1407 SYMBOL_PREFIX=$3
1408
1409 -if ! test -r System.map -a -x "$DEPMOD"; then
1410 +if ! test -r System.map ; then
1411 exit 0
1412 fi
1413
1414 +if [ -z $(command -v $DEPMOD) ]; then
1415 + echo "'make modules_install' requires $DEPMOD. Please install it." >&2
1416 + echo "This is probably in the kmod package." >&2
1417 + exit 1
1418 +fi
1419 +
1420 # older versions of depmod don't support -P <symbol-prefix>
1421 # support was added in module-init-tools 3.13
1422 if test -n "$SYMBOL_PREFIX"; then
1423 diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
1424 index 694db27b11fa..13354d6304a8 100644
1425 --- a/sound/soc/codecs/msm8916-wcd-digital.c
1426 +++ b/sound/soc/codecs/msm8916-wcd-digital.c
1427 @@ -238,7 +238,7 @@ static const struct soc_enum rx_mix2_inp1_chain_enum = SOC_ENUM_SINGLE(
1428 static const struct soc_enum rx2_mix1_inp_enum[] = {
1429 SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text),
1430 SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 3, 6, rx_mix1_text),
1431 - SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text),
1432 + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B2_CTL, 0, 6, rx_mix1_text),
1433 };
1434
1435 /* RX2 MIX2 */
1436 @@ -249,7 +249,7 @@ static const struct soc_enum rx2_mix2_inp1_chain_enum = SOC_ENUM_SINGLE(
1437 static const struct soc_enum rx3_mix1_inp_enum[] = {
1438 SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text),
1439 SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 3, 6, rx_mix1_text),
1440 - SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text),
1441 + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B2_CTL, 0, 6, rx_mix1_text),
1442 };
1443
1444 /* DEC */
1445 diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
1446 index 20755ecc7f9e..a02dec251afe 100644
1447 --- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
1448 +++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
1449 @@ -116,23 +116,19 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
1450 struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
1451 struct snd_soc_jack *jack = &ctx->jack;
1452
1453 - /**
1454 - * TI supports 4 butons headset detection
1455 - * KEY_MEDIA
1456 - * KEY_VOICECOMMAND
1457 - * KEY_VOLUMEUP
1458 - * KEY_VOLUMEDOWN
1459 - */
1460 - if (ctx->ts3a227e_present)
1461 - jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
1462 - SND_JACK_BTN_0 | SND_JACK_BTN_1 |
1463 - SND_JACK_BTN_2 | SND_JACK_BTN_3;
1464 - else
1465 - jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
1466 + if (ctx->ts3a227e_present) {
1467 + /*
1468 + * The jack has already been created in the
1469 + * cht_max98090_headset_init() function.
1470 + */
1471 + snd_soc_jack_notifier_register(jack, &cht_jack_nb);
1472 + return 0;
1473 + }
1474 +
1475 + jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
1476
1477 ret = snd_soc_card_jack_new(runtime->card, "Headset Jack",
1478 jack_type, jack, NULL, 0);
1479 -
1480 if (ret) {
1481 dev_err(runtime->dev, "Headset Jack creation failed %d\n", ret);
1482 return ret;
1483 @@ -188,6 +184,27 @@ static int cht_max98090_headset_init(struct snd_soc_component *component)
1484 {
1485 struct snd_soc_card *card = component->card;
1486 struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
1487 + struct snd_soc_jack *jack = &ctx->jack;
1488 + int jack_type;
1489 + int ret;
1490 +
1491 + /*
1492 + * TI supports 4 butons headset detection
1493 + * KEY_MEDIA
1494 + * KEY_VOICECOMMAND
1495 + * KEY_VOLUMEUP
1496 + * KEY_VOLUMEDOWN
1497 + */
1498 + jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
1499 + SND_JACK_BTN_0 | SND_JACK_BTN_1 |
1500 + SND_JACK_BTN_2 | SND_JACK_BTN_3;
1501 +
1502 + ret = snd_soc_card_jack_new(card, "Headset Jack", jack_type,
1503 + jack, NULL, 0);
1504 + if (ret) {
1505 + dev_err(card->dev, "Headset Jack creation failed %d\n", ret);
1506 + return ret;
1507 + }
1508
1509 return ts3a227e_enable_jack_detect(component, &ctx->jack);
1510 }
1511 diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
1512 index 2684a2ba33cd..e28edb1f7263 100644
1513 --- a/sound/soc/sh/rcar/adg.c
1514 +++ b/sound/soc/sh/rcar/adg.c
1515 @@ -479,10 +479,10 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
1516 }
1517
1518 if (req_rate[0] % 48000 == 0)
1519 - adg->flags = AUDIO_OUT_48;
1520 + adg->flags |= AUDIO_OUT_48;
1521
1522 if (of_get_property(np, "clkout-lr-asynchronous", NULL))
1523 - adg->flags = LRCLK_ASYNC;
1524 + adg->flags |= LRCLK_ASYNC;
1525
1526 /*
1527 * This driver is assuming that AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC