Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0277-4.9.178-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3608 - (hide annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 10 months ago) by niro
File size: 51787 byte(s)
-added kernel-alx-legacy pkg
1 niro 3608 diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
2     index 534e9baa4e1d..5d4330be200f 100644
3     --- a/Documentation/x86/mds.rst
4     +++ b/Documentation/x86/mds.rst
5     @@ -142,45 +142,13 @@ Mitigation points
6     mds_user_clear.
7    
8     The mitigation is invoked in prepare_exit_to_usermode() which covers
9     - most of the kernel to user space transitions. There are a few exceptions
10     - which are not invoking prepare_exit_to_usermode() on return to user
11     - space. These exceptions use the paranoid exit code.
12     + all but one of the kernel to user space transitions. The exception
13     + is when we return from a Non Maskable Interrupt (NMI), which is
14     + handled directly in do_nmi().
15    
16     - - Non Maskable Interrupt (NMI):
17     -
18     - Access to sensible data like keys, credentials in the NMI context is
19     - mostly theoretical: The CPU can do prefetching or execute a
20     - misspeculated code path and thereby fetching data which might end up
21     - leaking through a buffer.
22     -
23     - But for mounting other attacks the kernel stack address of the task is
24     - already valuable information. So in full mitigation mode, the NMI is
25     - mitigated on the return from do_nmi() to provide almost complete
26     - coverage.
27     -
28     - - Double fault (#DF):
29     -
30     - A double fault is usually fatal, but the ESPFIX workaround, which can
31     - be triggered from user space through modify_ldt(2) is a recoverable
32     - double fault. #DF uses the paranoid exit path, so explicit mitigation
33     - in the double fault handler is required.
34     -
35     - - Machine Check Exception (#MC):
36     -
37     - Another corner case is a #MC which hits between the CPU buffer clear
38     - invocation and the actual return to user. As this still is in kernel
39     - space it takes the paranoid exit path which does not clear the CPU
40     - buffers. So the #MC handler repopulates the buffers to some
41     - extent. Machine checks are not reliably controllable and the window is
42     - extremly small so mitigation would just tick a checkbox that this
43     - theoretical corner case is covered. To keep the amount of special
44     - cases small, ignore #MC.
45     -
46     - - Debug Exception (#DB):
47     -
48     - This takes the paranoid exit path only when the INT1 breakpoint is in
49     - kernel space. #DB on a user space address takes the regular exit path,
50     - so no extra mitigation required.
51     + (The reason that NMI is special is that prepare_exit_to_usermode() can
52     + enable IRQs. In NMI context, NMIs are blocked, and we don't want to
53     + enable IRQs with NMIs blocked.)
54    
55    
56     2. C-State transition
57     diff --git a/Makefile b/Makefile
58     index ceb8f4bf6245..e9fae7a3c621 100644
59     --- a/Makefile
60     +++ b/Makefile
61     @@ -1,6 +1,6 @@
62     VERSION = 4
63     PATCHLEVEL = 9
64     -SUBLEVEL = 177
65     +SUBLEVEL = 178
66     EXTRAVERSION =
67     NAME = Roaring Lionus
68    
69     diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
70     index 5d934a0039d7..cb2486a526e6 100644
71     --- a/arch/arm/crypto/aesbs-glue.c
72     +++ b/arch/arm/crypto/aesbs-glue.c
73     @@ -265,6 +265,8 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
74    
75     blkcipher_walk_init(&walk, dst, src, nbytes);
76     err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
77     + if (err)
78     + return err;
79    
80     /* generate the initial tweak */
81     AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
82     @@ -289,6 +291,8 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
83    
84     blkcipher_walk_init(&walk, dst, src, nbytes);
85     err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
86     + if (err)
87     + return err;
88    
89     /* generate the initial tweak */
90     AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
91     diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
92     index fd6da5419b51..2199c3adfd84 100644
93     --- a/arch/arm/mach-exynos/firmware.c
94     +++ b/arch/arm/mach-exynos/firmware.c
95     @@ -205,6 +205,7 @@ void __init exynos_firmware_init(void)
96     return;
97    
98     addr = of_get_address(nd, 0, NULL, NULL);
99     + of_node_put(nd);
100     if (!addr) {
101     pr_err("%s: No address specified.\n", __func__);
102     return;
103     diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
104     index 3e1430a886b2..81c935ce089b 100644
105     --- a/arch/arm/mach-exynos/suspend.c
106     +++ b/arch/arm/mach-exynos/suspend.c
107     @@ -715,8 +715,10 @@ void __init exynos_pm_init(void)
108    
109     if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
110     pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
111     + of_node_put(np);
112     return;
113     }
114     + of_node_put(np);
115    
116     pm_data = (const struct exynos_pm_data *) match->data;
117    
118     diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
119     index 5917147af0c4..9ee660013e5c 100644
120     --- a/arch/arm64/include/asm/processor.h
121     +++ b/arch/arm64/include/asm/processor.h
122     @@ -49,7 +49,15 @@
123     * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
124     */
125     #ifdef CONFIG_COMPAT
126     +#ifdef CONFIG_ARM64_64K_PAGES
127     +/*
128     + * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
129     + * by the compat vectors page.
130     + */
131     #define TASK_SIZE_32 UL(0x100000000)
132     +#else
133     +#define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE)
134     +#endif /* CONFIG_ARM64_64K_PAGES */
135     #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
136     TASK_SIZE_32 : TASK_SIZE_64)
137     #define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
138     diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
139     index 73ae90ef434c..9f1adca3c346 100644
140     --- a/arch/arm64/kernel/debug-monitors.c
141     +++ b/arch/arm64/kernel/debug-monitors.c
142     @@ -132,6 +132,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors);
143     */
144     static int clear_os_lock(unsigned int cpu)
145     {
146     + write_sysreg(0, osdlr_el1);
147     write_sysreg(0, oslar_el1);
148     isb();
149     return 0;
150     diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
151     index cd4df9322501..7bbfe7d35da7 100644
152     --- a/arch/x86/crypto/crct10dif-pclmul_glue.c
153     +++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
154     @@ -76,15 +76,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
155     return 0;
156     }
157    
158     -static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
159     - u8 *out)
160     +static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
161     {
162     if (irq_fpu_usable()) {
163     kernel_fpu_begin();
164     - *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
165     + *(__u16 *)out = crc_t10dif_pcl(crc, data, len);
166     kernel_fpu_end();
167     } else
168     - *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
169     + *(__u16 *)out = crc_t10dif_generic(crc, data, len);
170     return 0;
171     }
172    
173     @@ -93,15 +92,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
174     {
175     struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
176    
177     - return __chksum_finup(&ctx->crc, data, len, out);
178     + return __chksum_finup(ctx->crc, data, len, out);
179     }
180    
181     static int chksum_digest(struct shash_desc *desc, const u8 *data,
182     unsigned int length, u8 *out)
183     {
184     - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
185     -
186     - return __chksum_finup(&ctx->crc, data, length, out);
187     + return __chksum_finup(0, data, length, out);
188     }
189    
190     static struct shash_alg alg = {
191     diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
192     index a76dc738ec61..1cf16760f5e3 100644
193     --- a/arch/x86/entry/entry_32.S
194     +++ b/arch/x86/entry/entry_32.S
195     @@ -219,6 +219,7 @@ ENTRY(__switch_to_asm)
196     pushl %ebx
197     pushl %edi
198     pushl %esi
199     + pushfl
200    
201     /* switch stack */
202     movl %esp, TASK_threadsp(%eax)
203     @@ -241,6 +242,7 @@ ENTRY(__switch_to_asm)
204     #endif
205    
206     /* restore callee-saved registers */
207     + popfl
208     popl %esi
209     popl %edi
210     popl %ebx
211     diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
212     index 870e941c1947..8252d9dc48eb 100644
213     --- a/arch/x86/entry/entry_64.S
214     +++ b/arch/x86/entry/entry_64.S
215     @@ -313,6 +313,7 @@ ENTRY(__switch_to_asm)
216     pushq %r13
217     pushq %r14
218     pushq %r15
219     + pushfq
220    
221     /* switch stack */
222     movq %rsp, TASK_threadsp(%rdi)
223     @@ -335,6 +336,7 @@ ENTRY(__switch_to_asm)
224     #endif
225    
226     /* restore callee-saved registers */
227     + popfq
228     popq %r15
229     popq %r14
230     popq %r13
231     diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
232     index 676e84f521ba..e959b8d40473 100644
233     --- a/arch/x86/include/asm/switch_to.h
234     +++ b/arch/x86/include/asm/switch_to.h
235     @@ -35,6 +35,7 @@ asmlinkage void ret_from_fork(void);
236    
237     /* data that is pointed to by thread.sp */
238     struct inactive_task_frame {
239     + unsigned long flags;
240     #ifdef CONFIG_X86_64
241     unsigned long r15;
242     unsigned long r14;
243     diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
244     index 912246fd6cd9..4ca26fc7aa89 100644
245     --- a/arch/x86/kernel/process_32.c
246     +++ b/arch/x86/kernel/process_32.c
247     @@ -129,6 +129,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
248     struct task_struct *tsk;
249     int err;
250    
251     + /*
252     + * For a new task use the RESET flags value since there is no before.
253     + * All the status flags are zero; DF and all the system flags must also
254     + * be 0, specifically IF must be 0 because we context switch to the new
255     + * task with interrupts disabled.
256     + */
257     + frame->flags = X86_EFLAGS_FIXED;
258     frame->bp = 0;
259     frame->ret_addr = (unsigned long) ret_from_fork;
260     p->thread.sp = (unsigned long) fork_frame;
261     diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
262     index 81eec65fe053..6d6c15cd9b9a 100644
263     --- a/arch/x86/kernel/process_64.c
264     +++ b/arch/x86/kernel/process_64.c
265     @@ -268,6 +268,14 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
266     childregs = task_pt_regs(p);
267     fork_frame = container_of(childregs, struct fork_frame, regs);
268     frame = &fork_frame->frame;
269     +
270     + /*
271     + * For a new task use the RESET flags value since there is no before.
272     + * All the status flags are zero; DF and all the system flags must also
273     + * be 0, specifically IF must be 0 because we context switch to the new
274     + * task with interrupts disabled.
275     + */
276     + frame->flags = X86_EFLAGS_FIXED;
277     frame->bp = 0;
278     frame->ret_addr = (unsigned long) ret_from_fork;
279     p->thread.sp = (unsigned long) fork_frame;
280     diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
281     index ef225fa8e928..5bbfa2f63b8c 100644
282     --- a/arch/x86/kernel/traps.c
283     +++ b/arch/x86/kernel/traps.c
284     @@ -62,7 +62,6 @@
285     #include <asm/alternative.h>
286     #include <asm/fpu/xstate.h>
287     #include <asm/trace/mpx.h>
288     -#include <asm/nospec-branch.h>
289     #include <asm/mpx.h>
290     #include <asm/vm86.h>
291    
292     @@ -341,13 +340,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
293     regs->ip = (unsigned long)general_protection;
294     regs->sp = (unsigned long)&normal_regs->orig_ax;
295    
296     - /*
297     - * This situation can be triggered by userspace via
298     - * modify_ldt(2) and the return does not take the regular
299     - * user space exit, so a CPU buffer clear is required when
300     - * MDS mitigation is enabled.
301     - */
302     - mds_user_clear_cpu_buffers();
303     return;
304     }
305     #endif
306     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
307     index 8285142556b5..1f32c4e32a00 100644
308     --- a/arch/x86/kvm/x86.c
309     +++ b/arch/x86/kvm/x86.c
310     @@ -1073,11 +1073,8 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
311     return 0;
312     }
313    
314     -bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
315     +static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
316     {
317     - if (efer & efer_reserved_bits)
318     - return false;
319     -
320     if (efer & EFER_FFXSR) {
321     struct kvm_cpuid_entry2 *feat;
322    
323     @@ -1095,19 +1092,33 @@ bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
324     }
325    
326     return true;
327     +
328     +}
329     +bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
330     +{
331     + if (efer & efer_reserved_bits)
332     + return false;
333     +
334     + return __kvm_valid_efer(vcpu, efer);
335     }
336     EXPORT_SYMBOL_GPL(kvm_valid_efer);
337    
338     -static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
339     +static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
340     {
341     u64 old_efer = vcpu->arch.efer;
342     + u64 efer = msr_info->data;
343    
344     - if (!kvm_valid_efer(vcpu, efer))
345     - return 1;
346     + if (efer & efer_reserved_bits)
347     + return false;
348    
349     - if (is_paging(vcpu)
350     - && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
351     - return 1;
352     + if (!msr_info->host_initiated) {
353     + if (!__kvm_valid_efer(vcpu, efer))
354     + return 1;
355     +
356     + if (is_paging(vcpu) &&
357     + (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
358     + return 1;
359     + }
360    
361     efer &= ~EFER_LMA;
362     efer |= vcpu->arch.efer & EFER_LMA;
363     @@ -2203,7 +2214,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
364     vcpu->arch.arch_capabilities = data;
365     break;
366     case MSR_EFER:
367     - return set_efer(vcpu, data);
368     + return set_efer(vcpu, msr_info);
369     case MSR_K7_HWCR:
370     data &= ~(u64)0x40; /* ignore flush filter disable */
371     data &= ~(u64)0x100; /* ignore ignne emulation enable */
372     diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
373     index cb1c3a3287b0..246905bf00aa 100644
374     --- a/crypto/chacha20poly1305.c
375     +++ b/crypto/chacha20poly1305.c
376     @@ -647,8 +647,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
377    
378     err = -ENAMETOOLONG;
379     if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
380     - "%s(%s,%s)", name, chacha_name,
381     - poly_name) >= CRYPTO_MAX_ALG_NAME)
382     + "%s(%s,%s)", name, chacha->base.cra_name,
383     + poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
384     goto out_drop_chacha;
385     if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
386     "%s(%s,%s)", name, chacha->base.cra_driver_name,
387     diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
388     index 8e94e29dc6fc..d08048ae5552 100644
389     --- a/crypto/crct10dif_generic.c
390     +++ b/crypto/crct10dif_generic.c
391     @@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
392     return 0;
393     }
394    
395     -static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
396     - u8 *out)
397     +static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
398     {
399     - *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
400     + *(__u16 *)out = crc_t10dif_generic(crc, data, len);
401     return 0;
402     }
403    
404     @@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
405     {
406     struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
407    
408     - return __chksum_finup(&ctx->crc, data, len, out);
409     + return __chksum_finup(ctx->crc, data, len, out);
410     }
411    
412     static int chksum_digest(struct shash_desc *desc, const u8 *data,
413     unsigned int length, u8 *out)
414     {
415     - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
416     -
417     - return __chksum_finup(&ctx->crc, data, length, out);
418     + return __chksum_finup(0, data, length, out);
419     }
420    
421     static struct shash_alg alg = {
422     diff --git a/crypto/gcm.c b/crypto/gcm.c
423     index dd33fbd2d868..398f048c452a 100644
424     --- a/crypto/gcm.c
425     +++ b/crypto/gcm.c
426     @@ -616,7 +616,6 @@ static void crypto_gcm_free(struct aead_instance *inst)
427    
428     static int crypto_gcm_create_common(struct crypto_template *tmpl,
429     struct rtattr **tb,
430     - const char *full_name,
431     const char *ctr_name,
432     const char *ghash_name)
433     {
434     @@ -657,7 +656,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
435     goto err_free_inst;
436    
437     err = -EINVAL;
438     - if (ghash->digestsize != 16)
439     + if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
440     + ghash->digestsize != 16)
441     goto err_drop_ghash;
442    
443     crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
444     @@ -669,24 +669,24 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
445    
446     ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
447    
448     - /* We only support 16-byte blocks. */
449     - if (crypto_skcipher_alg_ivsize(ctr) != 16)
450     - goto out_put_ctr;
451     -
452     - /* Not a stream cipher? */
453     + /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
454     err = -EINVAL;
455     - if (ctr->base.cra_blocksize != 1)
456     + if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
457     + crypto_skcipher_alg_ivsize(ctr) != 16 ||
458     + ctr->base.cra_blocksize != 1)
459     goto out_put_ctr;
460    
461     err = -ENAMETOOLONG;
462     + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
463     + "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
464     + goto out_put_ctr;
465     +
466     if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
467     "gcm_base(%s,%s)", ctr->base.cra_driver_name,
468     ghash_alg->cra_driver_name) >=
469     CRYPTO_MAX_ALG_NAME)
470     goto out_put_ctr;
471    
472     - memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
473     -
474     inst->alg.base.cra_flags = (ghash->base.cra_flags |
475     ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
476     inst->alg.base.cra_priority = (ghash->base.cra_priority +
477     @@ -728,7 +728,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
478     {
479     const char *cipher_name;
480     char ctr_name[CRYPTO_MAX_ALG_NAME];
481     - char full_name[CRYPTO_MAX_ALG_NAME];
482    
483     cipher_name = crypto_attr_alg_name(tb[1]);
484     if (IS_ERR(cipher_name))
485     @@ -738,12 +737,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
486     CRYPTO_MAX_ALG_NAME)
487     return -ENAMETOOLONG;
488    
489     - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
490     - CRYPTO_MAX_ALG_NAME)
491     - return -ENAMETOOLONG;
492     -
493     - return crypto_gcm_create_common(tmpl, tb, full_name,
494     - ctr_name, "ghash");
495     + return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
496     }
497    
498     static struct crypto_template crypto_gcm_tmpl = {
499     @@ -757,7 +751,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
500     {
501     const char *ctr_name;
502     const char *ghash_name;
503     - char full_name[CRYPTO_MAX_ALG_NAME];
504    
505     ctr_name = crypto_attr_alg_name(tb[1]);
506     if (IS_ERR(ctr_name))
507     @@ -767,12 +760,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
508     if (IS_ERR(ghash_name))
509     return PTR_ERR(ghash_name);
510    
511     - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
512     - ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
513     - return -ENAMETOOLONG;
514     -
515     - return crypto_gcm_create_common(tmpl, tb, full_name,
516     - ctr_name, ghash_name);
517     + return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
518     }
519    
520     static struct crypto_template crypto_gcm_base_tmpl = {
521     diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
522     index d7da0eea5622..319d9962552e 100644
523     --- a/crypto/salsa20_generic.c
524     +++ b/crypto/salsa20_generic.c
525     @@ -186,7 +186,7 @@ static int encrypt(struct blkcipher_desc *desc,
526     blkcipher_walk_init(&walk, dst, src, nbytes);
527     err = blkcipher_walk_virt_block(desc, &walk, 64);
528    
529     - salsa20_ivsetup(ctx, walk.iv);
530     + salsa20_ivsetup(ctx, desc->info);
531    
532     while (walk.nbytes >= 64) {
533     salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
534     diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
535     index dac36ef450ba..996b9ae15404 100644
536     --- a/drivers/char/ipmi/ipmi_ssif.c
537     +++ b/drivers/char/ipmi/ipmi_ssif.c
538     @@ -699,12 +699,16 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
539     /* End of read */
540     len = ssif_info->multi_len;
541     data = ssif_info->data;
542     - } else if (blocknum != ssif_info->multi_pos) {
543     + } else if (blocknum + 1 != ssif_info->multi_pos) {
544     /*
545     * Out of sequence block, just abort. Block
546     * numbers start at zero for the second block,
547     * but multi_pos starts at one, so the +1.
548     */
549     + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
550     + dev_dbg(&ssif_info->client->dev,
551     + "Received message out of sequence, expected %u, got %u\n",
552     + ssif_info->multi_pos - 1, blocknum);
553     result = -EIO;
554     } else {
555     ssif_inc_stat(ssif_info, received_message_parts);
556     diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
557     index 0b4a293b8a1e..d9281a28818d 100644
558     --- a/drivers/crypto/vmx/aesp8-ppc.pl
559     +++ b/drivers/crypto/vmx/aesp8-ppc.pl
560     @@ -1815,7 +1815,7 @@ Lctr32_enc8x_three:
561     stvx_u $out1,$x10,$out
562     stvx_u $out2,$x20,$out
563     addi $out,$out,0x30
564     - b Lcbc_dec8x_done
565     + b Lctr32_enc8x_done
566    
567     .align 5
568     Lctr32_enc8x_two:
569     @@ -1827,7 +1827,7 @@ Lctr32_enc8x_two:
570     stvx_u $out0,$x00,$out
571     stvx_u $out1,$x10,$out
572     addi $out,$out,0x20
573     - b Lcbc_dec8x_done
574     + b Lctr32_enc8x_done
575    
576     .align 5
577     Lctr32_enc8x_one:
578     diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
579     index 08f20b7cd199..c76a0176b5c6 100644
580     --- a/drivers/md/bcache/journal.c
581     +++ b/drivers/md/bcache/journal.c
582     @@ -513,11 +513,11 @@ static void journal_reclaim(struct cache_set *c)
583     ca->sb.nr_this_dev);
584     }
585    
586     - bkey_init(k);
587     - SET_KEY_PTRS(k, n);
588     -
589     - if (n)
590     + if (n) {
591     + bkey_init(k);
592     + SET_KEY_PTRS(k, n);
593     c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
594     + }
595     out:
596     if (!journal_full(&c->journal))
597     __closure_wake_up(&c->journal.wait);
598     @@ -642,6 +642,9 @@ static void journal_write_unlocked(struct closure *cl)
599     ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
600     }
601    
602     + /* If KEY_PTRS(k) == 0, this jset gets lost in air */
603     + BUG_ON(i == 0);
604     +
605     atomic_dec_bug(&fifo_back(&c->journal.pin));
606     bch_journal_next(&c->journal);
607     journal_reclaim(c);
608     diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
609     index 894992ae9be0..362efc8dd16f 100644
610     --- a/drivers/md/bcache/super.c
611     +++ b/drivers/md/bcache/super.c
612     @@ -1357,6 +1357,7 @@ static void cache_set_free(struct closure *cl)
613     bch_btree_cache_free(c);
614     bch_journal_free(c);
615    
616     + mutex_lock(&bch_register_lock);
617     for_each_cache(ca, c, i)
618     if (ca) {
619     ca->set = NULL;
620     @@ -1379,7 +1380,6 @@ static void cache_set_free(struct closure *cl)
621     mempool_destroy(c->search);
622     kfree(c->devices);
623    
624     - mutex_lock(&bch_register_lock);
625     list_del(&c->list);
626     mutex_unlock(&bch_register_lock);
627    
628     diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
629     index b4d8ccfd9f7c..200b41576526 100644
630     --- a/drivers/pci/host/pci-hyperv.c
631     +++ b/drivers/pci/host/pci-hyperv.c
632     @@ -1620,6 +1620,7 @@ static void hv_eject_device_work(struct work_struct *work)
633     spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
634    
635     put_pcichild(hpdev, hv_pcidev_ref_childlist);
636     + put_pcichild(hpdev, hv_pcidev_ref_initial);
637     put_pcichild(hpdev, hv_pcidev_ref_pnp);
638     put_hvpcibus(hpdev->hbus);
639     }
640     diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
641     index 75b8e0c7402b..8a0a8fb915d6 100644
642     --- a/drivers/power/supply/axp288_charger.c
643     +++ b/drivers/power/supply/axp288_charger.c
644     @@ -899,6 +899,10 @@ static int axp288_charger_probe(struct platform_device *pdev)
645     /* Register charger interrupts */
646     for (i = 0; i < CHRG_INTR_END; i++) {
647     pirq = platform_get_irq(info->pdev, i);
648     + if (pirq < 0) {
649     + dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
650     + return pirq;
651     + }
652     info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
653     if (info->irq[i] < 0) {
654     dev_warn(&info->pdev->dev,
655     diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
656     index ece10e6b731b..e8a917a23ed9 100644
657     --- a/drivers/tty/vt/keyboard.c
658     +++ b/drivers/tty/vt/keyboard.c
659     @@ -121,6 +121,7 @@ static const int NR_TYPES = ARRAY_SIZE(max_vals);
660     static struct input_handler kbd_handler;
661     static DEFINE_SPINLOCK(kbd_event_lock);
662     static DEFINE_SPINLOCK(led_lock);
663     +static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
664     static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
665     static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
666     static bool dead_key_next;
667     @@ -1959,11 +1960,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
668     char *p;
669     u_char *q;
670     u_char __user *up;
671     - int sz;
672     + int sz, fnw_sz;
673     int delta;
674     char *first_free, *fj, *fnw;
675     int i, j, k;
676     int ret;
677     + unsigned long flags;
678    
679     if (!capable(CAP_SYS_TTY_CONFIG))
680     perm = 0;
681     @@ -2006,7 +2008,14 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
682     goto reterr;
683     }
684    
685     + fnw = NULL;
686     + fnw_sz = 0;
687     + /* race aginst other writers */
688     + again:
689     + spin_lock_irqsave(&func_buf_lock, flags);
690     q = func_table[i];
691     +
692     + /* fj pointer to next entry after 'q' */
693     first_free = funcbufptr + (funcbufsize - funcbufleft);
694     for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
695     ;
696     @@ -2014,10 +2023,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
697     fj = func_table[j];
698     else
699     fj = first_free;
700     -
701     + /* buffer usage increase by new entry */
702     delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string);
703     +
704     if (delta <= funcbufleft) { /* it fits in current buf */
705     if (j < MAX_NR_FUNC) {
706     + /* make enough space for new entry at 'fj' */
707     memmove(fj + delta, fj, first_free - fj);
708     for (k = j; k < MAX_NR_FUNC; k++)
709     if (func_table[k])
710     @@ -2030,20 +2041,28 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
711     sz = 256;
712     while (sz < funcbufsize - funcbufleft + delta)
713     sz <<= 1;
714     - fnw = kmalloc(sz, GFP_KERNEL);
715     - if(!fnw) {
716     - ret = -ENOMEM;
717     - goto reterr;
718     + if (fnw_sz != sz) {
719     + spin_unlock_irqrestore(&func_buf_lock, flags);
720     + kfree(fnw);
721     + fnw = kmalloc(sz, GFP_KERNEL);
722     + fnw_sz = sz;
723     + if (!fnw) {
724     + ret = -ENOMEM;
725     + goto reterr;
726     + }
727     + goto again;
728     }
729    
730     if (!q)
731     func_table[i] = fj;
732     + /* copy data before insertion point to new location */
733     if (fj > funcbufptr)
734     memmove(fnw, funcbufptr, fj - funcbufptr);
735     for (k = 0; k < j; k++)
736     if (func_table[k])
737     func_table[k] = fnw + (func_table[k] - funcbufptr);
738    
739     + /* copy data after insertion point to new location */
740     if (first_free > fj) {
741     memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj);
742     for (k = j; k < MAX_NR_FUNC; k++)
743     @@ -2056,7 +2075,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
744     funcbufleft = funcbufleft - delta + sz - funcbufsize;
745     funcbufsize = sz;
746     }
747     + /* finally insert item itself */
748     strcpy(func_table[i], kbs->kb_string);
749     + spin_unlock_irqrestore(&func_buf_lock, flags);
750     break;
751     }
752     ret = 0;
753     diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
754     index 85dc7ab8f89e..2973d256bb44 100644
755     --- a/fs/btrfs/backref.c
756     +++ b/fs/btrfs/backref.c
757     @@ -2018,13 +2018,19 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
758     extent_item_objectid);
759    
760     if (!search_commit_root) {
761     - trans = btrfs_join_transaction(fs_info->extent_root);
762     - if (IS_ERR(trans))
763     - return PTR_ERR(trans);
764     + trans = btrfs_attach_transaction(fs_info->extent_root);
765     + if (IS_ERR(trans)) {
766     + if (PTR_ERR(trans) != -ENOENT &&
767     + PTR_ERR(trans) != -EROFS)
768     + return PTR_ERR(trans);
769     + trans = NULL;
770     + }
771     + }
772     +
773     + if (trans)
774     btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
775     - } else {
776     + else
777     down_read(&fs_info->commit_root_sem);
778     - }
779    
780     ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
781     tree_mod_seq_elem.seq, &refs,
782     @@ -2056,7 +2062,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
783    
784     free_leaf_list(refs);
785     out:
786     - if (!search_commit_root) {
787     + if (trans) {
788     btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
789     btrfs_end_transaction(trans, fs_info->extent_root);
790     } else {
791     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
792     index 106a5bb3ae68..b2ba9955fa11 100644
793     --- a/fs/ext4/extents.c
794     +++ b/fs/ext4/extents.c
795     @@ -1047,6 +1047,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
796     __le32 border;
797     ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
798     int err = 0;
799     + size_t ext_size = 0;
800    
801     /* make decision: where to split? */
802     /* FIXME: now decision is simplest: at current extent */
803     @@ -1138,6 +1139,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
804     le16_add_cpu(&neh->eh_entries, m);
805     }
806    
807     + /* zero out unused area in the extent block */
808     + ext_size = sizeof(struct ext4_extent_header) +
809     + sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
810     + memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
811     ext4_extent_block_csum_set(inode, neh);
812     set_buffer_uptodate(bh);
813     unlock_buffer(bh);
814     @@ -1217,6 +1222,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
815     sizeof(struct ext4_extent_idx) * m);
816     le16_add_cpu(&neh->eh_entries, m);
817     }
818     + /* zero out unused area in the extent block */
819     + ext_size = sizeof(struct ext4_extent_header) +
820     + (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
821     + memset(bh->b_data + ext_size, 0,
822     + inode->i_sb->s_blocksize - ext_size);
823     ext4_extent_block_csum_set(inode, neh);
824     set_buffer_uptodate(bh);
825     unlock_buffer(bh);
826     @@ -1282,6 +1292,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
827     ext4_fsblk_t newblock, goal = 0;
828     struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
829     int err = 0;
830     + size_t ext_size = 0;
831    
832     /* Try to prepend new index to old one */
833     if (ext_depth(inode))
834     @@ -1307,9 +1318,11 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
835     goto out;
836     }
837    
838     + ext_size = sizeof(EXT4_I(inode)->i_data);
839     /* move top-level index/leaf into new block */
840     - memmove(bh->b_data, EXT4_I(inode)->i_data,
841     - sizeof(EXT4_I(inode)->i_data));
842     + memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
843     + /* zero out unused area in the extent block */
844     + memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
845    
846     /* set size of new block */
847     neh = ext_block_hdr(bh);
848     diff --git a/fs/ext4/file.c b/fs/ext4/file.c
849     index fe76d0957a1f..59d3ea7094a0 100644
850     --- a/fs/ext4/file.c
851     +++ b/fs/ext4/file.c
852     @@ -163,6 +163,13 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
853     }
854    
855     ret = __generic_file_write_iter(iocb, from);
856     + /*
857     + * Unaligned direct AIO must be the only IO in flight. Otherwise
858     + * overlapping aligned IO after unaligned might result in data
859     + * corruption.
860     + */
861     + if (ret == -EIOCBQUEUED && unaligned_aio)
862     + ext4_unwritten_wait(inode);
863     inode_unlock(inode);
864    
865     if (ret > 0)
866     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
867     index 2ce73287b53c..baa2f6375226 100644
868     --- a/fs/ext4/ioctl.c
869     +++ b/fs/ext4/ioctl.c
870     @@ -727,7 +727,7 @@ group_add_out:
871     if (err == 0)
872     err = err2;
873     mnt_drop_write_file(filp);
874     - if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
875     + if (!err && (o_group < EXT4_SB(sb)->s_groups_count) &&
876     ext4_has_group_desc_csum(sb) &&
877     test_opt(sb, INIT_INODE_TABLE))
878     err = ext4_register_li_request(sb, o_group);
879     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
880     index a6c7ace9cfd1..3261478bfc32 100644
881     --- a/fs/ext4/super.c
882     +++ b/fs/ext4/super.c
883     @@ -4034,7 +4034,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
884     "data=, fs mounted w/o journal");
885     goto failed_mount_wq;
886     }
887     - sbi->s_def_mount_opt &= EXT4_MOUNT_JOURNAL_CHECKSUM;
888     + sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
889     clear_opt(sb, JOURNAL_CHECKSUM);
890     clear_opt(sb, DATA_FLAGS);
891     sbi->s_journal = NULL;
892     diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
893     index f3aea1b8702c..8b93d4b98428 100644
894     --- a/fs/fs-writeback.c
895     +++ b/fs/fs-writeback.c
896     @@ -331,11 +331,22 @@ struct inode_switch_wbs_context {
897     struct work_struct work;
898     };
899    
900     +static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
901     +{
902     + down_write(&bdi->wb_switch_rwsem);
903     +}
904     +
905     +static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
906     +{
907     + up_write(&bdi->wb_switch_rwsem);
908     +}
909     +
910     static void inode_switch_wbs_work_fn(struct work_struct *work)
911     {
912     struct inode_switch_wbs_context *isw =
913     container_of(work, struct inode_switch_wbs_context, work);
914     struct inode *inode = isw->inode;
915     + struct backing_dev_info *bdi = inode_to_bdi(inode);
916     struct address_space *mapping = inode->i_mapping;
917     struct bdi_writeback *old_wb = inode->i_wb;
918     struct bdi_writeback *new_wb = isw->new_wb;
919     @@ -343,6 +354,12 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
920     bool switched = false;
921     void **slot;
922    
923     + /*
924     + * If @inode switches cgwb membership while sync_inodes_sb() is
925     + * being issued, sync_inodes_sb() might miss it. Synchronize.
926     + */
927     + down_read(&bdi->wb_switch_rwsem);
928     +
929     /*
930     * By the time control reaches here, RCU grace period has passed
931     * since I_WB_SWITCH assertion and all wb stat update transactions
932     @@ -435,6 +452,8 @@ skip_switch:
933     spin_unlock(&new_wb->list_lock);
934     spin_unlock(&old_wb->list_lock);
935    
936     + up_read(&bdi->wb_switch_rwsem);
937     +
938     if (switched) {
939     wb_wakeup(new_wb);
940     wb_put(old_wb);
941     @@ -475,9 +494,18 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
942     if (inode->i_state & I_WB_SWITCH)
943     return;
944    
945     + /*
946     + * Avoid starting new switches while sync_inodes_sb() is in
947     + * progress. Otherwise, if the down_write protected issue path
948     + * blocks heavily, we might end up starting a large number of
949     + * switches which will block on the rwsem.
950     + */
951     + if (!down_read_trylock(&bdi->wb_switch_rwsem))
952     + return;
953     +
954     isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
955     if (!isw)
956     - return;
957     + goto out_unlock;
958    
959     /* find and pin the new wb */
960     rcu_read_lock();
961     @@ -502,8 +530,6 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
962    
963     isw->inode = inode;
964    
965     - atomic_inc(&isw_nr_in_flight);
966     -
967     /*
968     * In addition to synchronizing among switchers, I_WB_SWITCH tells
969     * the RCU protected stat update paths to grab the mapping's
970     @@ -511,12 +537,17 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
971     * Let's continue after I_WB_SWITCH is guaranteed to be visible.
972     */
973     call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
974     - return;
975     +
976     + atomic_inc(&isw_nr_in_flight);
977     +
978     + goto out_unlock;
979    
980     out_free:
981     if (isw->new_wb)
982     wb_put(isw->new_wb);
983     kfree(isw);
984     +out_unlock:
985     + up_read(&bdi->wb_switch_rwsem);
986     }
987    
988     /**
989     @@ -878,7 +909,11 @@ restart:
990     void cgroup_writeback_umount(void)
991     {
992     if (atomic_read(&isw_nr_in_flight)) {
993     - synchronize_rcu();
994     + /*
995     + * Use rcu_barrier() to wait for all pending callbacks to
996     + * ensure that all in-flight wb switches are in the workqueue.
997     + */
998     + rcu_barrier();
999     flush_workqueue(isw_wq);
1000     }
1001     }
1002     @@ -894,6 +929,9 @@ fs_initcall(cgroup_writeback_init);
1003    
1004     #else /* CONFIG_CGROUP_WRITEBACK */
1005    
1006     +static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1007     +static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1008     +
1009     static struct bdi_writeback *
1010     locked_inode_to_wb_and_lock_list(struct inode *inode)
1011     __releases(&inode->i_lock)
1012     @@ -2408,8 +2446,11 @@ void sync_inodes_sb(struct super_block *sb)
1013     return;
1014     WARN_ON(!rwsem_is_locked(&sb->s_umount));
1015    
1016     + /* protect against inode wb switch, see inode_switch_wbs_work_fn() */
1017     + bdi_down_write_wb_switch_rwsem(bdi);
1018     bdi_split_work_to_wbs(bdi, &work, false);
1019     wb_wait_for_completion(bdi, &done);
1020     + bdi_up_write_wb_switch_rwsem(bdi);
1021    
1022     wait_sb_inodes(sb);
1023     }
1024     diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
1025     index d10bb2c30bf8..3cbcf649ac66 100644
1026     --- a/fs/jbd2/journal.c
1027     +++ b/fs/jbd2/journal.c
1028     @@ -1339,6 +1339,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
1029     journal_superblock_t *sb = journal->j_superblock;
1030     int ret;
1031    
1032     + /* Buffer got discarded which means block device got invalidated */
1033     + if (!buffer_mapped(bh))
1034     + return -EIO;
1035     +
1036     trace_jbd2_write_superblock(journal, write_flags);
1037     if (!(journal->j_flags & JBD2_BARRIER))
1038     write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
1039     diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
1040     index 3494e220b510..bed15dec3c16 100644
1041     --- a/fs/ocfs2/export.c
1042     +++ b/fs/ocfs2/export.c
1043     @@ -148,16 +148,24 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
1044     u64 blkno;
1045     struct dentry *parent;
1046     struct inode *dir = d_inode(child);
1047     + int set;
1048    
1049     trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
1050     (unsigned long long)OCFS2_I(dir)->ip_blkno);
1051    
1052     + status = ocfs2_nfs_sync_lock(OCFS2_SB(dir->i_sb), 1);
1053     + if (status < 0) {
1054     + mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status);
1055     + parent = ERR_PTR(status);
1056     + goto bail;
1057     + }
1058     +
1059     status = ocfs2_inode_lock(dir, NULL, 0);
1060     if (status < 0) {
1061     if (status != -ENOENT)
1062     mlog_errno(status);
1063     parent = ERR_PTR(status);
1064     - goto bail;
1065     + goto unlock_nfs_sync;
1066     }
1067    
1068     status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
1069     @@ -166,11 +174,31 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
1070     goto bail_unlock;
1071     }
1072    
1073     + status = ocfs2_test_inode_bit(OCFS2_SB(dir->i_sb), blkno, &set);
1074     + if (status < 0) {
1075     + if (status == -EINVAL) {
1076     + status = -ESTALE;
1077     + } else
1078     + mlog(ML_ERROR, "test inode bit failed %d\n", status);
1079     + parent = ERR_PTR(status);
1080     + goto bail_unlock;
1081     + }
1082     +
1083     + trace_ocfs2_get_dentry_test_bit(status, set);
1084     + if (!set) {
1085     + status = -ESTALE;
1086     + parent = ERR_PTR(status);
1087     + goto bail_unlock;
1088     + }
1089     +
1090     parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
1091    
1092     bail_unlock:
1093     ocfs2_inode_unlock(dir, 0);
1094    
1095     +unlock_nfs_sync:
1096     + ocfs2_nfs_sync_unlock(OCFS2_SB(dir->i_sb), 1);
1097     +
1098     bail:
1099     trace_ocfs2_get_parent_end(parent);
1100    
1101     diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
1102     index 4ea779b25a51..34056ec64c7c 100644
1103     --- a/include/linux/backing-dev-defs.h
1104     +++ b/include/linux/backing-dev-defs.h
1105     @@ -157,6 +157,7 @@ struct backing_dev_info {
1106     struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
1107     struct rb_root cgwb_congested_tree; /* their congested states */
1108     atomic_t usage_cnt; /* counts both cgwbs and cgwb_contested's */
1109     + struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
1110     #else
1111     struct bdi_writeback_congested *wb_congested;
1112     #endif
1113     diff --git a/include/linux/list.h b/include/linux/list.h
1114     index 5809e9a2de5b..6f935018ea05 100644
1115     --- a/include/linux/list.h
1116     +++ b/include/linux/list.h
1117     @@ -271,6 +271,36 @@ static inline void list_cut_position(struct list_head *list,
1118     __list_cut_position(list, head, entry);
1119     }
1120    
1121     +/**
1122     + * list_cut_before - cut a list into two, before given entry
1123     + * @list: a new list to add all removed entries
1124     + * @head: a list with entries
1125     + * @entry: an entry within head, could be the head itself
1126     + *
1127     + * This helper moves the initial part of @head, up to but
1128     + * excluding @entry, from @head to @list. You should pass
1129     + * in @entry an element you know is on @head. @list should
1130     + * be an empty list or a list you do not care about losing
1131     + * its data.
1132     + * If @entry == @head, all entries on @head are moved to
1133     + * @list.
1134     + */
1135     +static inline void list_cut_before(struct list_head *list,
1136     + struct list_head *head,
1137     + struct list_head *entry)
1138     +{
1139     + if (head->next == entry) {
1140     + INIT_LIST_HEAD(list);
1141     + return;
1142     + }
1143     + list->next = head->next;
1144     + list->next->prev = list;
1145     + list->prev = entry->prev;
1146     + list->prev->next = list;
1147     + head->next = entry;
1148     + entry->prev = head;
1149     +}
1150     +
1151     static inline void __list_splice(const struct list_head *list,
1152     struct list_head *prev,
1153     struct list_head *next)
1154     diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
1155     index 5d42859cb441..844fc2973392 100644
1156     --- a/include/linux/mfd/da9063/registers.h
1157     +++ b/include/linux/mfd/da9063/registers.h
1158     @@ -215,9 +215,9 @@
1159    
1160     /* DA9063 Configuration registers */
1161     /* OTP */
1162     -#define DA9063_REG_OPT_COUNT 0x101
1163     -#define DA9063_REG_OPT_ADDR 0x102
1164     -#define DA9063_REG_OPT_DATA 0x103
1165     +#define DA9063_REG_OTP_CONT 0x101
1166     +#define DA9063_REG_OTP_ADDR 0x102
1167     +#define DA9063_REG_OTP_DATA 0x103
1168    
1169     /* Customer Trim and Configuration */
1170     #define DA9063_REG_T_OFFSET 0x104
1171     diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
1172     index 3ca0af07fc78..0a68dc8fc25f 100644
1173     --- a/include/linux/mfd/max77620.h
1174     +++ b/include/linux/mfd/max77620.h
1175     @@ -136,8 +136,8 @@
1176     #define MAX77620_FPS_PERIOD_MIN_US 40
1177     #define MAX20024_FPS_PERIOD_MIN_US 20
1178    
1179     -#define MAX77620_FPS_PERIOD_MAX_US 2560
1180     -#define MAX20024_FPS_PERIOD_MAX_US 5120
1181     +#define MAX20024_FPS_PERIOD_MAX_US 2560
1182     +#define MAX77620_FPS_PERIOD_MAX_US 5120
1183    
1184     #define MAX77620_REG_FPS_GPIO1 0x54
1185     #define MAX77620_REG_FPS_GPIO2 0x55
1186     diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
1187     index be06c45cbe4f..0cdbb636e316 100644
1188     --- a/kernel/locking/rwsem-xadd.c
1189     +++ b/kernel/locking/rwsem-xadd.c
1190     @@ -127,6 +127,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
1191     {
1192     struct rwsem_waiter *waiter, *tmp;
1193     long oldcount, woken = 0, adjustment = 0;
1194     + struct list_head wlist;
1195    
1196     /*
1197     * Take a peek at the queue head waiter such that we can determine
1198     @@ -185,18 +186,42 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
1199     * of the queue. We know that woken will be at least 1 as we accounted
1200     * for above. Note we increment the 'active part' of the count by the
1201     * number of readers before waking any processes up.
1202     + *
1203     + * We have to do wakeup in 2 passes to prevent the possibility that
1204     + * the reader count may be decremented before it is incremented. It
1205     + * is because the to-be-woken waiter may not have slept yet. So it
1206     + * may see waiter->task got cleared, finish its critical section and
1207     + * do an unlock before the reader count increment.
1208     + *
1209     + * 1) Collect the read-waiters in a separate list, count them and
1210     + * fully increment the reader count in rwsem.
1211     + * 2) For each waiters in the new list, clear waiter->task and
1212     + * put them into wake_q to be woken up later.
1213     */
1214     - list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
1215     - struct task_struct *tsk;
1216     -
1217     + list_for_each_entry(waiter, &sem->wait_list, list) {
1218     if (waiter->type == RWSEM_WAITING_FOR_WRITE)
1219     break;
1220    
1221     woken++;
1222     - tsk = waiter->task;
1223     + }
1224     + list_cut_before(&wlist, &sem->wait_list, &waiter->list);
1225     +
1226     + adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
1227     + if (list_empty(&sem->wait_list)) {
1228     + /* hit end of list above */
1229     + adjustment -= RWSEM_WAITING_BIAS;
1230     + }
1231     +
1232     + if (adjustment)
1233     + atomic_long_add(adjustment, &sem->count);
1234     +
1235     + /* 2nd pass */
1236     + list_for_each_entry_safe(waiter, tmp, &wlist, list) {
1237     + struct task_struct *tsk;
1238    
1239     + tsk = waiter->task;
1240     get_task_struct(tsk);
1241     - list_del(&waiter->list);
1242     +
1243     /*
1244     * Ensure calling get_task_struct() before setting the reader
1245     * waiter to nil such that rwsem_down_read_failed() cannot
1246     @@ -212,15 +237,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
1247     /* wake_q_add() already take the task ref */
1248     put_task_struct(tsk);
1249     }
1250     -
1251     - adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
1252     - if (list_empty(&sem->wait_list)) {
1253     - /* hit end of list above */
1254     - adjustment -= RWSEM_WAITING_BIAS;
1255     - }
1256     -
1257     - if (adjustment)
1258     - atomic_long_add(adjustment, &sem->count);
1259     }
1260    
1261     /*
1262     diff --git a/mm/backing-dev.c b/mm/backing-dev.c
1263     index 6ff2d7744223..113b7d317079 100644
1264     --- a/mm/backing-dev.c
1265     +++ b/mm/backing-dev.c
1266     @@ -669,6 +669,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
1267     INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
1268     bdi->cgwb_congested_tree = RB_ROOT;
1269     atomic_set(&bdi->usage_cnt, 1);
1270     + init_rwsem(&bdi->wb_switch_rwsem);
1271    
1272     ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
1273     if (!ret) {
1274     diff --git a/mm/mincore.c b/mm/mincore.c
1275     index bfb866435478..3b6a883d0926 100644
1276     --- a/mm/mincore.c
1277     +++ b/mm/mincore.c
1278     @@ -167,6 +167,22 @@ out:
1279     return 0;
1280     }
1281    
1282     +static inline bool can_do_mincore(struct vm_area_struct *vma)
1283     +{
1284     + if (vma_is_anonymous(vma))
1285     + return true;
1286     + if (!vma->vm_file)
1287     + return false;
1288     + /*
1289     + * Reveal pagecache information only for non-anonymous mappings that
1290     + * correspond to the files the calling process could (if tried) open
1291     + * for writing; otherwise we'd be including shared non-exclusive
1292     + * mappings, which opens a side channel.
1293     + */
1294     + return inode_owner_or_capable(file_inode(vma->vm_file)) ||
1295     + inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
1296     +}
1297     +
1298     /*
1299     * Do a chunk of "sys_mincore()". We've already checked
1300     * all the arguments, we hold the mmap semaphore: we should
1301     @@ -187,8 +203,13 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
1302     vma = find_vma(current->mm, addr);
1303     if (!vma || addr < vma->vm_start)
1304     return -ENOMEM;
1305     - mincore_walk.mm = vma->vm_mm;
1306     end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
1307     + if (!can_do_mincore(vma)) {
1308     + unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
1309     + memset(vec, 1, pages);
1310     + return pages;
1311     + }
1312     + mincore_walk.mm = vma->vm_mm;
1313     err = walk_page_range(addr, end, &mincore_walk);
1314     if (err < 0)
1315     return err;
1316     diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
1317     index bb26457e8c21..c03dd2104d33 100644
1318     --- a/net/core/fib_rules.c
1319     +++ b/net/core/fib_rules.c
1320     @@ -430,6 +430,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
1321     goto errout_free;
1322    
1323     if (rule_exists(ops, frh, tb, rule)) {
1324     + err = 0;
1325     if (nlh->nlmsg_flags & NLM_F_EXCL)
1326     err = -EEXIST;
1327     goto errout_free;
1328     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
1329     index 76ae627e3f93..7a2943a338bf 100644
1330     --- a/sound/pci/hda/patch_hdmi.c
1331     +++ b/sound/pci/hda/patch_hdmi.c
1332     @@ -1447,9 +1447,11 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
1333     ret = !repoll || !eld->monitor_present || eld->eld_valid;
1334    
1335     jack = snd_hda_jack_tbl_get(codec, pin_nid);
1336     - if (jack)
1337     + if (jack) {
1338     jack->block_report = !ret;
1339     -
1340     + jack->pin_sense = (eld->monitor_present && eld->eld_valid) ?
1341     + AC_PINSENSE_PRESENCE : 0;
1342     + }
1343     mutex_unlock(&per_pin->lock);
1344     return ret;
1345     }
1346     @@ -1554,6 +1556,11 @@ static void hdmi_repoll_eld(struct work_struct *work)
1347     container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
1348     struct hda_codec *codec = per_pin->codec;
1349     struct hdmi_spec *spec = codec->spec;
1350     + struct hda_jack_tbl *jack;
1351     +
1352     + jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
1353     + if (jack)
1354     + jack->jack_dirty = 1;
1355    
1356     if (per_pin->repoll_count++ > 6)
1357     per_pin->repoll_count = 0;
1358     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1359     index 0fc05ebdf81a..822650d907fa 100644
1360     --- a/sound/pci/hda/patch_realtek.c
1361     +++ b/sound/pci/hda/patch_realtek.c
1362     @@ -773,11 +773,10 @@ static int alc_init(struct hda_codec *codec)
1363     if (spec->init_hook)
1364     spec->init_hook(codec);
1365    
1366     + snd_hda_gen_init(codec);
1367     alc_fix_pll(codec);
1368     alc_auto_init_amp(codec, spec->init_amp);
1369    
1370     - snd_hda_gen_init(codec);
1371     -
1372     snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
1373    
1374     return 0;
1375     @@ -5855,7 +5854,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1376     SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
1377     SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
1378     SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
1379     - SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
1380     + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
1381     SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
1382     SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
1383     SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
1384     diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
1385     index 584aab83e478..3e65dc74eb33 100644
1386     --- a/sound/soc/codecs/max98090.c
1387     +++ b/sound/soc/codecs/max98090.c
1388     @@ -1209,14 +1209,14 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
1389     &max98090_right_rcv_mixer_controls[0],
1390     ARRAY_SIZE(max98090_right_rcv_mixer_controls)),
1391    
1392     - SND_SOC_DAPM_MUX("LINMOD Mux", M98090_REG_LOUTR_MIXER,
1393     - M98090_LINMOD_SHIFT, 0, &max98090_linmod_mux),
1394     + SND_SOC_DAPM_MUX("LINMOD Mux", SND_SOC_NOPM, 0, 0,
1395     + &max98090_linmod_mux),
1396    
1397     - SND_SOC_DAPM_MUX("MIXHPLSEL Mux", M98090_REG_HP_CONTROL,
1398     - M98090_MIXHPLSEL_SHIFT, 0, &max98090_mixhplsel_mux),
1399     + SND_SOC_DAPM_MUX("MIXHPLSEL Mux", SND_SOC_NOPM, 0, 0,
1400     + &max98090_mixhplsel_mux),
1401    
1402     - SND_SOC_DAPM_MUX("MIXHPRSEL Mux", M98090_REG_HP_CONTROL,
1403     - M98090_MIXHPRSEL_SHIFT, 0, &max98090_mixhprsel_mux),
1404     + SND_SOC_DAPM_MUX("MIXHPRSEL Mux", SND_SOC_NOPM, 0, 0,
1405     + &max98090_mixhprsel_mux),
1406    
1407     SND_SOC_DAPM_PGA("HP Left Out", M98090_REG_OUTPUT_ENABLE,
1408     M98090_HPLEN_SHIFT, 0, NULL, 0),
1409     diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
1410     index 91879ea95415..01aa75cde571 100644
1411     --- a/sound/soc/codecs/rt5677-spi.c
1412     +++ b/sound/soc/codecs/rt5677-spi.c
1413     @@ -60,13 +60,15 @@ static DEFINE_MUTEX(spi_mutex);
1414     * RT5677_SPI_READ/WRITE_32: Transfer 4 bytes
1415     * RT5677_SPI_READ/WRITE_BURST: Transfer any multiples of 8 bytes
1416     *
1417     - * For example, reading 260 bytes at 0x60030002 uses the following commands:
1418     - * 0x60030002 RT5677_SPI_READ_16 2 bytes
1419     + * Note:
1420     + * 16 Bit writes and reads are restricted to the address range
1421     + * 0x18020000 ~ 0x18021000
1422     + *
1423     + * For example, reading 256 bytes at 0x60030004 uses the following commands:
1424     * 0x60030004 RT5677_SPI_READ_32 4 bytes
1425     * 0x60030008 RT5677_SPI_READ_BURST 240 bytes
1426     * 0x600300F8 RT5677_SPI_READ_BURST 8 bytes
1427     * 0x60030100 RT5677_SPI_READ_32 4 bytes
1428     - * 0x60030104 RT5677_SPI_READ_16 2 bytes
1429     *
1430     * Input:
1431     * @read: true for read commands; false for write commands
1432     @@ -81,15 +83,13 @@ static u8 rt5677_spi_select_cmd(bool read, u32 align, u32 remain, u32 *len)
1433     {
1434     u8 cmd;
1435    
1436     - if (align == 2 || align == 6 || remain == 2) {
1437     - cmd = RT5677_SPI_READ_16;
1438     - *len = 2;
1439     - } else if (align == 4 || remain <= 6) {
1440     + if (align == 4 || remain <= 4) {
1441     cmd = RT5677_SPI_READ_32;
1442     *len = 4;
1443     } else {
1444     cmd = RT5677_SPI_READ_BURST;
1445     - *len = min_t(u32, remain & ~7, RT5677_SPI_BURST_LEN);
1446     + *len = (((remain - 1) >> 3) + 1) << 3;
1447     + *len = min_t(u32, *len, RT5677_SPI_BURST_LEN);
1448     }
1449     return read ? cmd : cmd + 1;
1450     }
1451     @@ -110,7 +110,7 @@ static void rt5677_spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen)
1452     }
1453     }
1454    
1455     -/* Read DSP address space using SPI. addr and len have to be 2-byte aligned. */
1456     +/* Read DSP address space using SPI. addr and len have to be 4-byte aligned. */
1457     int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
1458     {
1459     u32 offset;
1460     @@ -126,7 +126,7 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
1461     if (!g_spi)
1462     return -ENODEV;
1463    
1464     - if ((addr & 1) || (len & 1)) {
1465     + if ((addr & 3) || (len & 3)) {
1466     dev_err(&g_spi->dev, "Bad read align 0x%x(%zu)\n", addr, len);
1467     return -EACCES;
1468     }
1469     @@ -161,13 +161,13 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
1470     }
1471     EXPORT_SYMBOL_GPL(rt5677_spi_read);
1472    
1473     -/* Write DSP address space using SPI. addr has to be 2-byte aligned.
1474     - * If len is not 2-byte aligned, an extra byte of zero is written at the end
1475     +/* Write DSP address space using SPI. addr has to be 4-byte aligned.
1476     + * If len is not 4-byte aligned, then extra zeros are written at the end
1477     * as padding.
1478     */
1479     int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
1480     {
1481     - u32 offset, len_with_pad = len;
1482     + u32 offset;
1483     int status = 0;
1484     struct spi_transfer t;
1485     struct spi_message m;
1486     @@ -180,22 +180,19 @@ int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
1487     if (!g_spi)
1488     return -ENODEV;
1489    
1490     - if (addr & 1) {
1491     + if (addr & 3) {
1492     dev_err(&g_spi->dev, "Bad write align 0x%x(%zu)\n", addr, len);
1493     return -EACCES;
1494     }
1495    
1496     - if (len & 1)
1497     - len_with_pad = len + 1;
1498     -
1499     memset(&t, 0, sizeof(t));
1500     t.tx_buf = buf;
1501     t.speed_hz = RT5677_SPI_FREQ;
1502     spi_message_init_with_transfers(&m, &t, 1);
1503    
1504     - for (offset = 0; offset < len_with_pad;) {
1505     + for (offset = 0; offset < len;) {
1506     spi_cmd = rt5677_spi_select_cmd(false, (addr + offset) & 7,
1507     - len_with_pad - offset, &t.len);
1508     + len - offset, &t.len);
1509    
1510     /* Construct SPI message header */
1511     buf[0] = spi_cmd;
1512     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
1513     index 64b90b8ec661..248a4bd82397 100644
1514     --- a/sound/usb/mixer.c
1515     +++ b/sound/usb/mixer.c
1516     @@ -2178,6 +2178,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
1517     kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval);
1518     if (! kctl) {
1519     usb_audio_err(state->chip, "cannot malloc kcontrol\n");
1520     + for (i = 0; i < desc->bNrInPins; i++)
1521     + kfree(namelist[i]);
1522     kfree(namelist);
1523     kfree(cval);
1524     return -ENOMEM;
1525     diff --git a/tools/objtool/check.c b/tools/objtool/check.c
1526     index 3ff025b64527..ae3446768181 100644
1527     --- a/tools/objtool/check.c
1528     +++ b/tools/objtool/check.c
1529     @@ -1779,7 +1779,8 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
1530     return 1;
1531     }
1532    
1533     - func = insn->func ? insn->func->pfunc : NULL;
1534     + if (insn->func)
1535     + func = insn->func->pfunc;
1536    
1537     if (func && insn->ignore) {
1538     WARN_FUNC("BUG: why am I validating an ignored function?",