Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0144-4.19.45-all-fixes.patch



Revision 3423
Fri Aug 2 11:47:52 2019 UTC by niro
File size: 139914 bytes
-linux-4.19.45
1 niro 3423 diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
2     index 534e9baa4e1d..5d4330be200f 100644
3     --- a/Documentation/x86/mds.rst
4     +++ b/Documentation/x86/mds.rst
5     @@ -142,45 +142,13 @@ Mitigation points
6     mds_user_clear.
7    
8     The mitigation is invoked in prepare_exit_to_usermode() which covers
9     - most of the kernel to user space transitions. There are a few exceptions
10     - which are not invoking prepare_exit_to_usermode() on return to user
11     - space. These exceptions use the paranoid exit code.
12     + all but one of the kernel to user space transitions. The exception
13     + is when we return from a Non Maskable Interrupt (NMI), which is
14     + handled directly in do_nmi().
15    
16     - - Non Maskable Interrupt (NMI):
17     -
18     - Access to sensible data like keys, credentials in the NMI context is
19     - mostly theoretical: The CPU can do prefetching or execute a
20     - misspeculated code path and thereby fetching data which might end up
21     - leaking through a buffer.
22     -
23     - But for mounting other attacks the kernel stack address of the task is
24     - already valuable information. So in full mitigation mode, the NMI is
25     - mitigated on the return from do_nmi() to provide almost complete
26     - coverage.
27     -
28     - - Double fault (#DF):
29     -
30     - A double fault is usually fatal, but the ESPFIX workaround, which can
31     - be triggered from user space through modify_ldt(2) is a recoverable
32     - double fault. #DF uses the paranoid exit path, so explicit mitigation
33     - in the double fault handler is required.
34     -
35     - - Machine Check Exception (#MC):
36     -
37     - Another corner case is a #MC which hits between the CPU buffer clear
38     - invocation and the actual return to user. As this still is in kernel
39     - space it takes the paranoid exit path which does not clear the CPU
40     - buffers. So the #MC handler repopulates the buffers to some
41     - extent. Machine checks are not reliably controllable and the window is
42     - extremly small so mitigation would just tick a checkbox that this
43     - theoretical corner case is covered. To keep the amount of special
44     - cases small, ignore #MC.
45     -
46     - - Debug Exception (#DB):
47     -
48     - This takes the paranoid exit path only when the INT1 breakpoint is in
49     - kernel space. #DB on a user space address takes the regular exit path,
50     - so no extra mitigation required.
51     + (The reason that NMI is special is that prepare_exit_to_usermode() can
52     + enable IRQs. In NMI context, NMIs are blocked, and we don't want to
53     + enable IRQs with NMIs blocked.)
54    
55    
56     2. C-State transition
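
The documentation hunk above refers to the mds_user_clear static key that gates the CPU buffer clear on the user-space exit path. As a minimal C sketch of that gate (matching the helper whose call site is removed from do_double_fault() further down in this patch; static_branch_likely() and mds_clear_cpu_buffers() are the kernel primitives involved):

    /* Sketch: conditionally flush CPU buffers on return to user space.
     * mds_clear_cpu_buffers() issues VERW, which clears the affected
     * buffers on MDS-vulnerable CPUs. */
    static inline void mds_user_clear_cpu_buffers(void)
    {
            if (static_branch_likely(&mds_user_clear))
                    mds_clear_cpu_buffers();
    }
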
57     diff --git a/Makefile b/Makefile
58     index dd11f5a83d2f..b21dd3866b63 100644
59     --- a/Makefile
60     +++ b/Makefile
61     @@ -1,7 +1,7 @@
62     # SPDX-License-Identifier: GPL-2.0
63     VERSION = 4
64     PATCHLEVEL = 19
65     -SUBLEVEL = 44
66     +SUBLEVEL = 45
67     EXTRAVERSION =
68     NAME = "People's Front"
69    
70     @@ -623,7 +623,7 @@ ifeq ($(may-sync-config),1)
71     # Read in dependencies to all Kconfig* files, make sure to run syncconfig if
72     # changes are detected. This should be included after arch/$(SRCARCH)/Makefile
73     # because some architectures define CROSS_COMPILE there.
74     --include include/config/auto.conf.cmd
75     +include include/config/auto.conf.cmd
76    
77     # To avoid any implicit rule to kick in, define an empty command
78     $(KCONFIG_CONFIG): ;
79     diff --git a/arch/arm/boot/dts/exynos5260.dtsi b/arch/arm/boot/dts/exynos5260.dtsi
80     index 55167850619c..33a085ffc447 100644
81     --- a/arch/arm/boot/dts/exynos5260.dtsi
82     +++ b/arch/arm/boot/dts/exynos5260.dtsi
83     @@ -223,7 +223,7 @@
84     wakeup-interrupt-controller {
85     compatible = "samsung,exynos4210-wakeup-eint";
86     interrupt-parent = <&gic>;
87     - interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
88     + interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
89     };
90     };
91    
92     diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
93     index e84544b220b9..b90cea8b7368 100644
94     --- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
95     +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
96     @@ -22,7 +22,7 @@
97     "Headphone Jack", "HPL",
98     "Headphone Jack", "HPR",
99     "Headphone Jack", "MICBIAS",
100     - "IN1", "Headphone Jack",
101     + "IN12", "Headphone Jack",
102     "Speakers", "SPKL",
103     "Speakers", "SPKR";
104    
105     diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
106     index 07e31941dc67..617c2c99ebfb 100644
107     --- a/arch/arm/crypto/aes-neonbs-glue.c
108     +++ b/arch/arm/crypto/aes-neonbs-glue.c
109     @@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_request *req,
110     int err;
111    
112     err = skcipher_walk_virt(&walk, req, true);
113     + if (err)
114     + return err;
115    
116     crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
117    
118     diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
119     index be1f20fe28f4..fbe1db61115f 100644
120     --- a/arch/arm/mach-exynos/firmware.c
121     +++ b/arch/arm/mach-exynos/firmware.c
122     @@ -196,6 +196,7 @@ void __init exynos_firmware_init(void)
123     return;
124    
125     addr = of_get_address(nd, 0, NULL, NULL);
126     + of_node_put(nd);
127     if (!addr) {
128     pr_err("%s: No address specified.\n", __func__);
129     return;
130     diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
131     index 7ead3acd6fa4..b1fe53e8b460 100644
132     --- a/arch/arm/mach-exynos/suspend.c
133     +++ b/arch/arm/mach-exynos/suspend.c
134     @@ -639,8 +639,10 @@ void __init exynos_pm_init(void)
135    
136     if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
137     pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
138     + of_node_put(np);
139     return;
140     }
141     + of_node_put(np);
142    
143     pm_data = (const struct exynos_pm_data *) match->data;
144    
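
Both exynos hunks above plug the same leak class: of_find_*() returns a device node with its reference count raised, and every exit path has to drop it with of_node_put(). A minimal kernel-style sketch of the corrected pattern (the compatible string "vendor,example-ip" is a placeholder, not from this patch):

    #include <linux/of.h>

    static void __init probe_example_ip(void)
    {
            struct device_node *np;

            np = of_find_compatible_node(NULL, NULL, "vendor,example-ip");
            if (!np)
                    return;

            /* ... read properties/addresses from np ... */

            of_node_put(np);        /* drop the reference on every path out */
    }
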
145     diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
146     index c88e603396f6..df7e62d9a670 100644
147     --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
148     +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
149     @@ -305,6 +305,7 @@
150     phys = <&emmc_phy>;
151     phy-names = "phy_arasan";
152     power-domains = <&power RK3399_PD_EMMC>;
153     + disable-cqe-dcmd;
154     status = "disabled";
155     };
156    
157     diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
158     index e7a95a566462..5cc248967387 100644
159     --- a/arch/arm64/crypto/aes-neonbs-glue.c
160     +++ b/arch/arm64/crypto/aes-neonbs-glue.c
161     @@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req,
162     int err;
163    
164     err = skcipher_walk_virt(&walk, req, false);
165     + if (err)
166     + return err;
167    
168     kernel_neon_begin();
169     neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
170     diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
171     index 067d8937d5af..1ed227bf6106 100644
172     --- a/arch/arm64/crypto/ghash-ce-glue.c
173     +++ b/arch/arm64/crypto/ghash-ce-glue.c
174     @@ -418,9 +418,11 @@ static int gcm_encrypt(struct aead_request *req)
175     put_unaligned_be32(2, iv + GCM_IV_SIZE);
176    
177     while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
178     - int blocks = walk.nbytes / AES_BLOCK_SIZE;
179     + const int blocks =
180     + walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
181     u8 *dst = walk.dst.virt.addr;
182     u8 *src = walk.src.virt.addr;
183     + int remaining = blocks;
184    
185     do {
186     __aes_arm64_encrypt(ctx->aes_key.key_enc,
187     @@ -430,9 +432,9 @@ static int gcm_encrypt(struct aead_request *req)
188    
189     dst += AES_BLOCK_SIZE;
190     src += AES_BLOCK_SIZE;
191     - } while (--blocks > 0);
192     + } while (--remaining > 0);
193    
194     - ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
195     + ghash_do_update(blocks, dg,
196     walk.dst.virt.addr, &ctx->ghash_key,
197     NULL);
198    
199     @@ -553,7 +555,7 @@ static int gcm_decrypt(struct aead_request *req)
200     put_unaligned_be32(2, iv + GCM_IV_SIZE);
201    
202     while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
203     - int blocks = walk.nbytes / AES_BLOCK_SIZE;
204     + int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
205     u8 *dst = walk.dst.virt.addr;
206     u8 *src = walk.src.virt.addr;
207    
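
The two gcm hunks above replace walk.nbytes / AES_BLOCK_SIZE with an expression that rounds the byte count down to an even number of blocks, matching the two-block granularity of the surrounding while loop. A standalone C illustration of the arithmetic:

    #include <stdio.h>

    #define AES_BLOCK_SIZE 16

    int main(void)
    {
            unsigned int nbytes = 3 * AES_BLOCK_SIZE + 5;           /* 53 bytes */
            int old_blocks = nbytes / AES_BLOCK_SIZE;               /* 3 (odd)  */
            int new_blocks = nbytes / (2 * AES_BLOCK_SIZE) * 2;     /* 2 (even) */

            printf("old=%d new=%d\n", old_blocks, new_blocks);
            return 0;
    }
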
208     diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
209     index f2a234d6516c..93e07512b4b6 100644
210     --- a/arch/arm64/include/asm/arch_timer.h
211     +++ b/arch/arm64/include/asm/arch_timer.h
212     @@ -148,18 +148,47 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
213     isb();
214     }
215    
216     +/*
217     + * Ensure that reads of the counter are treated the same as memory reads
218     + * for the purposes of ordering by subsequent memory barriers.
219     + *
220     + * This insanity brought to you by speculative system register reads,
221     + * out-of-order memory accesses, sequence locks and Thomas Gleixner.
222     + *
223     + * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
224     + */
225     +#define arch_counter_enforce_ordering(val) do { \
226     + u64 tmp, _val = (val); \
227     + \
228     + asm volatile( \
229     + " eor %0, %1, %1\n" \
230     + " add %0, sp, %0\n" \
231     + " ldr xzr, [%0]" \
232     + : "=r" (tmp) : "r" (_val)); \
233     +} while (0)
234     +
235     static inline u64 arch_counter_get_cntpct(void)
236     {
237     + u64 cnt;
238     +
239     isb();
240     - return arch_timer_reg_read_stable(cntpct_el0);
241     + cnt = arch_timer_reg_read_stable(cntpct_el0);
242     + arch_counter_enforce_ordering(cnt);
243     + return cnt;
244     }
245    
246     static inline u64 arch_counter_get_cntvct(void)
247     {
248     + u64 cnt;
249     +
250     isb();
251     - return arch_timer_reg_read_stable(cntvct_el0);
252     + cnt = arch_timer_reg_read_stable(cntvct_el0);
253     + arch_counter_enforce_ordering(cnt);
254     + return cnt;
255     }
256    
257     +#undef arch_counter_enforce_ordering
258     +
259     static inline int arch_timer_arch_init(void)
260     {
261     return 0;
262     diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
263     index 79657ad91397..def5a5e807f0 100644
264     --- a/arch/arm64/include/asm/processor.h
265     +++ b/arch/arm64/include/asm/processor.h
266     @@ -53,7 +53,15 @@
267     * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
268     */
269     #ifdef CONFIG_COMPAT
270     +#ifdef CONFIG_ARM64_64K_PAGES
271     +/*
272     + * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
273     + * by the compat vectors page.
274     + */
275     #define TASK_SIZE_32 UL(0x100000000)
276     +#else
277     +#define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE)
278     +#endif /* CONFIG_ARM64_64K_PAGES */
279     #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
280     TASK_SIZE_32 : TASK_SIZE_64)
281     #define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
282     diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
283     index 06ca574495af..262925b98f42 100644
284     --- a/arch/arm64/kernel/debug-monitors.c
285     +++ b/arch/arm64/kernel/debug-monitors.c
286     @@ -135,6 +135,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors);
287     */
288     static int clear_os_lock(unsigned int cpu)
289     {
290     + write_sysreg(0, osdlr_el1);
291     write_sysreg(0, oslar_el1);
292     isb();
293     return 0;
294     diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
295     index b44065fb1616..6f91e8116514 100644
296     --- a/arch/arm64/kernel/sys.c
297     +++ b/arch/arm64/kernel/sys.c
298     @@ -31,7 +31,7 @@
299    
300     SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
301     unsigned long, prot, unsigned long, flags,
302     - unsigned long, fd, off_t, off)
303     + unsigned long, fd, unsigned long, off)
304     {
305     if (offset_in_page(off) != 0)
306     return -EINVAL;
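
Switching the last mmap argument from the signed off_t to unsigned long keeps later byte-to-page scaling from sign-extending a "negative" offset. A small standalone demonstration of the difference (arithmetic vs. logical right shift on common ABIs):

    #include <stdio.h>

    int main(void)
    {
            long soff = (long)0xfffffffffffff000UL;      /* negative as off_t */
            unsigned long uoff = 0xfffffffffffff000UL;

            printf("signed:   %#lx\n", (unsigned long)(soff >> 12)); /* sign bits kept */
            printf("unsigned: %#lx\n", uoff >> 12);                  /* zero-filled   */
            return 0;
    }
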
307     diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
308     index c39872a7b03c..e8f60112818f 100644
309     --- a/arch/arm64/kernel/vdso/gettimeofday.S
310     +++ b/arch/arm64/kernel/vdso/gettimeofday.S
311     @@ -73,6 +73,13 @@ x_tmp .req x8
312     movn x_tmp, #0xff00, lsl #48
313     and \res, x_tmp, \res
314     mul \res, \res, \mult
315     + /*
316     + * Fake address dependency from the value computed from the counter
317     + * register to subsequent data page accesses so that the sequence
318     + * locking also orders the read of the counter.
319     + */
320     + and x_tmp, \res, xzr
321     + add vdso_data, vdso_data, x_tmp
322     .endm
323    
324     /*
325     @@ -147,12 +154,12 @@ ENTRY(__kernel_gettimeofday)
326     /* w11 = cs_mono_mult, w12 = cs_shift */
327     ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
328     ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
329     - seqcnt_check fail=1b
330    
331     get_nsec_per_sec res=x9
332     lsl x9, x9, x12
333    
334     get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
335     + seqcnt_check fail=1b
336     get_ts_realtime res_sec=x10, res_nsec=x11, \
337     clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
338    
339     @@ -211,13 +218,13 @@ realtime:
340     /* w11 = cs_mono_mult, w12 = cs_shift */
341     ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
342     ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
343     - seqcnt_check fail=realtime
344    
345     /* All computations are done with left-shifted nsecs. */
346     get_nsec_per_sec res=x9
347     lsl x9, x9, x12
348    
349     get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
350     + seqcnt_check fail=realtime
351     get_ts_realtime res_sec=x10, res_nsec=x11, \
352     clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
353     clock_gettime_return, shift=1
354     @@ -231,7 +238,6 @@ monotonic:
355     ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
356     ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
357     ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
358     - seqcnt_check fail=monotonic
359    
360     /* All computations are done with left-shifted nsecs. */
361     lsl x4, x4, x12
362     @@ -239,6 +245,7 @@ monotonic:
363     lsl x9, x9, x12
364    
365     get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
366     + seqcnt_check fail=monotonic
367     get_ts_realtime res_sec=x10, res_nsec=x11, \
368     clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
369    
370     @@ -253,13 +260,13 @@ monotonic_raw:
371     /* w11 = cs_raw_mult, w12 = cs_shift */
372     ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT]
373     ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
374     - seqcnt_check fail=monotonic_raw
375    
376     /* All computations are done with left-shifted nsecs. */
377     get_nsec_per_sec res=x9
378     lsl x9, x9, x12
379    
380     get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
381     + seqcnt_check fail=monotonic_raw
382     get_ts_clock_raw res_sec=x10, res_nsec=x11, \
383     clock_nsec=x15, nsec_to_sec=x9
384    
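
These vdso hunks move seqcnt_check to after get_clock_shifted_nsec, so the counter read happens before the final sequence re-check; together with the fake address dependency added above, a concurrent update of the timekeeping data now forces a retry instead of racing with the read. In C, the read side of that seqlock pattern looks roughly like this (read_counter() stands in for the cntvct_el0 read; memory barriers elided):

    #include <stdint.h>

    struct vdso_data { volatile uint32_t seq; uint64_t mult; };

    extern uint64_t read_counter(void);       /* stand-in for cntvct_el0 */

    uint64_t sample(struct vdso_data *vd)
    {
            uint32_t seq;
            uint64_t mult, cycles;

            do {
                    seq = vd->seq;            /* sequence count: begin    */
                    mult = vd->mult;          /* snapshot conversion data */
                    cycles = read_counter();  /* counter read, then ...   */
            } while (seq != vd->seq);         /* ... re-check; retry on race */

            return cycles * mult;
    }
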
385     diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
386     index 03646e6a2ef4..8cce091b6c21 100644
387     --- a/arch/arm64/mm/proc.S
388     +++ b/arch/arm64/mm/proc.S
389     @@ -70,24 +70,25 @@ ENTRY(cpu_do_suspend)
390     mrs x2, tpidr_el0
391     mrs x3, tpidrro_el0
392     mrs x4, contextidr_el1
393     - mrs x5, cpacr_el1
394     - mrs x6, tcr_el1
395     - mrs x7, vbar_el1
396     - mrs x8, mdscr_el1
397     - mrs x9, oslsr_el1
398     - mrs x10, sctlr_el1
399     + mrs x5, osdlr_el1
400     + mrs x6, cpacr_el1
401     + mrs x7, tcr_el1
402     + mrs x8, vbar_el1
403     + mrs x9, mdscr_el1
404     + mrs x10, oslsr_el1
405     + mrs x11, sctlr_el1
406     alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
407     - mrs x11, tpidr_el1
408     + mrs x12, tpidr_el1
409     alternative_else
410     - mrs x11, tpidr_el2
411     + mrs x12, tpidr_el2
412     alternative_endif
413     - mrs x12, sp_el0
414     + mrs x13, sp_el0
415     stp x2, x3, [x0]
416     - stp x4, xzr, [x0, #16]
417     - stp x5, x6, [x0, #32]
418     - stp x7, x8, [x0, #48]
419     - stp x9, x10, [x0, #64]
420     - stp x11, x12, [x0, #80]
421     + stp x4, x5, [x0, #16]
422     + stp x6, x7, [x0, #32]
423     + stp x8, x9, [x0, #48]
424     + stp x10, x11, [x0, #64]
425     + stp x12, x13, [x0, #80]
426     ret
427     ENDPROC(cpu_do_suspend)
428    
429     @@ -110,8 +111,8 @@ ENTRY(cpu_do_resume)
430     msr cpacr_el1, x6
431    
432     /* Don't change t0sz here, mask those bits when restoring */
433     - mrs x5, tcr_el1
434     - bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
435     + mrs x7, tcr_el1
436     + bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
437    
438     msr tcr_el1, x8
439     msr vbar_el1, x9
440     @@ -135,6 +136,7 @@ alternative_endif
441     /*
442     * Restore oslsr_el1 by writing oslar_el1
443     */
444     + msr osdlr_el1, x5
445     ubfx x11, x11, #1, #1
446     msr oslar_el1, x11
447     reset_pmuserenr_el0 x0 // Disable PMU access from EL0
448     diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
449     index 783de51a6c4e..6c881659ee8a 100644
450     --- a/arch/arm64/net/bpf_jit.h
451     +++ b/arch/arm64/net/bpf_jit.h
452     @@ -100,12 +100,6 @@
453     #define A64_STXR(sf, Rt, Rn, Rs) \
454     A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
455    
456     -/* Prefetch */
457     -#define A64_PRFM(Rn, type, target, policy) \
458     - aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \
459     - AARCH64_INSN_PRFM_TARGET_##target, \
460     - AARCH64_INSN_PRFM_POLICY_##policy)
461     -
462     /* Add/subtract (immediate) */
463     #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
464     aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
465     diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
466     index a6fdaea07c63..2eef156b38bb 100644
467     --- a/arch/arm64/net/bpf_jit_comp.c
468     +++ b/arch/arm64/net/bpf_jit_comp.c
469     @@ -736,7 +736,6 @@ emit_cond_jmp:
470     case BPF_STX | BPF_XADD | BPF_DW:
471     emit_a64_mov_i(1, tmp, off, ctx);
472     emit(A64_ADD(1, tmp, tmp, dst), ctx);
473     - emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
474     emit(A64_LDXR(isdw, tmp2, tmp), ctx);
475     emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
476     emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
477     diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
478     index cd4df9322501..7bbfe7d35da7 100644
479     --- a/arch/x86/crypto/crct10dif-pclmul_glue.c
480     +++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
481     @@ -76,15 +76,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
482     return 0;
483     }
484    
485     -static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
486     - u8 *out)
487     +static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
488     {
489     if (irq_fpu_usable()) {
490     kernel_fpu_begin();
491     - *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
492     + *(__u16 *)out = crc_t10dif_pcl(crc, data, len);
493     kernel_fpu_end();
494     } else
495     - *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
496     + *(__u16 *)out = crc_t10dif_generic(crc, data, len);
497     return 0;
498     }
499    
500     @@ -93,15 +92,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
501     {
502     struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
503    
504     - return __chksum_finup(&ctx->crc, data, len, out);
505     + return __chksum_finup(ctx->crc, data, len, out);
506     }
507    
508     static int chksum_digest(struct shash_desc *desc, const u8 *data,
509     unsigned int length, u8 *out)
510     {
511     - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
512     -
513     - return __chksum_finup(&ctx->crc, data, length, out);
514     + return __chksum_finup(0, data, length, out);
515     }
516    
517     static struct shash_alg alg = {
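
The hunk above makes __chksum_finup() take the intermediate CRC by value, and chksum_digest() now starts from the initial seed 0 instead of dereferencing the per-request context (which a one-shot digest never updates). The identity being relied on, with a toy fold standing in for crc_t10dif_generic():

    #include <stdio.h>
    #include <stdint.h>

    /* toy stand-in for crc_t10dif_generic(crc, data, len) */
    static uint16_t toy_crc(uint16_t crc, const uint8_t *d, unsigned int len)
    {
            while (len--)
                    crc = (uint16_t)((crc << 5) ^ (crc >> 11) ^ *d++);
            return crc;
    }

    int main(void)
    {
            const uint8_t msg[] = "123456789";
            uint16_t part = toy_crc(0, msg, 4);         /* update("1234") */
            uint16_t fin  = toy_crc(part, msg + 4, 5);  /* finup("56789") */

            /* one-shot digest from seed 0 equals update + finup */
            printf("digest=%u finup=%u\n",
                   (unsigned)toy_crc(0, msg, 9), (unsigned)fin);
            return 0;
    }
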
518     diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
519     index fbbf1ba57ec6..b5c2b1091b18 100644
520     --- a/arch/x86/entry/entry_32.S
521     +++ b/arch/x86/entry/entry_32.S
522     @@ -648,6 +648,7 @@ ENTRY(__switch_to_asm)
523     pushl %ebx
524     pushl %edi
525     pushl %esi
526     + pushfl
527    
528     /* switch stack */
529     movl %esp, TASK_threadsp(%eax)
530     @@ -670,6 +671,7 @@ ENTRY(__switch_to_asm)
531     #endif
532    
533     /* restore callee-saved registers */
534     + popfl
535     popl %esi
536     popl %edi
537     popl %ebx
538     diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
539     index 617df50a11d9..585bbc5b3216 100644
540     --- a/arch/x86/entry/entry_64.S
541     +++ b/arch/x86/entry/entry_64.S
542     @@ -352,6 +352,7 @@ ENTRY(__switch_to_asm)
543     pushq %r13
544     pushq %r14
545     pushq %r15
546     + pushfq
547    
548     /* switch stack */
549     movq %rsp, TASK_threadsp(%rdi)
550     @@ -374,6 +375,7 @@ ENTRY(__switch_to_asm)
551     #endif
552    
553     /* restore callee-saved registers */
554     + popfq
555     popq %r15
556     popq %r14
557     popq %r13
558     diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
559     index 7cf1a270d891..157149d4129c 100644
560     --- a/arch/x86/include/asm/switch_to.h
561     +++ b/arch/x86/include/asm/switch_to.h
562     @@ -40,6 +40,7 @@ asmlinkage void ret_from_fork(void);
563     * order of the fields must match the code in __switch_to_asm().
564     */
565     struct inactive_task_frame {
566     + unsigned long flags;
567     #ifdef CONFIG_X86_64
568     unsigned long r15;
569     unsigned long r14;
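
struct inactive_task_frame overlays the words that __switch_to_asm pushes, so the new flags member has to come first: pushfq runs last, leaving EFLAGS at the lowest stack address. A standalone illustration of that correspondence (field subset only, not the full kernel struct):

    #include <stdio.h>
    #include <stddef.h>

    /* Later pushes land at lower addresses, hence this field order. */
    struct frame_demo {
            unsigned long flags;    /* pushfq (pushed last) */
            unsigned long r15;      /* pushq %r15           */
            unsigned long r14;      /* pushq %r14           */
            /* ... remaining callee-saved registers ... */
    };

    int main(void)
    {
            printf("flags at offset %zu\n", offsetof(struct frame_demo, flags));
            return 0;
    }
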
570     diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
571     index d3e593eb189f..020efe0f9614 100644
572     --- a/arch/x86/kernel/process_32.c
573     +++ b/arch/x86/kernel/process_32.c
574     @@ -130,6 +130,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
575     struct task_struct *tsk;
576     int err;
577    
578     + /*
579     + * For a new task use the RESET flags value since there is no before.
580     + * All the status flags are zero; DF and all the system flags must also
581     + * be 0, specifically IF must be 0 because we context switch to the new
582     + * task with interrupts disabled.
583     + */
584     + frame->flags = X86_EFLAGS_FIXED;
585     frame->bp = 0;
586     frame->ret_addr = (unsigned long) ret_from_fork;
587     p->thread.sp = (unsigned long) fork_frame;
588     diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
589     index a0854f283efd..59f71d0f2b23 100644
590     --- a/arch/x86/kernel/process_64.c
591     +++ b/arch/x86/kernel/process_64.c
592     @@ -300,6 +300,14 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
593     childregs = task_pt_regs(p);
594     fork_frame = container_of(childregs, struct fork_frame, regs);
595     frame = &fork_frame->frame;
596     +
597     + /*
598     + * For a new task use the RESET flags value since there is no before.
599     + * All the status flags are zero; DF and all the system flags must also
600     + * be 0, specifically IF must be 0 because we context switch to the new
601     + * task with interrupts disabled.
602     + */
603     + frame->flags = X86_EFLAGS_FIXED;
604     frame->bp = 0;
605     frame->ret_addr = (unsigned long) ret_from_fork;
606     p->thread.sp = (unsigned long) fork_frame;
607     diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
608     index 0a5efd764914..e6db475164ed 100644
609     --- a/arch/x86/kernel/traps.c
610     +++ b/arch/x86/kernel/traps.c
611     @@ -58,7 +58,6 @@
612     #include <asm/alternative.h>
613     #include <asm/fpu/xstate.h>
614     #include <asm/trace/mpx.h>
615     -#include <asm/nospec-branch.h>
616     #include <asm/mpx.h>
617     #include <asm/vm86.h>
618     #include <asm/umip.h>
619     @@ -388,13 +387,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
620     regs->ip = (unsigned long)general_protection;
621     regs->sp = (unsigned long)&gpregs->orig_ax;
622    
623     - /*
624     - * This situation can be triggered by userspace via
625     - * modify_ldt(2) and the return does not take the regular
626     - * user space exit, so a CPU buffer clear is required when
627     - * MDS mitigation is enabled.
628     - */
629     - mds_user_clear_cpu_buffers();
630     return;
631     }
632     #endif
633     diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
634     index d2f5aa220355..cba414db14cb 100644
635     --- a/arch/x86/kvm/lapic.c
636     +++ b/arch/x86/kvm/lapic.c
637     @@ -1449,7 +1449,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
638     if (swait_active(q))
639     swake_up_one(q);
640    
641     - if (apic_lvtt_tscdeadline(apic))
642     + if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
643     ktimer->expired_tscdeadline = ktimer->tscdeadline;
644     }
645    
646     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
647     index f3337adaf9b3..d0eb37c069b8 100644
648     --- a/arch/x86/kvm/x86.c
649     +++ b/arch/x86/kvm/x86.c
650     @@ -1162,31 +1162,42 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
651     return 0;
652     }
653    
654     -bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
655     +static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
656     {
657     - if (efer & efer_reserved_bits)
658     - return false;
659     -
660     if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
661     - return false;
662     + return false;
663    
664     if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
665     - return false;
666     + return false;
667    
668     return true;
669     +
670     +}
671     +bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
672     +{
673     + if (efer & efer_reserved_bits)
674     + return false;
675     +
676     + return __kvm_valid_efer(vcpu, efer);
677     }
678     EXPORT_SYMBOL_GPL(kvm_valid_efer);
679    
680     -static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
681     +static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
682     {
683     u64 old_efer = vcpu->arch.efer;
684     + u64 efer = msr_info->data;
685    
686     - if (!kvm_valid_efer(vcpu, efer))
687     - return 1;
688     + if (efer & efer_reserved_bits)
689     + return false;
690    
691     - if (is_paging(vcpu)
692     - && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
693     - return 1;
694     + if (!msr_info->host_initiated) {
695     + if (!__kvm_valid_efer(vcpu, efer))
696     + return 1;
697     +
698     + if (is_paging(vcpu) &&
699     + (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
700     + return 1;
701     + }
702    
703     efer &= ~EFER_LMA;
704     efer |= vcpu->arch.efer & EFER_LMA;
705     @@ -2356,7 +2367,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
706     vcpu->arch.arch_capabilities = data;
707     break;
708     case MSR_EFER:
709     - return set_efer(vcpu, data);
710     + return set_efer(vcpu, msr_info);
711     case MSR_K7_HWCR:
712     data &= ~(u64)0x40; /* ignore flush filter disable */
713     data &= ~(u64)0x100; /* ignore ignne emulation enable */
714     diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
715     index f7f77023288a..dab07827d25e 100644
716     --- a/arch/x86/xen/enlighten_pvh.c
717     +++ b/arch/x86/xen/enlighten_pvh.c
718     @@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void)
719     }
720    
721     xen_pvh = 1;
722     + xen_domain_type = XEN_HVM_DOMAIN;
723     xen_start_flags = pvh_start_info.flags;
724    
725     msr = cpuid_ebx(xen_cpuid_base() + 2);
726     diff --git a/crypto/ccm.c b/crypto/ccm.c
727     index 0a083342ec8c..8104c564dd31 100644
728     --- a/crypto/ccm.c
729     +++ b/crypto/ccm.c
730     @@ -455,7 +455,6 @@ static void crypto_ccm_free(struct aead_instance *inst)
731    
732     static int crypto_ccm_create_common(struct crypto_template *tmpl,
733     struct rtattr **tb,
734     - const char *full_name,
735     const char *ctr_name,
736     const char *mac_name)
737     {
738     @@ -483,7 +482,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
739    
740     mac = __crypto_hash_alg_common(mac_alg);
741     err = -EINVAL;
742     - if (mac->digestsize != 16)
743     + if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
744     + mac->digestsize != 16)
745     goto out_put_mac;
746    
747     inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
748     @@ -506,23 +506,27 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
749    
750     ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
751    
752     - /* Not a stream cipher? */
753     + /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
754     err = -EINVAL;
755     - if (ctr->base.cra_blocksize != 1)
756     + if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
757     + crypto_skcipher_alg_ivsize(ctr) != 16 ||
758     + ctr->base.cra_blocksize != 1)
759     goto err_drop_ctr;
760    
761     - /* We want the real thing! */
762     - if (crypto_skcipher_alg_ivsize(ctr) != 16)
763     + /* ctr and cbcmac must use the same underlying block cipher. */
764     + if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
765     goto err_drop_ctr;
766    
767     err = -ENAMETOOLONG;
768     + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
769     + "ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
770     + goto err_drop_ctr;
771     +
772     if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
773     "ccm_base(%s,%s)", ctr->base.cra_driver_name,
774     mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
775     goto err_drop_ctr;
776    
777     - memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
778     -
779     inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
780     inst->alg.base.cra_priority = (mac->base.cra_priority +
781     ctr->base.cra_priority) / 2;
782     @@ -564,7 +568,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
783     const char *cipher_name;
784     char ctr_name[CRYPTO_MAX_ALG_NAME];
785     char mac_name[CRYPTO_MAX_ALG_NAME];
786     - char full_name[CRYPTO_MAX_ALG_NAME];
787    
788     cipher_name = crypto_attr_alg_name(tb[1]);
789     if (IS_ERR(cipher_name))
790     @@ -578,12 +581,7 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
791     cipher_name) >= CRYPTO_MAX_ALG_NAME)
792     return -ENAMETOOLONG;
793    
794     - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
795     - CRYPTO_MAX_ALG_NAME)
796     - return -ENAMETOOLONG;
797     -
798     - return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
799     - mac_name);
800     + return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
801     }
802    
803     static struct crypto_template crypto_ccm_tmpl = {
804     @@ -596,23 +594,17 @@ static int crypto_ccm_base_create(struct crypto_template *tmpl,
805     struct rtattr **tb)
806     {
807     const char *ctr_name;
808     - const char *cipher_name;
809     - char full_name[CRYPTO_MAX_ALG_NAME];
810     + const char *mac_name;
811    
812     ctr_name = crypto_attr_alg_name(tb[1]);
813     if (IS_ERR(ctr_name))
814     return PTR_ERR(ctr_name);
815    
816     - cipher_name = crypto_attr_alg_name(tb[2]);
817     - if (IS_ERR(cipher_name))
818     - return PTR_ERR(cipher_name);
819     -
820     - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
821     - ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
822     - return -ENAMETOOLONG;
823     + mac_name = crypto_attr_alg_name(tb[2]);
824     + if (IS_ERR(mac_name))
825     + return PTR_ERR(mac_name);
826    
827     - return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
828     - cipher_name);
829     + return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
830     }
831    
832     static struct crypto_template crypto_ccm_base_tmpl = {
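
The ccm.c changes validate the resolved algorithm names and then rebuild the canonical "ccm(...)" name from them, instead of trusting a user-supplied full_name. The pointer arithmetic in the hunk (+4 skips "ctr(", +7 skips "cbcmac(") can be tried standalone:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *ctr = "ctr(aes)";      /* resolved skcipher cra_name */
            const char *mac = "cbcmac(aes)";   /* resolved hash cra_name     */
            char name[64];

            /* both suffixes must name the same block cipher: "aes)" */
            if (strcmp(ctr + 4, mac + 7) != 0)
                    return 1;

            /* "ccm(" + "aes)" reconstructs the canonical "ccm(aes)" */
            snprintf(name, sizeof(name), "ccm(%s", ctr + 4);
            puts(name);
            return 0;
    }
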
833     diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
834     index 600afa99941f..4d6f51bcdfab 100644
835     --- a/crypto/chacha20poly1305.c
836     +++ b/crypto/chacha20poly1305.c
837     @@ -647,8 +647,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
838    
839     err = -ENAMETOOLONG;
840     if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
841     - "%s(%s,%s)", name, chacha_name,
842     - poly_name) >= CRYPTO_MAX_ALG_NAME)
843     + "%s(%s,%s)", name, chacha->base.cra_name,
844     + poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
845     goto out_drop_chacha;
846     if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
847     "%s(%s,%s)", name, chacha->base.cra_driver_name,
848     diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
849     index 8e94e29dc6fc..d08048ae5552 100644
850     --- a/crypto/crct10dif_generic.c
851     +++ b/crypto/crct10dif_generic.c
852     @@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
853     return 0;
854     }
855    
856     -static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
857     - u8 *out)
858     +static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
859     {
860     - *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
861     + *(__u16 *)out = crc_t10dif_generic(crc, data, len);
862     return 0;
863     }
864    
865     @@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
866     {
867     struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
868    
869     - return __chksum_finup(&ctx->crc, data, len, out);
870     + return __chksum_finup(ctx->crc, data, len, out);
871     }
872    
873     static int chksum_digest(struct shash_desc *desc, const u8 *data,
874     unsigned int length, u8 *out)
875     {
876     - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
877     -
878     - return __chksum_finup(&ctx->crc, data, length, out);
879     + return __chksum_finup(0, data, length, out);
880     }
881    
882     static struct shash_alg alg = {
883     diff --git a/crypto/gcm.c b/crypto/gcm.c
884     index 0ad879e1f9b2..9b0ea3ded1a4 100644
885     --- a/crypto/gcm.c
886     +++ b/crypto/gcm.c
887     @@ -597,7 +597,6 @@ static void crypto_gcm_free(struct aead_instance *inst)
888    
889     static int crypto_gcm_create_common(struct crypto_template *tmpl,
890     struct rtattr **tb,
891     - const char *full_name,
892     const char *ctr_name,
893     const char *ghash_name)
894     {
895     @@ -638,7 +637,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
896     goto err_free_inst;
897    
898     err = -EINVAL;
899     - if (ghash->digestsize != 16)
900     + if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
901     + ghash->digestsize != 16)
902     goto err_drop_ghash;
903    
904     crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
905     @@ -650,24 +650,24 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
906    
907     ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
908    
909     - /* We only support 16-byte blocks. */
910     + /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
911     err = -EINVAL;
912     - if (crypto_skcipher_alg_ivsize(ctr) != 16)
913     + if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
914     + crypto_skcipher_alg_ivsize(ctr) != 16 ||
915     + ctr->base.cra_blocksize != 1)
916     goto out_put_ctr;
917    
918     - /* Not a stream cipher? */
919     - if (ctr->base.cra_blocksize != 1)
920     + err = -ENAMETOOLONG;
921     + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
922     + "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
923     goto out_put_ctr;
924    
925     - err = -ENAMETOOLONG;
926     if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
927     "gcm_base(%s,%s)", ctr->base.cra_driver_name,
928     ghash_alg->cra_driver_name) >=
929     CRYPTO_MAX_ALG_NAME)
930     goto out_put_ctr;
931    
932     - memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
933     -
934     inst->alg.base.cra_flags = (ghash->base.cra_flags |
935     ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
936     inst->alg.base.cra_priority = (ghash->base.cra_priority +
937     @@ -709,7 +709,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
938     {
939     const char *cipher_name;
940     char ctr_name[CRYPTO_MAX_ALG_NAME];
941     - char full_name[CRYPTO_MAX_ALG_NAME];
942    
943     cipher_name = crypto_attr_alg_name(tb[1]);
944     if (IS_ERR(cipher_name))
945     @@ -719,12 +718,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
946     CRYPTO_MAX_ALG_NAME)
947     return -ENAMETOOLONG;
948    
949     - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
950     - CRYPTO_MAX_ALG_NAME)
951     - return -ENAMETOOLONG;
952     -
953     - return crypto_gcm_create_common(tmpl, tb, full_name,
954     - ctr_name, "ghash");
955     + return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
956     }
957    
958     static struct crypto_template crypto_gcm_tmpl = {
959     @@ -738,7 +732,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
960     {
961     const char *ctr_name;
962     const char *ghash_name;
963     - char full_name[CRYPTO_MAX_ALG_NAME];
964    
965     ctr_name = crypto_attr_alg_name(tb[1]);
966     if (IS_ERR(ctr_name))
967     @@ -748,12 +741,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
968     if (IS_ERR(ghash_name))
969     return PTR_ERR(ghash_name);
970    
971     - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
972     - ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
973     - return -ENAMETOOLONG;
974     -
975     - return crypto_gcm_create_common(tmpl, tb, full_name,
976     - ctr_name, ghash_name);
977     + return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
978     }
979    
980     static struct crypto_template crypto_gcm_base_tmpl = {
981     diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
982     index 8c77bc78a09f..df8fc0f54374 100644
983     --- a/crypto/salsa20_generic.c
984     +++ b/crypto/salsa20_generic.c
985     @@ -161,7 +161,7 @@ static int salsa20_crypt(struct skcipher_request *req)
986    
987     err = skcipher_walk_virt(&walk, req, true);
988    
989     - salsa20_init(state, ctx, walk.iv);
990     + salsa20_init(state, ctx, req->iv);
991    
992     while (walk.nbytes > 0) {
993     unsigned int nbytes = walk.nbytes;
994     diff --git a/crypto/skcipher.c b/crypto/skcipher.c
995     index 46bb300d418f..b664cf867f5f 100644
996     --- a/crypto/skcipher.c
997     +++ b/crypto/skcipher.c
998     @@ -131,8 +131,13 @@ unmap_src:
999     memcpy(walk->dst.virt.addr, walk->page, n);
1000     skcipher_unmap_dst(walk);
1001     } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
1002     - if (WARN_ON(err)) {
1003     - /* unexpected case; didn't process all bytes */
1004     + if (err) {
1005     + /*
1006     + * Didn't process all bytes. Either the algorithm is
1007     + * broken, or this was the last step and it turned out
1008     + * the message wasn't evenly divisible into blocks but
1009     + * the algorithm requires it.
1010     + */
1011     err = -EINVAL;
1012     goto finish;
1013     }
1014     diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
1015     index 74c489047f57..847db3edcb5b 100644
1016     --- a/drivers/acpi/sleep.c
1017     +++ b/drivers/acpi/sleep.c
1018     @@ -977,6 +977,8 @@ static int acpi_s2idle_prepare(void)
1019     if (acpi_sci_irq_valid())
1020     enable_irq_wake(acpi_sci_irq);
1021    
1022     + acpi_enable_wakeup_devices(ACPI_STATE_S0);
1023     +
1024     /* Change the configuration of GPEs to avoid spurious wakeup. */
1025     acpi_enable_all_wakeup_gpes();
1026     acpi_os_wait_events_complete();
1027     @@ -1026,6 +1028,8 @@ static void acpi_s2idle_restore(void)
1028     {
1029     acpi_enable_all_runtime_gpes();
1030    
1031     + acpi_disable_wakeup_devices(ACPI_STATE_S0);
1032     +
1033     if (acpi_sci_irq_valid())
1034     disable_irq_wake(acpi_sci_irq);
1035    
1036     diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
1037     index 76c2010ba672..af44db2dfb68 100644
1038     --- a/drivers/char/ipmi/ipmi_ssif.c
1039     +++ b/drivers/char/ipmi/ipmi_ssif.c
1040     @@ -688,12 +688,16 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
1041     /* End of read */
1042     len = ssif_info->multi_len;
1043     data = ssif_info->data;
1044     - } else if (blocknum != ssif_info->multi_pos) {
1045     + } else if (blocknum + 1 != ssif_info->multi_pos) {
1046     /*
1047     * Out of sequence block, just abort. Block
1048     * numbers start at zero for the second block,
1049     * but multi_pos starts at one, so the +1.
1050     */
1051     + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
1052     + dev_dbg(&ssif_info->client->dev,
1053     + "Received message out of sequence, expected %u, got %u\n",
1054     + ssif_info->multi_pos - 1, blocknum);
1055     result = -EIO;
1056     } else {
1057     ssif_inc_stat(ssif_info, received_message_parts);
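
The ssif check above encodes an off-by-one between two counters: multi_pos counts received middle blocks starting at 1, while the block number on the wire restarts at 0 for the second block, so an in-sequence block satisfies blocknum + 1 == multi_pos. A standalone check of that relation:

    #include <stdio.h>

    int main(void)
    {
            unsigned int multi_pos = 3;   /* expecting the 3rd middle block */
            unsigned int blocknum  = 3;   /* wire numbering starts at 0     */

            if (blocknum + 1 != multi_pos)
                    printf("out of sequence: expected %u, got %u\n",
                           multi_pos - 1, blocknum);   /* expected 2, got 3 */
            else
                    printf("in sequence\n");
            return 0;
    }
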
1058     diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
1059     index f5c07498ea4f..0c85a5123f85 100644
1060     --- a/drivers/crypto/amcc/crypto4xx_alg.c
1061     +++ b/drivers/crypto/amcc/crypto4xx_alg.c
1062     @@ -141,9 +141,10 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
1063     /* Setup SA */
1064     sa = ctx->sa_in;
1065    
1066     - set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
1067     - SA_SAVE_IV : SA_NOT_SAVE_IV),
1068     - SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
1069     + set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
1070     + SA_NOT_SAVE_IV : SA_SAVE_IV),
1071     + SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
1072     + SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
1073     SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
1074     SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
1075     SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
1076     @@ -162,6 +163,11 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
1077     memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
1078     sa = ctx->sa_out;
1079     sa->sa_command_0.bf.dir = DIR_OUTBOUND;
1080     + /*
1081     + * SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT.
1082     + * it's the DIR_(IN|OUT)BOUND that matters
1083     + */
1084     + sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
1085    
1086     return 0;
1087     }
1088     diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
1089     index 6eaec9ba0f68..d2ec9fd1b8bb 100644
1090     --- a/drivers/crypto/amcc/crypto4xx_core.c
1091     +++ b/drivers/crypto/amcc/crypto4xx_core.c
1092     @@ -712,7 +712,23 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
1093     size_t offset_to_sr_ptr;
1094     u32 gd_idx = 0;
1095     int tmp;
1096     - bool is_busy;
1097     + bool is_busy, force_sd;
1098     +
1099     + /*
1100     + * There's a very subtile/disguised "bug" in the hardware that
1101     + * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
1102     + * of the hardware spec:
1103     + * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
1104     + * operation modes for >>> "Block ciphers" <<<.
1105     + *
1106     + * To workaround this issue and stop the hardware from causing
1107     + * "overran dst buffer" on crypttexts that are not a multiple
1108     + * of 16 (AES_BLOCK_SIZE), we force the driver to use the
1109     + * scatter buffers.
1110     + */
1111     + force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
1112     + || req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
1113     + && (datalen % AES_BLOCK_SIZE);
1114    
1115     /* figure how many gd are needed */
1116     tmp = sg_nents_for_len(src, assoclen + datalen);
1117     @@ -730,7 +746,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
1118     }
1119    
1120     /* figure how many sd are needed */
1121     - if (sg_is_last(dst)) {
1122     + if (sg_is_last(dst) && force_sd == false) {
1123     num_sd = 0;
1124     } else {
1125     if (datalen > PPC4XX_SD_BUFFER_SIZE) {
1126     @@ -805,9 +821,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
1127     pd->sa_len = sa_len;
1128    
1129     pd_uinfo = &dev->pdr_uinfo[pd_entry];
1130     - pd_uinfo->async_req = req;
1131     pd_uinfo->num_gd = num_gd;
1132     pd_uinfo->num_sd = num_sd;
1133     + pd_uinfo->dest_va = dst;
1134     + pd_uinfo->async_req = req;
1135    
1136     if (iv_len)
1137     memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
1138     @@ -826,7 +843,6 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
1139     /* get first gd we are going to use */
1140     gd_idx = fst_gd;
1141     pd_uinfo->first_gd = fst_gd;
1142     - pd_uinfo->num_gd = num_gd;
1143     gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
1144     pd->src = gd_dma;
1145     /* enable gather */
1146     @@ -863,17 +879,14 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
1147     * Indicate gather array is not used
1148     */
1149     pd_uinfo->first_gd = 0xffffffff;
1150     - pd_uinfo->num_gd = 0;
1151     }
1152     - if (sg_is_last(dst)) {
1153     + if (!num_sd) {
1154     /*
1155     * we know application give us dst a whole piece of memory
1156     * no need to use scatter ring.
1157     */
1158     pd_uinfo->using_sd = 0;
1159     pd_uinfo->first_sd = 0xffffffff;
1160     - pd_uinfo->num_sd = 0;
1161     - pd_uinfo->dest_va = dst;
1162     sa->sa_command_0.bf.scatter = 0;
1163     pd->dest = (u32)dma_map_page(dev->core_dev->device,
1164     sg_page(dst), dst->offset,
1165     @@ -887,9 +900,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
1166     nbytes = datalen;
1167     sa->sa_command_0.bf.scatter = 1;
1168     pd_uinfo->using_sd = 1;
1169     - pd_uinfo->dest_va = dst;
1170     pd_uinfo->first_sd = fst_sd;
1171     - pd_uinfo->num_sd = num_sd;
1172     sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
1173     pd->dest = sd_dma;
1174     /* setup scatter descriptor */
1175     diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
1176     index 72790d88236d..1603dc8d2d75 100644
1177     --- a/drivers/crypto/ccp/psp-dev.c
1178     +++ b/drivers/crypto/ccp/psp-dev.c
1179     @@ -935,7 +935,7 @@ void psp_pci_init(void)
1180     rc = sev_platform_init(&error);
1181     if (rc) {
1182     dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
1183     - goto err;
1184     + return;
1185     }
1186    
1187     dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
1188     diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
1189     index 5852d29ae2da..0669033f5be5 100644
1190     --- a/drivers/crypto/ccree/cc_aead.c
1191     +++ b/drivers/crypto/ccree/cc_aead.c
1192     @@ -415,7 +415,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
1193     /* This function prepers the user key so it can pass to the hmac processing
1194     * (copy to intenral buffer or hash in case of key longer than block
1195     */
1196     -static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
1197     +static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
1198     unsigned int keylen)
1199     {
1200     dma_addr_t key_dma_addr = 0;
1201     @@ -428,6 +428,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
1202     unsigned int hashmode;
1203     unsigned int idx = 0;
1204     int rc = 0;
1205     + u8 *key = NULL;
1206     struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
1207     dma_addr_t padded_authkey_dma_addr =
1208     ctx->auth_state.hmac.padded_authkey_dma_addr;
1209     @@ -446,11 +447,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
1210     }
1211    
1212     if (keylen != 0) {
1213     +
1214     + key = kmemdup(authkey, keylen, GFP_KERNEL);
1215     + if (!key)
1216     + return -ENOMEM;
1217     +
1218     key_dma_addr = dma_map_single(dev, (void *)key, keylen,
1219     DMA_TO_DEVICE);
1220     if (dma_mapping_error(dev, key_dma_addr)) {
1221     dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
1222     key, keylen);
1223     + kzfree(key);
1224     return -ENOMEM;
1225     }
1226     if (keylen > blocksize) {
1227     @@ -533,6 +540,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
1228     if (key_dma_addr)
1229     dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
1230    
1231     + kzfree(key);
1232     +
1233     return rc;
1234     }
1235    
1236     diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
1237     index 3bcb6bce666e..90b4870078fb 100644
1238     --- a/drivers/crypto/ccree/cc_buffer_mgr.c
1239     +++ b/drivers/crypto/ccree/cc_buffer_mgr.c
1240     @@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
1241     */
1242     static unsigned int cc_get_sgl_nents(struct device *dev,
1243     struct scatterlist *sg_list,
1244     - unsigned int nbytes, u32 *lbytes,
1245     - bool *is_chained)
1246     + unsigned int nbytes, u32 *lbytes)
1247     {
1248     unsigned int nents = 0;
1249    
1250     while (nbytes && sg_list) {
1251     - if (sg_list->length) {
1252     - nents++;
1253     - /* get the number of bytes in the last entry */
1254     - *lbytes = nbytes;
1255     - nbytes -= (sg_list->length > nbytes) ?
1256     - nbytes : sg_list->length;
1257     - sg_list = sg_next(sg_list);
1258     - } else {
1259     - sg_list = (struct scatterlist *)sg_page(sg_list);
1260     - if (is_chained)
1261     - *is_chained = true;
1262     - }
1263     + nents++;
1264     + /* get the number of bytes in the last entry */
1265     + *lbytes = nbytes;
1266     + nbytes -= (sg_list->length > nbytes) ?
1267     + nbytes : sg_list->length;
1268     + sg_list = sg_next(sg_list);
1269     }
1270     dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
1271     return nents;
1272     @@ -142,7 +135,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
1273     {
1274     u32 nents, lbytes;
1275    
1276     - nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
1277     + nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
1278     sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
1279     (direct == CC_SG_TO_BUF));
1280     }
1281     @@ -311,40 +304,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
1282     sgl_data->num_of_buffers++;
1283     }
1284    
1285     -static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
1286     - enum dma_data_direction direction)
1287     -{
1288     - u32 i, j;
1289     - struct scatterlist *l_sg = sg;
1290     -
1291     - for (i = 0; i < nents; i++) {
1292     - if (!l_sg)
1293     - break;
1294     - if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
1295     - dev_err(dev, "dma_map_page() sg buffer failed\n");
1296     - goto err;
1297     - }
1298     - l_sg = sg_next(l_sg);
1299     - }
1300     - return nents;
1301     -
1302     -err:
1303     - /* Restore mapped parts */
1304     - for (j = 0; j < i; j++) {
1305     - if (!sg)
1306     - break;
1307     - dma_unmap_sg(dev, sg, 1, direction);
1308     - sg = sg_next(sg);
1309     - }
1310     - return 0;
1311     -}
1312     -
1313     static int cc_map_sg(struct device *dev, struct scatterlist *sg,
1314     unsigned int nbytes, int direction, u32 *nents,
1315     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
1316     {
1317     - bool is_chained = false;
1318     -
1319     if (sg_is_last(sg)) {
1320     /* One entry only case -set to DLLI */
1321     if (dma_map_sg(dev, sg, 1, direction) != 1) {
1322     @@ -358,35 +321,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
1323     *nents = 1;
1324     *mapped_nents = 1;
1325     } else { /*sg_is_last*/
1326     - *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
1327     - &is_chained);
1328     + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
1329     if (*nents > max_sg_nents) {
1330     *nents = 0;
1331     dev_err(dev, "Too many fragments. current %d max %d\n",
1332     *nents, max_sg_nents);
1333     return -ENOMEM;
1334     }
1335     - if (!is_chained) {
1336     - /* In case of mmu the number of mapped nents might
1337     - * be changed from the original sgl nents
1338     - */
1339     - *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
1340     - if (*mapped_nents == 0) {
1341     - *nents = 0;
1342     - dev_err(dev, "dma_map_sg() sg buffer failed\n");
1343     - return -ENOMEM;
1344     - }
1345     - } else {
1346     - /*In this case the driver maps entry by entry so it
1347     - * must have the same nents before and after map
1348     - */
1349     - *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
1350     - direction);
1351     - if (*mapped_nents != *nents) {
1352     - *nents = *mapped_nents;
1353     - dev_err(dev, "dma_map_sg() sg buffer failed\n");
1354     - return -ENOMEM;
1355     - }
1356     + /* In case of mmu the number of mapped nents might
1357     + * be changed from the original sgl nents
1358     + */
1359     + *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
1360     + if (*mapped_nents == 0) {
1361     + *nents = 0;
1362     + dev_err(dev, "dma_map_sg() sg buffer failed\n");
1363     + return -ENOMEM;
1364     }
1365     }
1366    
1367     @@ -571,7 +520,6 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
1368     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1369     struct cc_drvdata *drvdata = dev_get_drvdata(dev);
1370     u32 dummy;
1371     - bool chained;
1372     u32 size_to_unmap = 0;
1373    
1374     if (areq_ctx->mac_buf_dma_addr) {
1375     @@ -612,6 +560,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
1376     if (areq_ctx->gen_ctx.iv_dma_addr) {
1377     dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
1378     hw_iv_size, DMA_BIDIRECTIONAL);
1379     + kzfree(areq_ctx->gen_ctx.iv);
1380     }
1381    
1382     /* Release pool */
1383     @@ -636,15 +585,14 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
1384     size_to_unmap += crypto_aead_ivsize(tfm);
1385    
1386     dma_unmap_sg(dev, req->src,
1387     - cc_get_sgl_nents(dev, req->src, size_to_unmap,
1388     - &dummy, &chained),
1389     + cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
1390     DMA_BIDIRECTIONAL);
1391     if (req->src != req->dst) {
1392     dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
1393     sg_virt(req->dst));
1394     dma_unmap_sg(dev, req->dst,
1395     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
1396     - &dummy, &chained),
1397     + &dummy),
1398     DMA_BIDIRECTIONAL);
1399     }
1400     if (drvdata->coherent &&
1401     @@ -717,19 +665,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
1402     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1403     unsigned int hw_iv_size = areq_ctx->hw_iv_size;
1404     struct device *dev = drvdata_to_dev(drvdata);
1405     + gfp_t flags = cc_gfp_flags(&req->base);
1406     int rc = 0;
1407    
1408     if (!req->iv) {
1409     areq_ctx->gen_ctx.iv_dma_addr = 0;
1410     + areq_ctx->gen_ctx.iv = NULL;
1411     goto chain_iv_exit;
1412     }
1413    
1414     - areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
1415     - hw_iv_size,
1416     - DMA_BIDIRECTIONAL);
1417     + areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
1418     + if (!areq_ctx->gen_ctx.iv)
1419     + return -ENOMEM;
1420     +
1421     + areq_ctx->gen_ctx.iv_dma_addr =
1422     + dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
1423     + DMA_BIDIRECTIONAL);
1424     if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
1425     dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
1426     hw_iv_size, req->iv);
1427     + kzfree(areq_ctx->gen_ctx.iv);
1428     + areq_ctx->gen_ctx.iv = NULL;
1429     rc = -ENOMEM;
1430     goto chain_iv_exit;
1431     }
1432     @@ -1022,7 +978,6 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
1433     unsigned int size_for_map = req->assoclen + req->cryptlen;
1434     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1435     u32 sg_index = 0;
1436     - bool chained = false;
1437     bool is_gcm4543 = areq_ctx->is_gcm4543;
1438     u32 size_to_skip = req->assoclen;
1439    
1440     @@ -1043,7 +998,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
1441     size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1442     authsize : 0;
1443     src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
1444     - &src_last_bytes, &chained);
1445     + &src_last_bytes);
1446     sg_index = areq_ctx->src_sgl->length;
1447     //check where the data starts
1448     while (sg_index <= size_to_skip) {
1449     @@ -1085,7 +1040,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
1450     }
1451    
1452     dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
1453     - &dst_last_bytes, &chained);
1454     + &dst_last_bytes);
1455     sg_index = areq_ctx->dst_sgl->length;
1456     offset = size_to_skip;
1457    
1458     @@ -1486,7 +1441,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1459     dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1460     curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1461     areq_ctx->in_nents =
1462     - cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
1463     + cc_get_sgl_nents(dev, src, nbytes, &dummy);
1464     sg_copy_to_buffer(src, areq_ctx->in_nents,
1465     &curr_buff[*curr_buff_cnt], nbytes);
1466     *curr_buff_cnt += nbytes;
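All of the ccree hunks above share one underlying fix: the driver used to
DMA-map caller-supplied IV and key buffers directly, which is unsafe when
those buffers live on the stack or in vmalloc space. A minimal sketch of
the pattern the patch adopts, assuming only the standard kernel DMA API
(the helper name is illustrative, not part of the driver):

    static int example_map_copy(struct device *dev, const u8 *buf,
                                unsigned int len, u8 **copy,
                                dma_addr_t *dma_addr)
    {
            /* Duplicate the caller's buffer into kmalloc'd memory,
             * which is always safe to DMA-map. */
            *copy = kmemdup(buf, len, GFP_KERNEL);
            if (!*copy)
                    return -ENOMEM;

            *dma_addr = dma_map_single(dev, *copy, len, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, *dma_addr)) {
                    kzfree(*copy);  /* zero key material before freeing */
                    *copy = NULL;
                    return -ENOMEM;
            }
            return 0;
    }

The matching teardown calls dma_unmap_single() and then kzfree() on the
copy, as the unmap hunks above do for both the AEAD IV and the hash keys.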
1467     diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
1468     index d608a4faf662..be7f9bd5c559 100644
1469     --- a/drivers/crypto/ccree/cc_driver.h
1470     +++ b/drivers/crypto/ccree/cc_driver.h
1471     @@ -162,6 +162,7 @@ struct cc_alg_template {
1472    
1473     struct async_gen_req_ctx {
1474     dma_addr_t iv_dma_addr;
1475     + u8 *iv;
1476     enum drv_crypto_direction op_type;
1477     };
1478    
1479     diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
1480     index b4d0a6d983e0..09f708f6418e 100644
1481     --- a/drivers/crypto/ccree/cc_fips.c
1482     +++ b/drivers/crypto/ccree/cc_fips.c
1483     @@ -72,20 +72,28 @@ static inline void tee_fips_error(struct device *dev)
1484     dev_err(dev, "TEE reported error!\n");
1485     }
1486    
1487     +/*
1488     + * This function checks if a cryptocell tee fips error occurred
1489     + * and in such a case triggers a system error
1490     + */
1491     +void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
1492     +{
1493     + struct device *dev = drvdata_to_dev(p_drvdata);
1494     +
1495     + if (!cc_get_tee_fips_status(p_drvdata))
1496     + tee_fips_error(dev);
1497     +}
1498     +
1499     /* Deferred service handler, run as interrupt-fired tasklet */
1500     static void fips_dsr(unsigned long devarg)
1501     {
1502     struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
1503     - struct device *dev = drvdata_to_dev(drvdata);
1504     - u32 irq, state, val;
1505     + u32 irq, val;
1506    
1507     irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
1508    
1509     if (irq) {
1510     - state = cc_ioread(drvdata, CC_REG(GPR_HOST));
1511     -
1512     - if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
1513     - tee_fips_error(dev);
1514     + cc_tee_handle_fips_error(drvdata);
1515     }
1516    
1517     /* after verifying that there is nothing to do,
1518     @@ -113,8 +121,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
1519     dev_dbg(dev, "Initializing fips tasklet\n");
1520     tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
1521    
1522     - if (!cc_get_tee_fips_status(p_drvdata))
1523     - tee_fips_error(dev);
1524     + cc_tee_handle_fips_error(p_drvdata);
1525    
1526     return 0;
1527     }
1528     diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
1529     index 645e096a7a82..67d5fbfa09b5 100644
1530     --- a/drivers/crypto/ccree/cc_fips.h
1531     +++ b/drivers/crypto/ccree/cc_fips.h
1532     @@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata);
1533     void cc_fips_fini(struct cc_drvdata *drvdata);
1534     void fips_handler(struct cc_drvdata *drvdata);
1535     void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
1536     +void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);
1537    
1538     #else /* CONFIG_CRYPTO_FIPS */
1539    
1540     @@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
1541     static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
1542     bool ok) {}
1543     static inline void fips_handler(struct cc_drvdata *drvdata) {}
1544     +static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}
1545    
1546     #endif /* CONFIG_CRYPTO_FIPS */
1547    
1548     diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
1549     index b9313306c36f..2cadd7a21844 100644
1550     --- a/drivers/crypto/ccree/cc_hash.c
1551     +++ b/drivers/crypto/ccree/cc_hash.c
1552     @@ -64,6 +64,7 @@ struct cc_hash_alg {
1553     struct hash_key_req_ctx {
1554     u32 keylen;
1555     dma_addr_t key_dma_addr;
1556     + u8 *key;
1557     };
1558    
1559     /* hash per-session context */
1560     @@ -724,13 +725,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
1561     ctx->key_params.keylen = keylen;
1562     ctx->key_params.key_dma_addr = 0;
1563     ctx->is_hmac = true;
1564     + ctx->key_params.key = NULL;
1565    
1566     if (keylen) {
1567     + ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
1568     + if (!ctx->key_params.key)
1569     + return -ENOMEM;
1570     +
1571     ctx->key_params.key_dma_addr =
1572     - dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
1573     + dma_map_single(dev, (void *)ctx->key_params.key, keylen,
1574     + DMA_TO_DEVICE);
1575     if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
1576     dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
1577     - key, keylen);
1578     + ctx->key_params.key, keylen);
1579     + kzfree(ctx->key_params.key);
1580     return -ENOMEM;
1581     }
1582     dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
1583     @@ -881,6 +889,9 @@ out:
1584     dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1585     &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
1586     }
1587     +
1588     + kzfree(ctx->key_params.key);
1589     +
1590     return rc;
1591     }
1592    
1593     @@ -907,11 +918,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
1594    
1595     ctx->key_params.keylen = keylen;
1596    
1597     + ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
1598     + if (!ctx->key_params.key)
1599     + return -ENOMEM;
1600     +
1601     ctx->key_params.key_dma_addr =
1602     - dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
1603     + dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
1604     if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
1605     dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
1606     key, keylen);
1607     + kzfree(ctx->key_params.key);
1608     return -ENOMEM;
1609     }
1610     dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
1611     @@ -963,6 +979,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
1612     dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1613     &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
1614    
1615     + kzfree(ctx->key_params.key);
1616     +
1617     return rc;
1618     }
1619    
1620     @@ -1598,7 +1616,7 @@ static struct cc_hash_template driver_hash[] = {
1621     .setkey = cc_hash_setkey,
1622     .halg = {
1623     .digestsize = SHA224_DIGEST_SIZE,
1624     - .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
1625     + .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
1626     },
1627     },
1628     .hash_mode = DRV_HASH_SHA224,
1629     @@ -1623,7 +1641,7 @@ static struct cc_hash_template driver_hash[] = {
1630     .setkey = cc_hash_setkey,
1631     .halg = {
1632     .digestsize = SHA384_DIGEST_SIZE,
1633     - .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
1634     + .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1635     },
1636     },
1637     .hash_mode = DRV_HASH_SHA384,
1638     diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
1639     index 769458323394..1abec3896a78 100644
1640     --- a/drivers/crypto/ccree/cc_ivgen.c
1641     +++ b/drivers/crypto/ccree/cc_ivgen.c
1642     @@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata)
1643     }
1644    
1645     ivgen_ctx->pool = NULL_SRAM_ADDR;
1646     -
1647     - /* release "this" context */
1648     - kfree(ivgen_ctx);
1649     }
1650    
1651     /*!
1652     @@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
1653     int rc;
1654    
1655     /* Allocate "this" context */
1656     - ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
1657     + ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
1658     if (!ivgen_ctx)
1659     return -ENOMEM;
1660    
1661     + drvdata->ivgen_handle = ivgen_ctx;
1662     +
1663     /* Allocate pool's header for initial enc. key/IV */
1664     ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
1665     &ivgen_ctx->pool_meta_dma,
1666     @@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
1667     goto out;
1668     }
1669    
1670     - drvdata->ivgen_handle = ivgen_ctx;
1671     -
1672     return cc_init_iv_sram(drvdata);
1673    
1674     out:
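The cc_ivgen.c hunks switch the context allocation from kzalloc() to
devm_kzalloc() and publish drvdata->ivgen_handle before the allocations
that can fail, so the error path (which ends up in cc_ivgen_fini()) can
find the context, and no manual kfree() is needed at teardown. A short
sketch of the device-managed allocation idiom under those assumptions
(names are illustrative):

    struct example_ctx {
            void *pool;
    };

    static int example_init(struct device *dev)
    {
            struct example_ctx *ctx;

            /* Released automatically when the device is unbound, so
             * the teardown path needs no explicit kfree(). */
            ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
            if (!ctx)
                    return -ENOMEM;

            dev_set_drvdata(dev, ctx);
            return 0;
    }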
1675     diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
1676     index d990f472e89f..79fc0a37ba6e 100644
1677     --- a/drivers/crypto/ccree/cc_pm.c
1678     +++ b/drivers/crypto/ccree/cc_pm.c
1679     @@ -11,6 +11,7 @@
1680     #include "cc_ivgen.h"
1681     #include "cc_hash.h"
1682     #include "cc_pm.h"
1683     +#include "cc_fips.h"
1684    
1685     #define POWER_DOWN_ENABLE 0x01
1686     #define POWER_DOWN_DISABLE 0x00
1687     @@ -25,13 +26,13 @@ int cc_pm_suspend(struct device *dev)
1688     int rc;
1689    
1690     dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
1691     - cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
1692     rc = cc_suspend_req_queue(drvdata);
1693     if (rc) {
1694     dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
1695     return rc;
1696     }
1697     fini_cc_regs(drvdata);
1698     + cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
1699     cc_clk_off(drvdata);
1700     return 0;
1701     }
1702     @@ -42,19 +43,21 @@ int cc_pm_resume(struct device *dev)
1703     struct cc_drvdata *drvdata = dev_get_drvdata(dev);
1704    
1705     dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
1706     - cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
1707     -
1708     + /* Enables the device source clk */
1709     rc = cc_clk_on(drvdata);
1710     if (rc) {
1711     dev_err(dev, "failed getting clock back on. We're toast.\n");
1712     return rc;
1713     }
1714    
1715     + cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
1716     rc = init_cc_regs(drvdata, false);
1717     if (rc) {
1718     dev_err(dev, "init_cc_regs (%x)\n", rc);
1719     return rc;
1720     }
1721     + /* check if tee fips error occurred during power down */
1722     + cc_tee_handle_fips_error(drvdata);
1723    
1724     rc = cc_resume_req_queue(drvdata);
1725     if (rc) {
1726     diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
1727     index 23305f22072f..204e4ad62c38 100644
1728     --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
1729     +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
1730     @@ -250,9 +250,14 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
1731     u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
1732     dev->sg_src->offset + dev->sg_src->length - ivsize;
1733    
1734     - /* store the iv that need to be updated in chain mode */
1735     - if (ctx->mode & RK_CRYPTO_DEC)
1736     + /* Store the iv that need to be updated in chain mode.
1737     + * And update the IV buffer to contain the next IV for decryption mode.
1738     + */
1739     + if (ctx->mode & RK_CRYPTO_DEC) {
1740     memcpy(ctx->iv, src_last_blk, ivsize);
1741     + sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
1742     + ivsize, dev->total - ivsize);
1743     + }
1744    
1745     err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
1746     if (!err)
1747     @@ -288,13 +293,19 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
1748     struct ablkcipher_request *req =
1749     ablkcipher_request_cast(dev->async_req);
1750     struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1751     + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
1752     u32 ivsize = crypto_ablkcipher_ivsize(tfm);
1753    
1754     - if (ivsize == DES_BLOCK_SIZE)
1755     - memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
1756     - ivsize);
1757     - else if (ivsize == AES_BLOCK_SIZE)
1758     - memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
1759     + /* Update the IV buffer to contain the next IV for encryption mode. */
1760     + if (!(ctx->mode & RK_CRYPTO_DEC)) {
1761     + if (dev->aligned) {
1762     + memcpy(req->info, sg_virt(dev->sg_dst) +
1763     + dev->sg_dst->length - ivsize, ivsize);
1764     + } else {
1765     + memcpy(req->info, dev->addr_vir +
1766     + dev->count - ivsize, ivsize);
1767     + }
1768     + }
1769     }
1770    
1771     static void rk_update_iv(struct rk_crypto_info *dev)
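The rk3288 hunks implement CBC IV chaining explicitly: the IV for the next
request is always the last ciphertext block, which sits in the source data
for decryption (and must be saved before the hardware runs) and in the
destination data for encryption. A standalone sketch of that rule (the
helper is illustrative, not driver code):

    /* For CBC, the next IV is the last ciphertext block: in the
     * source buffer when decrypting, in the destination buffer
     * when encrypting. */
    static void example_next_cbc_iv(bool decrypt, const u8 *src,
                                    const u8 *dst, size_t total,
                                    u8 *iv, unsigned int ivsize)
    {
            const u8 *ct = decrypt ? src : dst;

            memcpy(iv, ct + total - ivsize, ivsize);
    }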
1772     diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
1773     index d6a9f63d65ba..de78282b8f44 100644
1774     --- a/drivers/crypto/vmx/aesp8-ppc.pl
1775     +++ b/drivers/crypto/vmx/aesp8-ppc.pl
1776     @@ -1854,7 +1854,7 @@ Lctr32_enc8x_three:
1777     stvx_u $out1,$x10,$out
1778     stvx_u $out2,$x20,$out
1779     addi $out,$out,0x30
1780     - b Lcbc_dec8x_done
1781     + b Lctr32_enc8x_done
1782    
1783     .align 5
1784     Lctr32_enc8x_two:
1785     @@ -1866,7 +1866,7 @@ Lctr32_enc8x_two:
1786     stvx_u $out0,$x00,$out
1787     stvx_u $out1,$x10,$out
1788     addi $out,$out,0x20
1789     - b Lcbc_dec8x_done
1790     + b Lctr32_enc8x_done
1791    
1792     .align 5
1793     Lctr32_enc8x_one:
1794     diff --git a/drivers/dax/device.c b/drivers/dax/device.c
1795     index 948806e57cee..a89ebd94c670 100644
1796     --- a/drivers/dax/device.c
1797     +++ b/drivers/dax/device.c
1798     @@ -325,8 +325,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
1799    
1800     *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
1801    
1802     - return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
1803     - vmf->flags & FAULT_FLAG_WRITE);
1804     + return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
1805     }
1806    
1807     #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1808     @@ -376,8 +375,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
1809    
1810     *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
1811    
1812     - return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
1813     - vmf->flags & FAULT_FLAG_WRITE);
1814     + return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
1815     }
1816     #else
1817     static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
1818     diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
1819     index 522c7426f3a0..772258ee1f51 100644
1820     --- a/drivers/md/bcache/journal.c
1821     +++ b/drivers/md/bcache/journal.c
1822     @@ -540,11 +540,11 @@ static void journal_reclaim(struct cache_set *c)
1823     ca->sb.nr_this_dev);
1824     }
1825    
1826     - bkey_init(k);
1827     - SET_KEY_PTRS(k, n);
1828     -
1829     - if (n)
1830     + if (n) {
1831     + bkey_init(k);
1832     + SET_KEY_PTRS(k, n);
1833     c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
1834     + }
1835     out:
1836     if (!journal_full(&c->journal))
1837     __closure_wake_up(&c->journal.wait);
1838     @@ -671,6 +671,9 @@ static void journal_write_unlocked(struct closure *cl)
1839     ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
1840     }
1841    
1842     + /* If KEY_PTRS(k) == 0, this jset would be lost */
1843     + BUG_ON(i == 0);
1844     +
1845     atomic_dec_bug(&fifo_back(&c->journal.pin));
1846     bch_journal_next(&c->journal);
1847     journal_reclaim(c);
1848     diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
1849     index 03bb5cee2b83..2c0d35c882ed 100644
1850     --- a/drivers/md/bcache/super.c
1851     +++ b/drivers/md/bcache/super.c
1852     @@ -1511,6 +1511,7 @@ static void cache_set_free(struct closure *cl)
1853     bch_btree_cache_free(c);
1854     bch_journal_free(c);
1855    
1856     + mutex_lock(&bch_register_lock);
1857     for_each_cache(ca, c, i)
1858     if (ca) {
1859     ca->set = NULL;
1860     @@ -1529,7 +1530,6 @@ static void cache_set_free(struct closure *cl)
1861     mempool_exit(&c->search);
1862     kfree(c->devices);
1863    
1864     - mutex_lock(&bch_register_lock);
1865     list_del(&c->list);
1866     mutex_unlock(&bch_register_lock);
1867    
1868     diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
1869     index 6edffeed9953..18aae28845ec 100644
1870     --- a/drivers/mmc/core/queue.c
1871     +++ b/drivers/mmc/core/queue.c
1872     @@ -494,6 +494,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
1873     blk_mq_unquiesce_queue(q);
1874    
1875     blk_cleanup_queue(q);
1876     + blk_mq_free_tag_set(&mq->tag_set);
1877    
1878     /*
1879     * A request can be completed before the next request, potentially
1880     diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
1881     index a40bcc27f187..7fdac277e382 100644
1882     --- a/drivers/mmc/host/sdhci-of-arasan.c
1883     +++ b/drivers/mmc/host/sdhci-of-arasan.c
1884     @@ -814,7 +814,10 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
1885     host->mmc_host_ops.start_signal_voltage_switch =
1886     sdhci_arasan_voltage_switch;
1887     sdhci_arasan->has_cqe = true;
1888     - host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1889     + host->mmc->caps2 |= MMC_CAP2_CQE;
1890     +
1891     + if (!of_property_read_bool(np, "disable-cqe-dcmd"))
1892     + host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
1893     }
1894    
1895     ret = sdhci_arasan_add_host(sdhci_arasan);
1896     diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
1897     index af0a22019516..d60cbf23d9aa 100644
1898     --- a/drivers/mtd/spi-nor/intel-spi.c
1899     +++ b/drivers/mtd/spi-nor/intel-spi.c
1900     @@ -632,6 +632,10 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
1901     while (len > 0) {
1902     block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
1903    
1904     + /* Read cannot cross 4K boundary */
1905     + block_size = min_t(loff_t, from + block_size,
1906     + round_up(from + 1, SZ_4K)) - from;
1907     +
1908     writel(from, ispi->base + FADDR);
1909    
1910     val = readl(ispi->base + HSFSTS_CTL);
1911     @@ -685,6 +689,10 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
1912     while (len > 0) {
1913     block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
1914    
1915     + /* Write cannot cross 4K boundary */
1916     + block_size = min_t(loff_t, to + block_size,
1917     + round_up(to + 1, SZ_4K)) - to;
1918     +
1919     writel(to, ispi->base + FADDR);
1920    
1921     val = readl(ispi->base + HSFSTS_CTL);
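The intel-spi clamp guarantees that a single FIFO transfer never crosses a
4 KiB flash boundary. A worked sketch of the arithmetic, assuming
SZ_4K = 4096 (the function and the example values are illustrative):

    /* clamp_to_4k(0x0FFC, 64):
     *   round_up(0x0FFD, 4096)            = 0x1000
     *   min(0x0FFC + 64, 0x1000) - 0x0FFC = 4
     * so only 4 bytes are transferred up to the boundary, and the
     * next loop iteration starts cleanly at 0x1000. */
    static size_t clamp_to_4k(loff_t pos, size_t block_size)
    {
            return min_t(loff_t, pos + block_size,
                         round_up(pos + 1, SZ_4K)) - pos;
    }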
1922     diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
1923     index 1eeb7be6aa34..452ad379ed70 100644
1924     --- a/drivers/nvdimm/label.c
1925     +++ b/drivers/nvdimm/label.c
1926     @@ -623,6 +623,17 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
1927     return &guid_null;
1928     }
1929    
1930     +static void reap_victim(struct nd_mapping *nd_mapping,
1931     + struct nd_label_ent *victim)
1932     +{
1933     + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1934     + u32 slot = to_slot(ndd, victim->label);
1935     +
1936     + dev_dbg(ndd->dev, "free: %d\n", slot);
1937     + nd_label_free_slot(ndd, slot);
1938     + victim->label = NULL;
1939     +}
1940     +
1941     static int __pmem_label_update(struct nd_region *nd_region,
1942     struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
1943     int pos, unsigned long flags)
1944     @@ -630,9 +641,9 @@ static int __pmem_label_update(struct nd_region *nd_region,
1945     struct nd_namespace_common *ndns = &nspm->nsio.common;
1946     struct nd_interleave_set *nd_set = nd_region->nd_set;
1947     struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1948     - struct nd_label_ent *label_ent, *victim = NULL;
1949     struct nd_namespace_label *nd_label;
1950     struct nd_namespace_index *nsindex;
1951     + struct nd_label_ent *label_ent;
1952     struct nd_label_id label_id;
1953     struct resource *res;
1954     unsigned long *free;
1955     @@ -701,18 +712,10 @@ static int __pmem_label_update(struct nd_region *nd_region,
1956     list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1957     if (!label_ent->label)
1958     continue;
1959     - if (memcmp(nspm->uuid, label_ent->label->uuid,
1960     - NSLABEL_UUID_LEN) != 0)
1961     - continue;
1962     - victim = label_ent;
1963     - list_move_tail(&victim->list, &nd_mapping->labels);
1964     - break;
1965     - }
1966     - if (victim) {
1967     - dev_dbg(ndd->dev, "free: %d\n", slot);
1968     - slot = to_slot(ndd, victim->label);
1969     - nd_label_free_slot(ndd, slot);
1970     - victim->label = NULL;
1971     + if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
1972     + || memcmp(nspm->uuid, label_ent->label->uuid,
1973     + NSLABEL_UUID_LEN) == 0)
1974     + reap_victim(nd_mapping, label_ent);
1975     }
1976    
1977     /* update index */
1978     diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
1979     index 73a444c41cde..5dc3b407d7bd 100644
1980     --- a/drivers/nvdimm/namespace_devs.c
1981     +++ b/drivers/nvdimm/namespace_devs.c
1982     @@ -1248,12 +1248,27 @@ static int namespace_update_uuid(struct nd_region *nd_region,
1983     for (i = 0; i < nd_region->ndr_mappings; i++) {
1984     struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1985     struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1986     + struct nd_label_ent *label_ent;
1987     struct resource *res;
1988    
1989     for_each_dpa_resource(ndd, res)
1990     if (strcmp(res->name, old_label_id.id) == 0)
1991     sprintf((void *) res->name, "%s",
1992     new_label_id.id);
1993     +
1994     + mutex_lock(&nd_mapping->lock);
1995     + list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1996     + struct nd_namespace_label *nd_label = label_ent->label;
1997     + struct nd_label_id label_id;
1998     +
1999     + if (!nd_label)
2000     + continue;
2001     + nd_label_gen_id(&label_id, nd_label->uuid,
2002     + __le32_to_cpu(nd_label->flags));
2003     + if (strcmp(old_label_id.id, label_id.id) == 0)
2004     + set_bit(ND_LABEL_REAP, &label_ent->flags);
2005     + }
2006     + mutex_unlock(&nd_mapping->lock);
2007     }
2008     kfree(*old_uuid);
2009     out:
2010     diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
2011     index 98317e7ce5b5..01e194a5824e 100644
2012     --- a/drivers/nvdimm/nd.h
2013     +++ b/drivers/nvdimm/nd.h
2014     @@ -113,8 +113,12 @@ struct nd_percpu_lane {
2015     spinlock_t lock;
2016     };
2017    
2018     +enum nd_label_flags {
2019     + ND_LABEL_REAP,
2020     +};
2021     struct nd_label_ent {
2022     struct list_head list;
2023     + unsigned long flags;
2024     struct nd_namespace_label *label;
2025     };
2026    
2027     diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
2028     index 735658ee1c60..c60659fb21de 100644
2029     --- a/drivers/power/supply/axp288_charger.c
2030     +++ b/drivers/power/supply/axp288_charger.c
2031     @@ -832,6 +832,10 @@ static int axp288_charger_probe(struct platform_device *pdev)
2032     /* Register charger interrupts */
2033     for (i = 0; i < CHRG_INTR_END; i++) {
2034     pirq = platform_get_irq(info->pdev, i);
2035     + if (pirq < 0) {
2036     + dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
2037     + return pirq;
2038     + }
2039     info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
2040     if (info->irq[i] < 0) {
2041     dev_warn(&info->pdev->dev,
2042     diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
2043     index 084c8ba9749d..ab0b6e78ca02 100644
2044     --- a/drivers/power/supply/axp288_fuel_gauge.c
2045     +++ b/drivers/power/supply/axp288_fuel_gauge.c
2046     @@ -695,6 +695,26 @@ intr_failed:
2047     * detection reports one despite it not being there.
2048     */
2049     static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
2050     + {
2051     + /* ACEPC T8 Cherry Trail Z8350 mini PC */
2052     + .matches = {
2053     + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
2054     + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
2055     + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
2056     + /* also match on somewhat unique bios-version */
2057     + DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
2058     + },
2059     + },
2060     + {
2061     + /* ACEPC T11 Cherry Trail Z8350 mini PC */
2062     + .matches = {
2063     + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
2064     + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
2065     + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
2066     + /* also match on somewhat unique bios-version */
2067     + DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
2068     + },
2069     + },
2070     {
2071     /* Intel Cherry Trail Compute Stick, Windows version */
2072     .matches = {
2073     diff --git a/drivers/tty/hvc/hvc_riscv_sbi.c b/drivers/tty/hvc/hvc_riscv_sbi.c
2074     index 75155bde2b88..31f53fa77e4a 100644
2075     --- a/drivers/tty/hvc/hvc_riscv_sbi.c
2076     +++ b/drivers/tty/hvc/hvc_riscv_sbi.c
2077     @@ -53,7 +53,6 @@ device_initcall(hvc_sbi_init);
2078     static int __init hvc_sbi_console_init(void)
2079     {
2080     hvc_instantiate(0, 0, &hvc_sbi_ops);
2081     - add_preferred_console("hvc", 0, NULL);
2082    
2083     return 0;
2084     }
2085     diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
2086     index 88312c6c92cc..0617e87ab343 100644
2087     --- a/drivers/tty/vt/keyboard.c
2088     +++ b/drivers/tty/vt/keyboard.c
2089     @@ -123,6 +123,7 @@ static const int NR_TYPES = ARRAY_SIZE(max_vals);
2090     static struct input_handler kbd_handler;
2091     static DEFINE_SPINLOCK(kbd_event_lock);
2092     static DEFINE_SPINLOCK(led_lock);
2093     +static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
2094     static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
2095     static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
2096     static bool dead_key_next;
2097     @@ -1990,11 +1991,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
2098     char *p;
2099     u_char *q;
2100     u_char __user *up;
2101     - int sz;
2102     + int sz, fnw_sz;
2103     int delta;
2104     char *first_free, *fj, *fnw;
2105     int i, j, k;
2106     int ret;
2107     + unsigned long flags;
2108    
2109     if (!capable(CAP_SYS_TTY_CONFIG))
2110     perm = 0;
2111     @@ -2037,7 +2039,14 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
2112     goto reterr;
2113     }
2114    
2115     + fnw = NULL;
2116     + fnw_sz = 0;
2117     + /* race against other writers */
2118     + again:
2119     + spin_lock_irqsave(&func_buf_lock, flags);
2120     q = func_table[i];
2121     +
2122     + /* fj points to the next entry after 'q' */
2123     first_free = funcbufptr + (funcbufsize - funcbufleft);
2124     for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
2125     ;
2126     @@ -2045,10 +2054,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
2127     fj = func_table[j];
2128     else
2129     fj = first_free;
2130     -
2131     + /* buffer usage increase caused by the new entry */
2132     delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string);
2133     +
2134     if (delta <= funcbufleft) { /* it fits in current buf */
2135     if (j < MAX_NR_FUNC) {
2136     + /* make enough space for new entry at 'fj' */
2137     memmove(fj + delta, fj, first_free - fj);
2138     for (k = j; k < MAX_NR_FUNC; k++)
2139     if (func_table[k])
2140     @@ -2061,20 +2072,28 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
2141     sz = 256;
2142     while (sz < funcbufsize - funcbufleft + delta)
2143     sz <<= 1;
2144     - fnw = kmalloc(sz, GFP_KERNEL);
2145     - if(!fnw) {
2146     - ret = -ENOMEM;
2147     - goto reterr;
2148     + if (fnw_sz != sz) {
2149     + spin_unlock_irqrestore(&func_buf_lock, flags);
2150     + kfree(fnw);
2151     + fnw = kmalloc(sz, GFP_KERNEL);
2152     + fnw_sz = sz;
2153     + if (!fnw) {
2154     + ret = -ENOMEM;
2155     + goto reterr;
2156     + }
2157     + goto again;
2158     }
2159    
2160     if (!q)
2161     func_table[i] = fj;
2162     + /* copy data before insertion point to new location */
2163     if (fj > funcbufptr)
2164     memmove(fnw, funcbufptr, fj - funcbufptr);
2165     for (k = 0; k < j; k++)
2166     if (func_table[k])
2167     func_table[k] = fnw + (func_table[k] - funcbufptr);
2168    
2169     + /* copy data after insertion point to new location */
2170     if (first_free > fj) {
2171     memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj);
2172     for (k = j; k < MAX_NR_FUNC; k++)
2173     @@ -2087,7 +2106,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
2174     funcbufleft = funcbufleft - delta + sz - funcbufsize;
2175     funcbufsize = sz;
2176     }
2177     + /* finally insert item itself */
2178     strcpy(func_table[i], kbs->kb_string);
2179     + spin_unlock_irqrestore(&func_buf_lock, flags);
2180     break;
2181     }
2182     ret = 0;
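The keyboard.c hunks deal with the fact that kmalloc(GFP_KERNEL) may sleep
and therefore cannot run under the new func_buf_lock spinlock: the code
drops the lock, allocates, then jumps back to 'again' and recomputes the
required size, since another writer may have changed it in the meantime.
The generic shape of that retry pattern, as a sketch (compute_needed_size()
is a hypothetical stand-in for the funcbuf bookkeeping):

    static int example_update(spinlock_t *lock, char **out)
    {
            unsigned long flags;
            char *buf = NULL;
            int sz, buf_sz = 0;

    again:
            spin_lock_irqsave(lock, flags);
            sz = compute_needed_size();             /* hypothetical */
            if (sz != buf_sz) {
                    /* Cannot sleep under a spinlock: drop it,
                     * allocate, then retry from the top. */
                    spin_unlock_irqrestore(lock, flags);
                    kfree(buf);
                    buf = kmalloc(sz, GFP_KERNEL);
                    if (!buf)
                            return -ENOMEM;
                    buf_sz = sz;
                    goto again;
            }
            *out = buf;             /* install while holding the lock */
            spin_unlock_irqrestore(lock, flags);
            return 0;
    }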
2183     diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
2184     index 3e5ec1cee059..f93b948acfa5 100644
2185     --- a/drivers/tty/vt/vt.c
2186     +++ b/drivers/tty/vt/vt.c
2187     @@ -4155,8 +4155,6 @@ void do_blank_screen(int entering_gfx)
2188     return;
2189     }
2190    
2191     - if (blank_state != blank_normal_wait)
2192     - return;
2193     blank_state = blank_off;
2194    
2195     /* don't blank graphics */
2196     diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
2197     index ae750b1574a2..2a4f52c7be22 100644
2198     --- a/fs/btrfs/backref.c
2199     +++ b/fs/btrfs/backref.c
2200     @@ -1452,8 +1452,8 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
2201     * callers (such as fiemap) which want to know whether the extent is
2202     * shared but do not need a ref count.
2203     *
2204     - * This attempts to allocate a transaction in order to account for
2205     - * delayed refs, but continues on even when the alloc fails.
2206     + * This attempts to attach to the running transaction in order to account for
2207     + * delayed refs, but continues on even when no running transaction exists.
2208     *
2209     * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
2210     */
2211     @@ -1476,13 +1476,16 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
2212     tmp = ulist_alloc(GFP_NOFS);
2213     roots = ulist_alloc(GFP_NOFS);
2214     if (!tmp || !roots) {
2215     - ulist_free(tmp);
2216     - ulist_free(roots);
2217     - return -ENOMEM;
2218     + ret = -ENOMEM;
2219     + goto out;
2220     }
2221    
2222     - trans = btrfs_join_transaction(root);
2223     + trans = btrfs_attach_transaction(root);
2224     if (IS_ERR(trans)) {
2225     + if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
2226     + ret = PTR_ERR(trans);
2227     + goto out;
2228     + }
2229     trans = NULL;
2230     down_read(&fs_info->commit_root_sem);
2231     } else {
2232     @@ -1515,6 +1518,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
2233     } else {
2234     up_read(&fs_info->commit_root_sem);
2235     }
2236     +out:
2237     ulist_free(tmp);
2238     ulist_free(roots);
2239     return ret;
2240     @@ -1904,13 +1908,19 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
2241     extent_item_objectid);
2242    
2243     if (!search_commit_root) {
2244     - trans = btrfs_join_transaction(fs_info->extent_root);
2245     - if (IS_ERR(trans))
2246     - return PTR_ERR(trans);
2247     + trans = btrfs_attach_transaction(fs_info->extent_root);
2248     + if (IS_ERR(trans)) {
2249     + if (PTR_ERR(trans) != -ENOENT &&
2250     + PTR_ERR(trans) != -EROFS)
2251     + return PTR_ERR(trans);
2252     + trans = NULL;
2253     + }
2254     + }
2255     +
2256     + if (trans)
2257     btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2258     - } else {
2259     + else
2260     down_read(&fs_info->commit_root_sem);
2261     - }
2262    
2263     ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
2264     tree_mod_seq_elem.seq, &refs,
2265     @@ -1943,7 +1953,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
2266    
2267     free_leaf_list(refs);
2268     out:
2269     - if (!search_commit_root) {
2270     + if (trans) {
2271     btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2272     btrfs_end_transaction(trans);
2273     } else {
2274     diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
2275     index 48ac8b7c43a5..79ac1ebabaf7 100644
2276     --- a/fs/btrfs/ctree.c
2277     +++ b/fs/btrfs/ctree.c
2278     @@ -2436,6 +2436,16 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2279     if (tmp) {
2280     /* first we do an atomic uptodate check */
2281     if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2282     + /*
2283     + * Do an extra check on first_key: eb can be stale due to
2284     + * being cached, being read from scrub, or having multiple
2285     + * parents (shared tree blocks).
2286     + */
2287     + if (btrfs_verify_level_key(fs_info, tmp,
2288     + parent_level - 1, &first_key, gen)) {
2289     + free_extent_buffer(tmp);
2290     + return -EUCLEAN;
2291     + }
2292     *eb_ret = tmp;
2293     return 0;
2294     }
2295     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2296     index b4f61a3d560a..b2dc613ebed2 100644
2297     --- a/fs/btrfs/disk-io.c
2298     +++ b/fs/btrfs/disk-io.c
2299     @@ -408,9 +408,9 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
2300     return ret;
2301     }
2302    
2303     -static int verify_level_key(struct btrfs_fs_info *fs_info,
2304     - struct extent_buffer *eb, int level,
2305     - struct btrfs_key *first_key, u64 parent_transid)
2306     +int btrfs_verify_level_key(struct btrfs_fs_info *fs_info,
2307     + struct extent_buffer *eb, int level,
2308     + struct btrfs_key *first_key, u64 parent_transid)
2309     {
2310     int found_level;
2311     struct btrfs_key found_key;
2312     @@ -487,8 +487,8 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
2313     if (verify_parent_transid(io_tree, eb,
2314     parent_transid, 0))
2315     ret = -EIO;
2316     - else if (verify_level_key(fs_info, eb, level,
2317     - first_key, parent_transid))
2318     + else if (btrfs_verify_level_key(fs_info, eb, level,
2319     + first_key, parent_transid))
2320     ret = -EUCLEAN;
2321     else
2322     break;
2323     @@ -995,13 +995,18 @@ void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
2324     {
2325     struct extent_buffer *buf = NULL;
2326     struct inode *btree_inode = fs_info->btree_inode;
2327     + int ret;
2328    
2329     buf = btrfs_find_create_tree_block(fs_info, bytenr);
2330     if (IS_ERR(buf))
2331     return;
2332     - read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
2333     - buf, WAIT_NONE, 0);
2334     - free_extent_buffer(buf);
2335     +
2336     + ret = read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf,
2337     + WAIT_NONE, 0);
2338     + if (ret < 0)
2339     + free_extent_buffer_stale(buf);
2340     + else
2341     + free_extent_buffer(buf);
2342     }
2343    
2344     int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
2345     @@ -1021,12 +1026,12 @@ int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
2346     ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
2347     mirror_num);
2348     if (ret) {
2349     - free_extent_buffer(buf);
2350     + free_extent_buffer_stale(buf);
2351     return ret;
2352     }
2353    
2354     if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
2355     - free_extent_buffer(buf);
2356     + free_extent_buffer_stale(buf);
2357     return -EIO;
2358     } else if (extent_buffer_uptodate(buf)) {
2359     *eb = buf;
2360     @@ -1080,7 +1085,7 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
2361     ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
2362     level, first_key);
2363     if (ret) {
2364     - free_extent_buffer(buf);
2365     + free_extent_buffer_stale(buf);
2366     return ERR_PTR(ret);
2367     }
2368     return buf;
2369     diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
2370     index 4cccba22640f..7a4a60f26dbf 100644
2371     --- a/fs/btrfs/disk-io.h
2372     +++ b/fs/btrfs/disk-io.h
2373     @@ -39,6 +39,9 @@ static inline u64 btrfs_sb_offset(int mirror)
2374     struct btrfs_device;
2375     struct btrfs_fs_devices;
2376    
2377     +int btrfs_verify_level_key(struct btrfs_fs_info *fs_info,
2378     + struct extent_buffer *eb, int level,
2379     + struct btrfs_key *first_key, u64 parent_transid);
2380     struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
2381     u64 parent_transid, int level,
2382     struct btrfs_key *first_key);
2383     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2384     index c0db7785cede..809c2c307c64 100644
2385     --- a/fs/btrfs/extent-tree.c
2386     +++ b/fs/btrfs/extent-tree.c
2387     @@ -10789,9 +10789,9 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
2388     * held back allocations.
2389     */
2390     static int btrfs_trim_free_extents(struct btrfs_device *device,
2391     - u64 minlen, u64 *trimmed)
2392     + struct fstrim_range *range, u64 *trimmed)
2393     {
2394     - u64 start = 0, len = 0;
2395     + u64 start = range->start, len = 0;
2396     int ret;
2397    
2398     *trimmed = 0;
2399     @@ -10834,8 +10834,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
2400     if (!trans)
2401     up_read(&fs_info->commit_root_sem);
2402    
2403     - ret = find_free_dev_extent_start(trans, device, minlen, start,
2404     - &start, &len);
2405     + ret = find_free_dev_extent_start(trans, device, range->minlen,
2406     + start, &start, &len);
2407     if (trans) {
2408     up_read(&fs_info->commit_root_sem);
2409     btrfs_put_transaction(trans);
2410     @@ -10848,6 +10848,16 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
2411     break;
2412     }
2413    
2414     + /* If we are out of the passed range, break */
2415     + if (start > range->start + range->len - 1) {
2416     + mutex_unlock(&fs_info->chunk_mutex);
2417     + ret = 0;
2418     + break;
2419     + }
2420     +
2421     + start = max(range->start, start);
2422     + len = min(range->len, len);
2423     +
2424     ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
2425     mutex_unlock(&fs_info->chunk_mutex);
2426    
2427     @@ -10857,6 +10867,10 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
2428     start += len;
2429     *trimmed += bytes;
2430    
2431     + /* We've trimmed enough */
2432     + if (*trimmed >= range->len)
2433     + break;
2434     +
2435     if (fatal_signal_pending(current)) {
2436     ret = -ERESTARTSYS;
2437     break;
2438     @@ -10940,8 +10954,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
2439     mutex_lock(&fs_info->fs_devices->device_list_mutex);
2440     devices = &fs_info->fs_devices->devices;
2441     list_for_each_entry(device, devices, dev_list) {
2442     - ret = btrfs_trim_free_extents(device, range->minlen,
2443     - &group_trimmed);
2444     + ret = btrfs_trim_free_extents(device, range, &group_trimmed);
2445     if (ret) {
2446     dev_failed++;
2447     dev_ret = ret;
2448     diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
2449     index 84cb6e5ef36c..635e419f2a2d 100644
2450     --- a/fs/btrfs/send.c
2451     +++ b/fs/btrfs/send.c
2452     @@ -6583,6 +6583,38 @@ commit_trans:
2453     return btrfs_commit_transaction(trans);
2454     }
2455    
2456     +/*
2457     + * Make sure any existing delalloc is flushed for any root used by a send
2458     + * operation so that we do not miss any data and we do not race with writeback
2459     + * finishing and changing a tree while send is using the tree. This could
2460     + * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
2461     + * a send operation then uses the subvolume.
2462     + * After flushing delalloc, ensure_commit_roots_uptodate() must be called.
2463     + */
2464     +static int flush_delalloc_roots(struct send_ctx *sctx)
2465     +{
2466     + struct btrfs_root *root = sctx->parent_root;
2467     + int ret;
2468     + int i;
2469     +
2470     + if (root) {
2471     + ret = btrfs_start_delalloc_snapshot(root);
2472     + if (ret)
2473     + return ret;
2474     + btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
2475     + }
2476     +
2477     + for (i = 0; i < sctx->clone_roots_cnt; i++) {
2478     + root = sctx->clone_roots[i].root;
2479     + ret = btrfs_start_delalloc_snapshot(root);
2480     + if (ret)
2481     + return ret;
2482     + btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
2483     + }
2484     +
2485     + return 0;
2486     +}
2487     +
2488     static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
2489     {
2490     spin_lock(&root->root_item_lock);
2491     @@ -6807,6 +6839,10 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
2492     NULL);
2493     sort_clone_roots = 1;
2494    
2495     + ret = flush_delalloc_roots(sctx);
2496     + if (ret)
2497     + goto out;
2498     +
2499     ret = ensure_commit_roots_uptodate(sctx);
2500     if (ret)
2501     goto out;
2502     diff --git a/fs/dax.c b/fs/dax.c
2503     index 09fa70683c41..004c8ac1117c 100644
2504     --- a/fs/dax.c
2505     +++ b/fs/dax.c
2506     @@ -1660,8 +1660,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
2507     }
2508    
2509     trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
2510     - result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
2511     - write);
2512     + result = vmf_insert_pfn_pmd(vmf, pfn, write);
2513     break;
2514     case IOMAP_UNWRITTEN:
2515     case IOMAP_HOLE:
2516     @@ -1775,8 +1774,7 @@ static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
2517     break;
2518     #ifdef CONFIG_FS_DAX_PMD
2519     case PE_SIZE_PMD:
2520     - ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
2521     - pfn, true);
2522     + ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
2523     break;
2524     #endif
2525     default:
2526     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
2527     index 2ddf7833350d..1ee51d3a978a 100644
2528     --- a/fs/ext4/ext4.h
2529     +++ b/fs/ext4/ext4.h
2530     @@ -1670,6 +1670,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
2531     #define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */
2532     #define EXT4_FEATURE_INCOMPAT_ENCRYPT 0x10000
2533    
2534     +extern void ext4_update_dynamic_rev(struct super_block *sb);
2535     +
2536     #define EXT4_FEATURE_COMPAT_FUNCS(name, flagname) \
2537     static inline bool ext4_has_feature_##name(struct super_block *sb) \
2538     { \
2539     @@ -1678,6 +1680,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
2540     } \
2541     static inline void ext4_set_feature_##name(struct super_block *sb) \
2542     { \
2543     + ext4_update_dynamic_rev(sb); \
2544     EXT4_SB(sb)->s_es->s_feature_compat |= \
2545     cpu_to_le32(EXT4_FEATURE_COMPAT_##flagname); \
2546     } \
2547     @@ -1695,6 +1698,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
2548     } \
2549     static inline void ext4_set_feature_##name(struct super_block *sb) \
2550     { \
2551     + ext4_update_dynamic_rev(sb); \
2552     EXT4_SB(sb)->s_es->s_feature_ro_compat |= \
2553     cpu_to_le32(EXT4_FEATURE_RO_COMPAT_##flagname); \
2554     } \
2555     @@ -1712,6 +1716,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
2556     } \
2557     static inline void ext4_set_feature_##name(struct super_block *sb) \
2558     { \
2559     + ext4_update_dynamic_rev(sb); \
2560     EXT4_SB(sb)->s_es->s_feature_incompat |= \
2561     cpu_to_le32(EXT4_FEATURE_INCOMPAT_##flagname); \
2562     } \
2563     @@ -2679,7 +2684,6 @@ do { \
2564    
2565     #endif
2566    
2567     -extern void ext4_update_dynamic_rev(struct super_block *sb);
2568     extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
2569     __u32 compat);
2570     extern int ext4_update_rocompat_feature(handle_t *handle,
2571     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
2572     index 72a361d5ef74..45aea792d22a 100644
2573     --- a/fs/ext4/extents.c
2574     +++ b/fs/ext4/extents.c
2575     @@ -1035,6 +1035,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
2576     __le32 border;
2577     ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
2578     int err = 0;
2579     + size_t ext_size = 0;
2580    
2581     /* make decision: where to split? */
2582     /* FIXME: now decision is simplest: at current extent */
2583     @@ -1126,6 +1127,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
2584     le16_add_cpu(&neh->eh_entries, m);
2585     }
2586    
2587     + /* zero out unused area in the extent block */
2588     + ext_size = sizeof(struct ext4_extent_header) +
2589     + sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
2590     + memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
2591     ext4_extent_block_csum_set(inode, neh);
2592     set_buffer_uptodate(bh);
2593     unlock_buffer(bh);
2594     @@ -1205,6 +1210,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
2595     sizeof(struct ext4_extent_idx) * m);
2596     le16_add_cpu(&neh->eh_entries, m);
2597     }
2598     + /* zero out unused area in the extent block */
2599     + ext_size = sizeof(struct ext4_extent_header) +
2600     + (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
2601     + memset(bh->b_data + ext_size, 0,
2602     + inode->i_sb->s_blocksize - ext_size);
2603     ext4_extent_block_csum_set(inode, neh);
2604     set_buffer_uptodate(bh);
2605     unlock_buffer(bh);
2606     @@ -1270,6 +1280,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
2607     ext4_fsblk_t newblock, goal = 0;
2608     struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
2609     int err = 0;
2610     + size_t ext_size = 0;
2611    
2612     /* Try to prepend new index to old one */
2613     if (ext_depth(inode))
2614     @@ -1295,9 +1306,11 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
2615     goto out;
2616     }
2617    
2618     + ext_size = sizeof(EXT4_I(inode)->i_data);
2619     /* move top-level index/leaf into new block */
2620     - memmove(bh->b_data, EXT4_I(inode)->i_data,
2621     - sizeof(EXT4_I(inode)->i_data));
2622     + memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
2623     + /* zero out unused area in the extent block */
2624     + memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
2625    
2626     /* set size of new block */
2627     neh = ext_block_hdr(bh);
2628     diff --git a/fs/ext4/file.c b/fs/ext4/file.c
2629     index 98ec11f69cd4..2c5baa5e8291 100644
2630     --- a/fs/ext4/file.c
2631     +++ b/fs/ext4/file.c
2632     @@ -264,6 +264,13 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2633     }
2634    
2635     ret = __generic_file_write_iter(iocb, from);
2636     + /*
2637     + * Unaligned direct AIO must be the only IO in flight. Otherwise
2638     + * overlapping aligned IO after unaligned might result in data
2639     + * corruption.
2640     + */
2641     + if (ret == -EIOCBQUEUED && unaligned_aio)
2642     + ext4_unwritten_wait(inode);
2643     inode_unlock(inode);
2644    
2645     if (ret > 0)
2646     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2647     index 2c43c5b92229..67e8aa35197e 100644
2648     --- a/fs/ext4/inode.c
2649     +++ b/fs/ext4/inode.c
2650     @@ -5320,7 +5320,6 @@ static int ext4_do_update_inode(handle_t *handle,
2651     err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
2652     if (err)
2653     goto out_brelse;
2654     - ext4_update_dynamic_rev(sb);
2655     ext4_set_feature_large_file(sb);
2656     ext4_handle_sync(handle);
2657     err = ext4_handle_dirty_super(handle, sb);
2658     @@ -5971,7 +5970,7 @@ int ext4_expand_extra_isize(struct inode *inode,
2659    
2660     ext4_write_lock_xattr(inode, &no_expand);
2661    
2662     - BUFFER_TRACE(iloc.bh, "get_write_access");
2663     + BUFFER_TRACE(iloc->bh, "get_write_access");
2664     error = ext4_journal_get_write_access(handle, iloc->bh);
2665     if (error) {
2666     brelse(iloc->bh);
2667     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
2668     index 5f24fdc140ad..53d57cdf3c4d 100644
2669     --- a/fs/ext4/ioctl.c
2670     +++ b/fs/ext4/ioctl.c
2671     @@ -977,7 +977,7 @@ mext_out:
2672     if (err == 0)
2673     err = err2;
2674     mnt_drop_write_file(filp);
2675     - if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
2676     + if (!err && (o_group < EXT4_SB(sb)->s_groups_count) &&
2677     ext4_has_group_desc_csum(sb) &&
2678     test_opt(sb, INIT_INODE_TABLE))
2679     err = ext4_register_li_request(sb, o_group);
2680     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
2681     index e29fce2fbf25..cc229f3357f7 100644
2682     --- a/fs/ext4/mballoc.c
2683     +++ b/fs/ext4/mballoc.c
2684     @@ -1539,7 +1539,7 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block,
2685     ex->fe_len += 1 << order;
2686     }
2687    
2688     - if (ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))) {
2689     + if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
2690     /* Should never happen! (but apparently sometimes does?!?) */
2691     WARN_ON(1);
2692     ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
2693     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
2694     index 4f8de2b9e87e..4c5aa5df6573 100644
2695     --- a/fs/ext4/namei.c
2696     +++ b/fs/ext4/namei.c
2697     @@ -871,12 +871,15 @@ static void dx_release(struct dx_frame *frames)
2698     {
2699     struct dx_root_info *info;
2700     int i;
2701     + unsigned int indirect_levels;
2702    
2703     if (frames[0].bh == NULL)
2704     return;
2705    
2706     info = &((struct dx_root *)frames[0].bh->b_data)->info;
2707     - for (i = 0; i <= info->indirect_levels; i++) {
2708     + /* save local copy, "info" may be freed after brelse() */
2709     + indirect_levels = info->indirect_levels;
2710     + for (i = 0; i <= indirect_levels; i++) {
2711     if (frames[i].bh == NULL)
2712     break;
2713     brelse(frames[i].bh);
2714     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2715     index e7ae26e36c9c..4d5c0fc9d23a 100644
2716     --- a/fs/ext4/resize.c
2717     +++ b/fs/ext4/resize.c
2718     @@ -874,6 +874,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
2719     err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
2720     if (unlikely(err)) {
2721     ext4_std_error(sb, err);
2722     + iloc.bh = NULL;
2723     goto errout;
2724     }
2725     brelse(dind);
2726     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2727     index abba7ece78e9..a270391228af 100644
2728     --- a/fs/ext4/super.c
2729     +++ b/fs/ext4/super.c
2730     @@ -698,7 +698,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
2731     jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
2732     save_error_info(sb, function, line);
2733     }
2734     - if (test_opt(sb, ERRORS_PANIC)) {
2735     + if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
2736     if (EXT4_SB(sb)->s_journal &&
2737     !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
2738     return;
2739     @@ -2259,7 +2259,6 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
2740     es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
2741     le16_add_cpu(&es->s_mnt_count, 1);
2742     ext4_update_tstamp(es, s_mtime);
2743     - ext4_update_dynamic_rev(sb);
2744     if (sbi->s_journal)
2745     ext4_set_feature_journal_needs_recovery(sb);
2746    
2747     @@ -3514,6 +3513,37 @@ int ext4_calculate_overhead(struct super_block *sb)
2748     return 0;
2749     }
2750    
2751     +static void ext4_clamp_want_extra_isize(struct super_block *sb)
2752     +{
2753     + struct ext4_sb_info *sbi = EXT4_SB(sb);
2754     + struct ext4_super_block *es = sbi->s_es;
2755     +
2756     + /* determine the minimum size of new large inodes, if present */
2757     + if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
2758     + sbi->s_want_extra_isize == 0) {
2759     + sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
2760     + EXT4_GOOD_OLD_INODE_SIZE;
2761     + if (ext4_has_feature_extra_isize(sb)) {
2762     + if (sbi->s_want_extra_isize <
2763     + le16_to_cpu(es->s_want_extra_isize))
2764     + sbi->s_want_extra_isize =
2765     + le16_to_cpu(es->s_want_extra_isize);
2766     + if (sbi->s_want_extra_isize <
2767     + le16_to_cpu(es->s_min_extra_isize))
2768     + sbi->s_want_extra_isize =
2769     + le16_to_cpu(es->s_min_extra_isize);
2770     + }
2771     + }
2772     + /* Check if enough inode space is available */
2773     + if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
2774     + sbi->s_inode_size) {
2775     + sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
2776     + EXT4_GOOD_OLD_INODE_SIZE;
2777     + ext4_msg(sb, KERN_INFO,
2778     + "required extra inode space not available");
2779     + }
2780     +}
2781     +
2782     static void ext4_set_resv_clusters(struct super_block *sb)
2783     {
2784     ext4_fsblk_t resv_clusters;
2785     @@ -4239,7 +4269,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2786     "data=, fs mounted w/o journal");
2787     goto failed_mount_wq;
2788     }
2789     - sbi->s_def_mount_opt &= EXT4_MOUNT_JOURNAL_CHECKSUM;
2790     + sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
2791     clear_opt(sb, JOURNAL_CHECKSUM);
2792     clear_opt(sb, DATA_FLAGS);
2793     sbi->s_journal = NULL;
2794     @@ -4388,30 +4418,7 @@ no_journal:
2795     } else if (ret)
2796     goto failed_mount4a;
2797    
2798     - /* determine the minimum size of new large inodes, if present */
2799     - if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
2800     - sbi->s_want_extra_isize == 0) {
2801     - sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
2802     - EXT4_GOOD_OLD_INODE_SIZE;
2803     - if (ext4_has_feature_extra_isize(sb)) {
2804     - if (sbi->s_want_extra_isize <
2805     - le16_to_cpu(es->s_want_extra_isize))
2806     - sbi->s_want_extra_isize =
2807     - le16_to_cpu(es->s_want_extra_isize);
2808     - if (sbi->s_want_extra_isize <
2809     - le16_to_cpu(es->s_min_extra_isize))
2810     - sbi->s_want_extra_isize =
2811     - le16_to_cpu(es->s_min_extra_isize);
2812     - }
2813     - }
2814     - /* Check if enough inode space is available */
2815     - if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
2816     - sbi->s_inode_size) {
2817     - sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
2818     - EXT4_GOOD_OLD_INODE_SIZE;
2819     - ext4_msg(sb, KERN_INFO, "required extra inode space not"
2820     - "available");
2821     - }
2822     + ext4_clamp_want_extra_isize(sb);
2823    
2824     ext4_set_resv_clusters(sb);
2825    
2826     @@ -5197,6 +5204,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
2827     goto restore_opts;
2828     }
2829    
2830     + ext4_clamp_want_extra_isize(sb);
2831     +
2832     if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
2833     test_opt(sb, JOURNAL_CHECKSUM)) {
2834     ext4_msg(sb, KERN_ERR, "changing journal_checksum "
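
Two of the super.c hunks deserve a note. The mount-option fix is a classic one-character mask bug: `x &= FLAG` keeps only FLAG and wipes every other bit, while the intended `x &= ~FLAG` clears just that one bit. In isolation (value invented):

unsigned int opts  = 0x2a;				/* several options set */
unsigned int kept  = opts & EXT4_MOUNT_JOURNAL_CHECKSUM;	/* old: ONLY this bit survives */
unsigned int fixed = opts & ~EXT4_MOUNT_JOURNAL_CHECKSUM;	/* new: ONLY this bit cleared  */

The other change is pure factoring: the want_extra_isize clamping moves into ext4_clamp_want_extra_isize() so that remount applies the same bounds checking as the initial mount instead of trusting whatever value was left behind.
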
2835     diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
2836     index 006c277dc22e..f73fc90e5daa 100644
2837     --- a/fs/ext4/xattr.c
2838     +++ b/fs/ext4/xattr.c
2839     @@ -1700,7 +1700,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
2840    
2841     /* No failures allowed past this point. */
2842    
2843     - if (!s->not_found && here->e_value_size && here->e_value_offs) {
2844     + if (!s->not_found && here->e_value_size && !here->e_value_inum) {
2845     /* Remove the old value. */
2846     void *first_val = s->base + min_offs;
2847     size_t offs = le16_to_cpu(here->e_value_offs);
2848     diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
2849     index 82ce6d4f7e31..9544e2f8b79f 100644
2850     --- a/fs/fs-writeback.c
2851     +++ b/fs/fs-writeback.c
2852     @@ -530,8 +530,6 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
2853    
2854     isw->inode = inode;
2855    
2856     - atomic_inc(&isw_nr_in_flight);
2857     -
2858     /*
2859     * In addition to synchronizing among switchers, I_WB_SWITCH tells
2860     * the RCU protected stat update paths to grab the i_page
2861     @@ -539,6 +537,9 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
2862     * Let's continue after I_WB_SWITCH is guaranteed to be visible.
2863     */
2864     call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
2865     +
2866     + atomic_inc(&isw_nr_in_flight);
2867     +
2868     goto out_unlock;
2869    
2870     out_free:
2871     @@ -908,7 +909,11 @@ restart:
2872     void cgroup_writeback_umount(void)
2873     {
2874     if (atomic_read(&isw_nr_in_flight)) {
2875     - synchronize_rcu();
2876     + /*
2877     + * Use rcu_barrier() to wait for all pending callbacks to
2878     + * ensure that all in-flight wb switches are in the workqueue.
2879     + */
2880     + rcu_barrier();
2881     flush_workqueue(isw_wq);
2882     }
2883     }
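
The writeback pair is about ordering against RCU. synchronize_rcu() only waits for a grace period to elapse; it does not wait for already-queued callbacks to run, so umount could flush isw_wq before inode_switch_wbs_rcu_fn() had queued the switch work. rcu_barrier() waits for the callbacks themselves, and the counter increment moves next to the call_rcu() it accounts for. A sketch of the interleaving the old code allowed:

/*   switcher                          umount path
 *   --------                          -----------
 *   call_rcu(&isw->rcu_head, fn);
 *                                     synchronize_rcu();      grace period ends,
 *                                                             but fn has not run
 *                                     flush_workqueue(isw_wq); nothing queued yet
 *   fn(): queue_work(isw_wq, ...);                            queued too late
 *
 * rcu_barrier() instead returns only after fn() has completed. */
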
2884     diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
2885     index a3a3d256fb0e..7a24f91af29e 100644
2886     --- a/fs/hugetlbfs/inode.c
2887     +++ b/fs/hugetlbfs/inode.c
2888     @@ -426,9 +426,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
2889     u32 hash;
2890    
2891     index = page->index;
2892     - hash = hugetlb_fault_mutex_hash(h, current->mm,
2893     - &pseudo_vma,
2894     - mapping, index, 0);
2895     + hash = hugetlb_fault_mutex_hash(h, mapping, index, 0);
2896     mutex_lock(&hugetlb_fault_mutex_table[hash]);
2897    
2898     /*
2899     @@ -625,8 +623,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
2900     addr = index * hpage_size;
2901    
2902     /* mutex taken here, fault path and hole punch */
2903     - hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
2904     - index, addr);
2905     + hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
2906     mutex_lock(&hugetlb_fault_mutex_table[hash]);
2907    
2908     /* See if already present in mapping to avoid alloc/free */
2909     diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
2910     index 88f2a49338a1..e9cf88f0bc29 100644
2911     --- a/fs/jbd2/journal.c
2912     +++ b/fs/jbd2/journal.c
2913     @@ -1366,6 +1366,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
2914     journal_superblock_t *sb = journal->j_superblock;
2915     int ret;
2916    
2917     + /* Buffer got discarded, which means the block device got invalidated */

2918     + if (!buffer_mapped(bh))
2919     + return -EIO;
2920     +
2921     trace_jbd2_write_superblock(journal, write_flags);
2922     if (!(journal->j_flags & JBD2_BARRIER))
2923     write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
2924     @@ -2385,22 +2389,19 @@ static struct kmem_cache *jbd2_journal_head_cache;
2925     static atomic_t nr_journal_heads = ATOMIC_INIT(0);
2926     #endif
2927    
2928     -static int jbd2_journal_init_journal_head_cache(void)
2929     +static int __init jbd2_journal_init_journal_head_cache(void)
2930     {
2931     - int retval;
2932     -
2933     - J_ASSERT(jbd2_journal_head_cache == NULL);
2934     + J_ASSERT(!jbd2_journal_head_cache);
2935     jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head",
2936     sizeof(struct journal_head),
2937     0, /* offset */
2938     SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU,
2939     NULL); /* ctor */
2940     - retval = 0;
2941     if (!jbd2_journal_head_cache) {
2942     - retval = -ENOMEM;
2943     printk(KERN_EMERG "JBD2: no memory for journal_head cache\n");
2944     + return -ENOMEM;
2945     }
2946     - return retval;
2947     + return 0;
2948     }
2949    
2950     static void jbd2_journal_destroy_journal_head_cache(void)
2951     @@ -2646,28 +2647,38 @@ static void __exit jbd2_remove_jbd_stats_proc_entry(void)
2952    
2953     struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache;
2954    
2955     +static int __init jbd2_journal_init_inode_cache(void)
2956     +{
2957     + J_ASSERT(!jbd2_inode_cache);
2958     + jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
2959     + if (!jbd2_inode_cache) {
2960     + pr_emerg("JBD2: failed to create inode cache\n");
2961     + return -ENOMEM;
2962     + }
2963     + return 0;
2964     +}
2965     +
2966     static int __init jbd2_journal_init_handle_cache(void)
2967     {
2968     + J_ASSERT(!jbd2_handle_cache);
2969     jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
2970     - if (jbd2_handle_cache == NULL) {
2971     + if (!jbd2_handle_cache) {
2972     printk(KERN_EMERG "JBD2: failed to create handle cache\n");
2973     return -ENOMEM;
2974     }
2975     - jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
2976     - if (jbd2_inode_cache == NULL) {
2977     - printk(KERN_EMERG "JBD2: failed to create inode cache\n");
2978     - kmem_cache_destroy(jbd2_handle_cache);
2979     - return -ENOMEM;
2980     - }
2981     return 0;
2982     }
2983    
2984     +static void jbd2_journal_destroy_inode_cache(void)
2985     +{
2986     + kmem_cache_destroy(jbd2_inode_cache);
2987     + jbd2_inode_cache = NULL;
2988     +}
2989     +
2990     static void jbd2_journal_destroy_handle_cache(void)
2991     {
2992     kmem_cache_destroy(jbd2_handle_cache);
2993     jbd2_handle_cache = NULL;
2994     - kmem_cache_destroy(jbd2_inode_cache);
2995     - jbd2_inode_cache = NULL;
2996     }
2997    
2998     /*
2999     @@ -2678,11 +2689,15 @@ static int __init journal_init_caches(void)
3000     {
3001     int ret;
3002    
3003     - ret = jbd2_journal_init_revoke_caches();
3004     + ret = jbd2_journal_init_revoke_record_cache();
3005     + if (ret == 0)
3006     + ret = jbd2_journal_init_revoke_table_cache();
3007     if (ret == 0)
3008     ret = jbd2_journal_init_journal_head_cache();
3009     if (ret == 0)
3010     ret = jbd2_journal_init_handle_cache();
3011     + if (ret == 0)
3012     + ret = jbd2_journal_init_inode_cache();
3013     if (ret == 0)
3014     ret = jbd2_journal_init_transaction_cache();
3015     return ret;
3016     @@ -2690,9 +2705,11 @@ static int __init journal_init_caches(void)
3017    
3018     static void jbd2_journal_destroy_caches(void)
3019     {
3020     - jbd2_journal_destroy_revoke_caches();
3021     + jbd2_journal_destroy_revoke_record_cache();
3022     + jbd2_journal_destroy_revoke_table_cache();
3023     jbd2_journal_destroy_journal_head_cache();
3024     jbd2_journal_destroy_handle_cache();
3025     + jbd2_journal_destroy_inode_cache();
3026     jbd2_journal_destroy_transaction_cache();
3027     jbd2_journal_destroy_slabs();
3028     }
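
The jbd2 churn is a refactor toward one init/destroy pair per cache (handle, inode, revoke record, revoke table, transaction). kmem_cache_destroy(NULL) is a no-op, so each destroy helper can run unconditionally and journal_init_caches() can unwind any partial initialization through the single jbd2_journal_destroy_caches() path. The pattern, reduced to one hypothetical cache:

struct demo_record { int payload; };		/* hypothetical payload type */
static struct kmem_cache *demo_cache;

static int __init demo_cache_init(void)
{
	J_ASSERT(!demo_cache);
	demo_cache = KMEM_CACHE(demo_record, SLAB_TEMPORARY);
	return demo_cache ? 0 : -ENOMEM;
}

static void demo_cache_destroy(void)
{
	kmem_cache_destroy(demo_cache);		/* NULL-safe */
	demo_cache = NULL;
}
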
3029     diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
3030     index a1143e57a718..69b9bc329964 100644
3031     --- a/fs/jbd2/revoke.c
3032     +++ b/fs/jbd2/revoke.c
3033     @@ -178,33 +178,41 @@ static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
3034     return NULL;
3035     }
3036    
3037     -void jbd2_journal_destroy_revoke_caches(void)
3038     +void jbd2_journal_destroy_revoke_record_cache(void)
3039     {
3040     kmem_cache_destroy(jbd2_revoke_record_cache);
3041     jbd2_revoke_record_cache = NULL;
3042     +}
3043     +
3044     +void jbd2_journal_destroy_revoke_table_cache(void)
3045     +{
3046     kmem_cache_destroy(jbd2_revoke_table_cache);
3047     jbd2_revoke_table_cache = NULL;
3048     }
3049    
3050     -int __init jbd2_journal_init_revoke_caches(void)
3051     +int __init jbd2_journal_init_revoke_record_cache(void)
3052     {
3053     J_ASSERT(!jbd2_revoke_record_cache);
3054     - J_ASSERT(!jbd2_revoke_table_cache);
3055     -
3056     jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s,
3057     SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY);
3058     - if (!jbd2_revoke_record_cache)
3059     - goto record_cache_failure;
3060    
3061     + if (!jbd2_revoke_record_cache) {
3062     + pr_emerg("JBD2: failed to create revoke_record cache\n");
3063     + return -ENOMEM;
3064     + }
3065     + return 0;
3066     +}
3067     +
3068     +int __init jbd2_journal_init_revoke_table_cache(void)
3069     +{
3070     + J_ASSERT(!jbd2_revoke_table_cache);
3071     jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s,
3072     SLAB_TEMPORARY);
3073     - if (!jbd2_revoke_table_cache)
3074     - goto table_cache_failure;
3075     - return 0;
3076     -table_cache_failure:
3077     - jbd2_journal_destroy_revoke_caches();
3078     -record_cache_failure:
3079     + if (!jbd2_revoke_table_cache) {
3080     + pr_emerg("JBD2: failed to create revoke_table cache\n");
3081     return -ENOMEM;
3082     + }
3083     + return 0;
3084     }
3085    
3086     static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
3087     diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
3088     index 914e725c82c4..e20a6703531f 100644
3089     --- a/fs/jbd2/transaction.c
3090     +++ b/fs/jbd2/transaction.c
3091     @@ -42,9 +42,11 @@ int __init jbd2_journal_init_transaction_cache(void)
3092     0,
3093     SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
3094     NULL);
3095     - if (transaction_cache)
3096     - return 0;
3097     - return -ENOMEM;
3098     + if (!transaction_cache) {
3099     + pr_emerg("JBD2: failed to create transaction cache\n");
3100     + return -ENOMEM;
3101     + }
3102     + return 0;
3103     }
3104    
3105     void jbd2_journal_destroy_transaction_cache(void)
3106     diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
3107     index 4bf8d5854b27..af2888d23de3 100644
3108     --- a/fs/ocfs2/export.c
3109     +++ b/fs/ocfs2/export.c
3110     @@ -148,16 +148,24 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
3111     u64 blkno;
3112     struct dentry *parent;
3113     struct inode *dir = d_inode(child);
3114     + int set;
3115    
3116     trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
3117     (unsigned long long)OCFS2_I(dir)->ip_blkno);
3118    
3119     + status = ocfs2_nfs_sync_lock(OCFS2_SB(dir->i_sb), 1);
3120     + if (status < 0) {
3121     + mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status);
3122     + parent = ERR_PTR(status);
3123     + goto bail;
3124     + }
3125     +
3126     status = ocfs2_inode_lock(dir, NULL, 0);
3127     if (status < 0) {
3128     if (status != -ENOENT)
3129     mlog_errno(status);
3130     parent = ERR_PTR(status);
3131     - goto bail;
3132     + goto unlock_nfs_sync;
3133     }
3134    
3135     status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
3136     @@ -166,11 +174,31 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
3137     goto bail_unlock;
3138     }
3139    
3140     + status = ocfs2_test_inode_bit(OCFS2_SB(dir->i_sb), blkno, &set);
3141     + if (status < 0) {
3142     + if (status == -EINVAL) {
3143     + status = -ESTALE;
3144     + } else
3145     + mlog(ML_ERROR, "test inode bit failed %d\n", status);
3146     + parent = ERR_PTR(status);
3147     + goto bail_unlock;
3148     + }
3149     +
3150     + trace_ocfs2_get_dentry_test_bit(status, set);
3151     + if (!set) {
3152     + status = -ESTALE;
3153     + parent = ERR_PTR(status);
3154     + goto bail_unlock;
3155     + }
3156     +
3157     parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
3158    
3159     bail_unlock:
3160     ocfs2_inode_unlock(dir, 0);
3161    
3162     +unlock_nfs_sync:
3163     + ocfs2_nfs_sync_unlock(OCFS2_SB(dir->i_sb), 1);
3164     +
3165     bail:
3166     trace_ocfs2_get_parent_end(parent);
3167    
3168     diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
3169     index 5fcb845b9fec..8cf2218b46a7 100644
3170     --- a/fs/pstore/inode.c
3171     +++ b/fs/pstore/inode.c
3172     @@ -482,12 +482,10 @@ static struct file_system_type pstore_fs_type = {
3173     .kill_sb = pstore_kill_sb,
3174     };
3175    
3176     -static int __init init_pstore_fs(void)
3177     +int __init pstore_init_fs(void)
3178     {
3179     int err;
3180    
3181     - pstore_choose_compression();
3182     -
3183     /* Create a convenient mount point for people to access pstore */
3184     err = sysfs_create_mount_point(fs_kobj, "pstore");
3185     if (err)
3186     @@ -500,14 +498,9 @@ static int __init init_pstore_fs(void)
3187     out:
3188     return err;
3189     }
3190     -module_init(init_pstore_fs)
3191    
3192     -static void __exit exit_pstore_fs(void)
3193     +void __exit pstore_exit_fs(void)
3194     {
3195     unregister_filesystem(&pstore_fs_type);
3196     sysfs_remove_mount_point(fs_kobj, "pstore");
3197     }
3198     -module_exit(exit_pstore_fs)
3199     -
3200     -MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
3201     -MODULE_LICENSE("GPL");
3202     diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
3203     index fb767e28aeb2..7062ea4bc57c 100644
3204     --- a/fs/pstore/internal.h
3205     +++ b/fs/pstore/internal.h
3206     @@ -37,7 +37,8 @@ extern bool pstore_is_mounted(void);
3207     extern void pstore_record_init(struct pstore_record *record,
3208     struct pstore_info *psi);
3209    
3210     -/* Called during module_init() */
3211     -extern void __init pstore_choose_compression(void);
3212     +/* Called during pstore init/exit. */
3213     +int __init pstore_init_fs(void);
3214     +void __exit pstore_exit_fs(void);
3215    
3216     #endif
3217     diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
3218     index 15e99d5a681d..b821054ca3ed 100644
3219     --- a/fs/pstore/platform.c
3220     +++ b/fs/pstore/platform.c
3221     @@ -274,36 +274,56 @@ static int pstore_decompress(void *in, void *out,
3222    
3223     static void allocate_buf_for_compression(void)
3224     {
3225     + struct crypto_comp *ctx;
3226     + int size;
3227     + char *buf;
3228     +
3229     + /* Skip if not built-in or compression backend not selected yet. */
3230     if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
3231     return;
3232    
3233     + /* Skip if no pstore backend yet or compression init already done. */
3234     + if (!psinfo || tfm)
3235     + return;
3236     +
3237     if (!crypto_has_comp(zbackend->name, 0, 0)) {
3238     - pr_err("No %s compression\n", zbackend->name);
3239     + pr_err("Unknown compression: %s\n", zbackend->name);
3240     return;
3241     }
3242    
3243     - big_oops_buf_sz = zbackend->zbufsize(psinfo->bufsize);
3244     - if (big_oops_buf_sz <= 0)
3245     + size = zbackend->zbufsize(psinfo->bufsize);
3246     + if (size <= 0) {
3247     + pr_err("Invalid compression size for %s: %d\n",
3248     + zbackend->name, size);
3249     return;
3250     + }
3251    
3252     - big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
3253     - if (!big_oops_buf) {
3254     - pr_err("allocate compression buffer error!\n");
3255     + buf = kmalloc(size, GFP_KERNEL);
3256     + if (!buf) {
3257     + pr_err("Failed %d byte compression buffer allocation for: %s\n",
3258     + size, zbackend->name);
3259     return;
3260     }
3261    
3262     - tfm = crypto_alloc_comp(zbackend->name, 0, 0);
3263     - if (IS_ERR_OR_NULL(tfm)) {
3264     - kfree(big_oops_buf);
3265     - big_oops_buf = NULL;
3266     - pr_err("crypto_alloc_comp() failed!\n");
3267     + ctx = crypto_alloc_comp(zbackend->name, 0, 0);
3268     + if (IS_ERR_OR_NULL(ctx)) {
3269     + kfree(buf);
3270     + pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
3271     + PTR_ERR(ctx));
3272     return;
3273     }
3274     +
3275     + /* A non-NULL big_oops_buf indicates compression is available. */
3276     + tfm = ctx;
3277     + big_oops_buf_sz = size;
3278     + big_oops_buf = buf;
3279     +
3280     + pr_info("Using compression: %s\n", zbackend->name);
3281     }
3282    
3283     static void free_buf_for_compression(void)
3284     {
3285     - if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && !IS_ERR_OR_NULL(tfm))
3286     + if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
3287     crypto_free_comp(tfm);
3288     kfree(big_oops_buf);
3289     big_oops_buf = NULL;
3290     @@ -774,14 +794,43 @@ void __init pstore_choose_compression(void)
3291     for (step = zbackends; step->name; step++) {
3292     if (!strcmp(compress, step->name)) {
3293     zbackend = step;
3294     - pr_info("using %s compression\n", zbackend->name);
3295     return;
3296     }
3297     }
3298     }
3299    
3300     +static int __init pstore_init(void)
3301     +{
3302     + int ret;
3303     +
3304     + pstore_choose_compression();
3305     +
3306     + /*
3307     + * Check if any pstore backends registered earlier but did not
3308     + * initialize compression because crypto was not ready. If so,
3309     + * initialize compression now.
3310     + */
3311     + allocate_buf_for_compression();
3312     +
3313     + ret = pstore_init_fs();
3314     + if (ret)
3315     + return ret;
3316     +
3317     + return 0;
3318     +}
3319     +late_initcall(pstore_init);
3320     +
3321     +static void __exit pstore_exit(void)
3322     +{
3323     + pstore_exit_fs();
3324     +}
3325     +module_exit(pstore_exit)
3326     +
3327     module_param(compress, charp, 0444);
3328     MODULE_PARM_DESC(compress, "Pstore compression to use");
3329    
3330     module_param(backend, charp, 0444);
3331     MODULE_PARM_DESC(backend, "Pstore backend to use");
3332     +
3333     +MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
3334     +MODULE_LICENSE("GPL");
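
The rewritten allocate_buf_for_compression() follows a prepare-locally, publish-late shape: size, buffer and crypto context are built in locals, and the globals (tfm, big_oops_buf, big_oops_buf_sz) are assigned only once everything has succeeded; the `!psinfo || tfm` guard makes the function safe to call twice, once at backend registration and once from the new late initcall. Reduced to its skeleton, with name and size assumed valid:

static void alloc_compression_sketch(const char *name, int size)
{
	struct crypto_comp *ctx;
	char *buf;

	if (tfm)			/* second call: already set up */
		return;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return;

	ctx = crypto_alloc_comp(name, 0, 0);
	if (IS_ERR_OR_NULL(ctx)) {
		kfree(buf);		/* no global was ever half-written */
		return;
	}

	tfm = ctx;			/* publish all-or-nothing */
	big_oops_buf_sz = size;
	big_oops_buf = buf;
}
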
3335     diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
3336     index eb67bb7f04de..44ed6b193d2e 100644
3337     --- a/fs/pstore/ram.c
3338     +++ b/fs/pstore/ram.c
3339     @@ -956,7 +956,7 @@ static int __init ramoops_init(void)
3340    
3341     return ret;
3342     }
3343     -late_initcall(ramoops_init);
3344     +postcore_initcall(ramoops_init);
3345    
3346     static void __exit ramoops_exit(void)
3347     {
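
Moving ramoops from late_initcall to postcore_initcall pairs with the new late_initcall(pstore_init) above: initcall levels run in a fixed order (core, postcore, ..., device, late), so the backend is guaranteed to have registered, and set psinfo, before pstore_init() retries the compression setup. The ordering in two lines:

postcore_initcall(ramoops_init);	/* level 2: backend registers, psinfo set   */
late_initcall(pstore_init);		/* level 7: compression retried, fs set up  */
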
3348     diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
3349     index fdcb45999b26..77227224ca88 100644
3350     --- a/include/linux/huge_mm.h
3351     +++ b/include/linux/huge_mm.h
3352     @@ -47,10 +47,8 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
3353     extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
3354     unsigned long addr, pgprot_t newprot,
3355     int prot_numa);
3356     -vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
3357     - pmd_t *pmd, pfn_t pfn, bool write);
3358     -vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
3359     - pud_t *pud, pfn_t pfn, bool write);
3360     +vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
3361     +vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
3362     enum transparent_hugepage_flag {
3363     TRANSPARENT_HUGEPAGE_FLAG,
3364     TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
3365     diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
3366     index 087fd5f48c91..d34112fb3d52 100644
3367     --- a/include/linux/hugetlb.h
3368     +++ b/include/linux/hugetlb.h
3369     @@ -123,9 +123,7 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
3370     void free_huge_page(struct page *page);
3371     void hugetlb_fix_reserve_counts(struct inode *inode);
3372     extern struct mutex *hugetlb_fault_mutex_table;
3373     -u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3374     - struct vm_area_struct *vma,
3375     - struct address_space *mapping,
3376     +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3377     pgoff_t idx, unsigned long address);
3378    
3379     pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
3380     diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
3381     index b708e5169d1d..583b82b5a1e9 100644
3382     --- a/include/linux/jbd2.h
3383     +++ b/include/linux/jbd2.h
3384     @@ -1317,7 +1317,7 @@ extern void __wait_on_journal (journal_t *);
3385    
3386     /* Transaction cache support */
3387     extern void jbd2_journal_destroy_transaction_cache(void);
3388     -extern int jbd2_journal_init_transaction_cache(void);
3389     +extern int __init jbd2_journal_init_transaction_cache(void);
3390     extern void jbd2_journal_free_transaction(transaction_t *);
3391    
3392     /*
3393     @@ -1445,8 +1445,10 @@ static inline void jbd2_free_inode(struct jbd2_inode *jinode)
3394     /* Primary revoke support */
3395     #define JOURNAL_REVOKE_DEFAULT_HASH 256
3396     extern int jbd2_journal_init_revoke(journal_t *, int);
3397     -extern void jbd2_journal_destroy_revoke_caches(void);
3398     -extern int jbd2_journal_init_revoke_caches(void);
3399     +extern void jbd2_journal_destroy_revoke_record_cache(void);
3400     +extern void jbd2_journal_destroy_revoke_table_cache(void);
3401     +extern int __init jbd2_journal_init_revoke_record_cache(void);
3402     +extern int __init jbd2_journal_init_revoke_table_cache(void);
3403    
3404     extern void jbd2_journal_destroy_revoke(journal_t *);
3405     extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
3406     diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
3407     index 5d42859cb441..844fc2973392 100644
3408     --- a/include/linux/mfd/da9063/registers.h
3409     +++ b/include/linux/mfd/da9063/registers.h
3410     @@ -215,9 +215,9 @@
3411    
3412     /* DA9063 Configuration registers */
3413     /* OTP */
3414     -#define DA9063_REG_OPT_COUNT 0x101
3415     -#define DA9063_REG_OPT_ADDR 0x102
3416     -#define DA9063_REG_OPT_DATA 0x103
3417     +#define DA9063_REG_OTP_CONT 0x101
3418     +#define DA9063_REG_OTP_ADDR 0x102
3419     +#define DA9063_REG_OTP_DATA 0x103
3420    
3421     /* Customer Trim and Configuration */
3422     #define DA9063_REG_T_OFFSET 0x104
3423     diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
3424     index ad2a9a852aea..b4fd5a7c2aaa 100644
3425     --- a/include/linux/mfd/max77620.h
3426     +++ b/include/linux/mfd/max77620.h
3427     @@ -136,8 +136,8 @@
3428     #define MAX77620_FPS_PERIOD_MIN_US 40
3429     #define MAX20024_FPS_PERIOD_MIN_US 20
3430    
3431     -#define MAX77620_FPS_PERIOD_MAX_US 2560
3432     -#define MAX20024_FPS_PERIOD_MAX_US 5120
3433     +#define MAX20024_FPS_PERIOD_MAX_US 2560
3434     +#define MAX77620_FPS_PERIOD_MAX_US 5120
3435    
3436     #define MAX77620_REG_FPS_GPIO1 0x54
3437     #define MAX77620_REG_FPS_GPIO2 0x55
3438     diff --git a/kernel/fork.c b/kernel/fork.c
3439     index 64ef113e387e..69874db3fba8 100644
3440     --- a/kernel/fork.c
3441     +++ b/kernel/fork.c
3442     @@ -907,6 +907,15 @@ static void mm_init_aio(struct mm_struct *mm)
3443     #endif
3444     }
3445    
3446     +static __always_inline void mm_clear_owner(struct mm_struct *mm,
3447     + struct task_struct *p)
3448     +{
3449     +#ifdef CONFIG_MEMCG
3450     + if (mm->owner == p)
3451     + WRITE_ONCE(mm->owner, NULL);
3452     +#endif
3453     +}
3454     +
3455     static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
3456     {
3457     #ifdef CONFIG_MEMCG
3458     @@ -1286,6 +1295,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
3459     free_pt:
3460     /* don't put binfmt in mmput, we haven't got module yet */
3461     mm->binfmt = NULL;
3462     + mm_init_owner(mm, NULL);
3463     mmput(mm);
3464    
3465     fail_nomem:
3466     @@ -1617,6 +1627,21 @@ static inline void rcu_copy_process(struct task_struct *p)
3467     #endif /* #ifdef CONFIG_TASKS_RCU */
3468     }
3469    
3470     +static void __delayed_free_task(struct rcu_head *rhp)
3471     +{
3472     + struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
3473     +
3474     + free_task(tsk);
3475     +}
3476     +
3477     +static __always_inline void delayed_free_task(struct task_struct *tsk)
3478     +{
3479     + if (IS_ENABLED(CONFIG_MEMCG))
3480     + call_rcu(&tsk->rcu, __delayed_free_task);
3481     + else
3482     + free_task(tsk);
3483     +}
3484     +
3485     /*
3486     * This creates a new process as a copy of the old one,
3487     * but does not actually start it yet.
3488     @@ -2072,8 +2097,10 @@ bad_fork_cleanup_io:
3489     bad_fork_cleanup_namespaces:
3490     exit_task_namespaces(p);
3491     bad_fork_cleanup_mm:
3492     - if (p->mm)
3493     + if (p->mm) {
3494     + mm_clear_owner(p->mm, p);
3495     mmput(p->mm);
3496     + }
3497     bad_fork_cleanup_signal:
3498     if (!(clone_flags & CLONE_THREAD))
3499     free_signal_struct(p->signal);
3500     @@ -2104,7 +2131,7 @@ bad_fork_cleanup_count:
3501     bad_fork_free:
3502     p->state = TASK_DEAD;
3503     put_task_stack(p);
3504     - free_task(p);
3505     + delayed_free_task(p);
3506     fork_out:
3507     spin_lock_irq(&current->sighand->siglock);
3508     hlist_del_init(&delayed.node);
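
The fork.c change closes a use-after-free: with CONFIG_MEMCG, mm->owner is read under rcu_read_lock() (get_mem_cgroup_from_mm() and friends), so a task_struct from a failed fork must not be freed until a grace period has passed, and a half-built mm must forget its owner before mmput(). A sketch of the reader that forces the grace period, loosely modeled on get_mem_cgroup_from_mm() and assuming a struct mm_struct *mm in scope:

struct task_struct *task;
struct mem_cgroup *memcg = NULL;

rcu_read_lock();
task = rcu_dereference(mm->owner);	/* could be the failed fork's task */
if (task)
	memcg = mem_cgroup_from_task(task);
rcu_read_unlock();			/* task must stay valid until here */
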
3509     diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
3510     index ef909357b84e..e41e4b4b5267 100644
3511     --- a/kernel/locking/rwsem-xadd.c
3512     +++ b/kernel/locking/rwsem-xadd.c
3513     @@ -130,6 +130,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
3514     {
3515     struct rwsem_waiter *waiter, *tmp;
3516     long oldcount, woken = 0, adjustment = 0;
3517     + struct list_head wlist;
3518    
3519     /*
3520     * Take a peek at the queue head waiter such that we can determine
3521     @@ -188,18 +189,42 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
3522     * of the queue. We know that woken will be at least 1 as we accounted
3523     * for above. Note we increment the 'active part' of the count by the
3524     * number of readers before waking any processes up.
3525     + *
3526     + * We have to do wakeup in 2 passes to prevent the possibility that
3527     + * the reader count may be decremented before it is incremented. This
3528     + * is because the to-be-woken waiter may not have slept yet, so it
3529     + * may see waiter->task cleared, finish its critical section and
3530     + * unlock before the reader count is incremented.
3531     + *
3532     + * 1) Collect the read-waiters in a separate list, count them and
3533     + * fully increment the reader count in rwsem.
3534     + * 2) For each waiter in the new list, clear waiter->task and
3535     + * put them into wake_q to be woken up later.
3536     */
3537     - list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
3538     - struct task_struct *tsk;
3539     -
3540     + list_for_each_entry(waiter, &sem->wait_list, list) {
3541     if (waiter->type == RWSEM_WAITING_FOR_WRITE)
3542     break;
3543    
3544     woken++;
3545     - tsk = waiter->task;
3546     + }
3547     + list_cut_before(&wlist, &sem->wait_list, &waiter->list);
3548     +
3549     + adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
3550     + if (list_empty(&sem->wait_list)) {
3551     + /* hit end of list above */
3552     + adjustment -= RWSEM_WAITING_BIAS;
3553     + }
3554     +
3555     + if (adjustment)
3556     + atomic_long_add(adjustment, &sem->count);
3557     +
3558     + /* 2nd pass */
3559     + list_for_each_entry_safe(waiter, tmp, &wlist, list) {
3560     + struct task_struct *tsk;
3561    
3562     + tsk = waiter->task;
3563     get_task_struct(tsk);
3564     - list_del(&waiter->list);
3565     +
3566     /*
3567     * Ensure calling get_task_struct() before setting the reader
3568     * waiter to nil such that rwsem_down_read_failed() cannot
3569     @@ -215,15 +240,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
3570     /* wake_q_add() already take the task ref */
3571     put_task_struct(tsk);
3572     }
3573     -
3574     - adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
3575     - if (list_empty(&sem->wait_list)) {
3576     - /* hit end of list above */
3577     - adjustment -= RWSEM_WAITING_BIAS;
3578     - }
3579     -
3580     - if (adjustment)
3581     - atomic_long_add(adjustment, &sem->count);
3582     }
3583    
3584     /*
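
The rwsem comment above describes the race; a concrete timeline may help. With the one-pass code, the waker clears reader R's waiter->task before adding the readers' bias to sem->count. R, which never actually slept, sees the cleared field, runs its whole critical section, and its up_read() decrement lands before the waker's increment, transiently corrupting the count. The two-pass shape avoids this (sketch, fields simplified, wake_one_sketch() hypothetical):

long woken = 0;
struct list_head wlist;
struct rwsem_waiter *waiter, *tmp;

list_for_each_entry(waiter, &sem->wait_list, list) {	/* pass 1: count */
	if (waiter->type == RWSEM_WAITING_FOR_WRITE)
		break;
	woken++;
}
list_cut_before(&wlist, &sem->wait_list, &waiter->list);
atomic_long_add(woken * RWSEM_ACTIVE_READ_BIAS, &sem->count);

list_for_each_entry_safe(waiter, tmp, &wlist, list)	/* pass 2: wake */
	wake_one_sketch(waiter);	/* only now clears waiter->task */
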
3585     diff --git a/lib/iov_iter.c b/lib/iov_iter.c
3586     index 8be175df3075..acd7b97c16f2 100644
3587     --- a/lib/iov_iter.c
3588     +++ b/lib/iov_iter.c
3589     @@ -817,8 +817,21 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache);
3590    
3591     static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
3592     {
3593     - struct page *head = compound_head(page);
3594     - size_t v = n + offset + page_address(page) - page_address(head);
3595     + struct page *head;
3596     + size_t v = n + offset;
3597     +
3598     + /*
3599     + * The general case needs to access the page order so that it
3600     + * can compute the page size.
3601     + * However, we mostly deal with order-0 pages and thus can
3602     + * avoid a possible cache line miss for requests that fit all
3603     + * page orders.
3604     + */
3605     + if (n <= v && v <= PAGE_SIZE)
3606     + return true;
3607     +
3608     + head = compound_head(page);
3609     + v += (page - head) << PAGE_SHIFT;
3610    
3611     if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
3612     return true;
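
The new fast path in page_copy_sane() is worth a worked example. For offset = 100 and n = 200 on a 4 KiB page: v = 300, and 200 <= 300 && 300 <= 4096 holds, so the copy is accepted without ever touching compound_head() and its potential cache miss. The `n <= v` half also doubles as an overflow check: if n + offset wraps around, v < n and the fast path is refused.

size_t offset = 100, n = 200;
size_t v = n + offset;			/* 300                          */
bool ok = (n <= v && v <= PAGE_SIZE);	/* true, assuming 4 KiB pages   */
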
3613     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
3614     index 7d08e89361ee..6fad1864ba03 100644
3615     --- a/mm/huge_memory.c
3616     +++ b/mm/huge_memory.c
3617     @@ -772,11 +772,13 @@ out_unlock:
3618     pte_free(mm, pgtable);
3619     }
3620    
3621     -vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
3622     - pmd_t *pmd, pfn_t pfn, bool write)
3623     +vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
3624     {
3625     + unsigned long addr = vmf->address & PMD_MASK;
3626     + struct vm_area_struct *vma = vmf->vma;
3627     pgprot_t pgprot = vma->vm_page_prot;
3628     pgtable_t pgtable = NULL;
3629     +
3630     /*
3631     * If we had pmd_special, we could avoid all these restrictions,
3632     * but we need to be consistent with PTEs and architectures that
3633     @@ -799,7 +801,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
3634    
3635     track_pfn_insert(vma, &pgprot, pfn);
3636    
3637     - insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
3638     + insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
3639     return VM_FAULT_NOPAGE;
3640     }
3641     EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
3642     @@ -848,10 +850,12 @@ out_unlock:
3643     spin_unlock(ptl);
3644     }
3645    
3646     -vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
3647     - pud_t *pud, pfn_t pfn, bool write)
3648     +vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
3649     {
3650     + unsigned long addr = vmf->address & PUD_MASK;
3651     + struct vm_area_struct *vma = vmf->vma;
3652     pgprot_t pgprot = vma->vm_page_prot;
3653     +
3654     /*
3655     * If we had pud_special, we could avoid all these restrictions,
3656     * but we need to be consistent with PTEs and architectures that
3657     @@ -868,7 +872,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
3658    
3659     track_pfn_insert(vma, &pgprot, pfn);
3660    
3661     - insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
3662     + insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
3663     return VM_FAULT_NOPAGE;
3664     }
3665     EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
3666     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3667     index 5fb779cda972..0bbb033d7d8c 100644
3668     --- a/mm/hugetlb.c
3669     +++ b/mm/hugetlb.c
3670     @@ -1572,8 +1572,9 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
3671     */
3672     if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
3673     SetPageHugeTemporary(page);
3674     + spin_unlock(&hugetlb_lock);
3675     put_page(page);
3676     - page = NULL;
3677     + return NULL;
3678     } else {
3679     h->surplus_huge_pages++;
3680     h->surplus_huge_pages_node[page_to_nid(page)]++;
3681     @@ -3777,8 +3778,7 @@ retry:
3682     * handling userfault. Reacquire after handling
3683     * fault to make calling code simpler.
3684     */
3685     - hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
3686     - idx, haddr);
3687     + hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
3688     mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3689     ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3690     mutex_lock(&hugetlb_fault_mutex_table[hash]);
3691     @@ -3886,21 +3886,14 @@ backout_unlocked:
3692     }
3693    
3694     #ifdef CONFIG_SMP
3695     -u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3696     - struct vm_area_struct *vma,
3697     - struct address_space *mapping,
3698     +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3699     pgoff_t idx, unsigned long address)
3700     {
3701     unsigned long key[2];
3702     u32 hash;
3703    
3704     - if (vma->vm_flags & VM_SHARED) {
3705     - key[0] = (unsigned long) mapping;
3706     - key[1] = idx;
3707     - } else {
3708     - key[0] = (unsigned long) mm;
3709     - key[1] = address >> huge_page_shift(h);
3710     - }
3711     + key[0] = (unsigned long) mapping;
3712     + key[1] = idx;
3713    
3714     hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3715    
3716     @@ -3911,9 +3904,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3717     * For uniprocessor systems we always use a single mutex, so just
3718     * return 0 and avoid the hashing overhead.
3719     */
3720     -u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3721     - struct vm_area_struct *vma,
3722     - struct address_space *mapping,
3723     +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3724     pgoff_t idx, unsigned long address)
3725     {
3726     return 0;
3727     @@ -3958,7 +3949,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3728     * get spurious allocation failures if two CPUs race to instantiate
3729     * the same page in the page cache.
3730     */
3731     - hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
3732     + hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
3733     mutex_lock(&hugetlb_fault_mutex_table[hash]);
3734    
3735     entry = huge_ptep_get(ptep);
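
Dropping the mm/vma arguments from hugetlb_fault_mutex_hash() means shared and private mappings now hash the same way, on (mapping, index). Per the hugetlbfs hunks earlier, truncation and hole punch only have the mapping to work with, so under the old scheme they could pick a different mutex than a racing private-mapping fault on the same page. The key computation, reduced (mapping and idx assumed in scope):

unsigned long key[2] = { (unsigned long)mapping, idx };
u32 hash = jhash2((u32 *)&key, sizeof(key) / sizeof(u32), 0);

mutex_lock(&hugetlb_fault_mutex_table[hash & (num_fault_mutexes - 1)]);
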
3736     diff --git a/mm/mincore.c b/mm/mincore.c
3737     index fc37afe226e6..2732c8c0764c 100644
3738     --- a/mm/mincore.c
3739     +++ b/mm/mincore.c
3740     @@ -169,6 +169,22 @@ out:
3741     return 0;
3742     }
3743    
3744     +static inline bool can_do_mincore(struct vm_area_struct *vma)
3745     +{
3746     + if (vma_is_anonymous(vma))
3747     + return true;
3748     + if (!vma->vm_file)
3749     + return false;
3750     + /*
3751     + * Reveal pagecache information only for non-anonymous mappings that
3752     + * correspond to the files the calling process could (if it tried) open
3753     + * for writing; otherwise we'd be including shared non-exclusive
3754     + * mappings, which opens a side channel.
3755     + */
3756     + return inode_owner_or_capable(file_inode(vma->vm_file)) ||
3757     + inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
3758     +}
3759     +
3760     /*
3761     * Do a chunk of "sys_mincore()". We've already checked
3762     * all the arguments, we hold the mmap semaphore: we should
3763     @@ -189,8 +205,13 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
3764     vma = find_vma(current->mm, addr);
3765     if (!vma || addr < vma->vm_start)
3766     return -ENOMEM;
3767     - mincore_walk.mm = vma->vm_mm;
3768     end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
3769     + if (!can_do_mincore(vma)) {
3770     + unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
3771     + memset(vec, 1, pages);
3772     + return pages;
3773     + }
3774     + mincore_walk.mm = vma->vm_mm;
3775     err = walk_page_range(addr, end, &mincore_walk);
3776     if (err < 0)
3777     return err;
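
For mincore(2), the page walk is now skipped entirely when can_do_mincore() fails, and the vector is filled with 1s, so residency probing of shared page cache (a classic cache side channel) learns nothing while callers still get a well-formed answer. A hypothetical userspace probe showing the visible effect, assuming addr maps one page of a file the caller cannot open for writing:

#include <stdio.h>
#include <sys/mman.h>

static int probe(void *addr, size_t len)
{
	unsigned char vec[1];

	if (mincore(addr, len, vec) != 0)
		return -1;
	return vec[0] & 1;	/* with the fix: always 1 for such mappings */
}
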
3778     diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
3779     index 458acda96f20..7529d3fcc899 100644
3780     --- a/mm/userfaultfd.c
3781     +++ b/mm/userfaultfd.c
3782     @@ -271,8 +271,7 @@ retry:
3783     */
3784     idx = linear_page_index(dst_vma, dst_addr);
3785     mapping = dst_vma->vm_file->f_mapping;
3786     - hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
3787     - idx, dst_addr);
3788     + hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);
3789     mutex_lock(&hugetlb_fault_mutex_table[hash]);
3790    
3791     err = -ENOMEM;
3792     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
3793     index cb587dce67a9..35931a18418f 100644
3794     --- a/sound/pci/hda/patch_hdmi.c
3795     +++ b/sound/pci/hda/patch_hdmi.c
3796     @@ -1548,9 +1548,11 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
3797     ret = !repoll || !eld->monitor_present || eld->eld_valid;
3798    
3799     jack = snd_hda_jack_tbl_get(codec, pin_nid);
3800     - if (jack)
3801     + if (jack) {
3802     jack->block_report = !ret;
3803     -
3804     + jack->pin_sense = (eld->monitor_present && eld->eld_valid) ?
3805     + AC_PINSENSE_PRESENCE : 0;
3806     + }
3807     mutex_unlock(&per_pin->lock);
3808     return ret;
3809     }
3810     @@ -1660,6 +1662,11 @@ static void hdmi_repoll_eld(struct work_struct *work)
3811     container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
3812     struct hda_codec *codec = per_pin->codec;
3813     struct hdmi_spec *spec = codec->spec;
3814     + struct hda_jack_tbl *jack;
3815     +
3816     + jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
3817     + if (jack)
3818     + jack->jack_dirty = 1;
3819    
3820     if (per_pin->repoll_count++ > 6)
3821     per_pin->repoll_count = 0;
3822     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3823     index 75a0be2aa9c2..adce5b60d5b4 100644
3824     --- a/sound/pci/hda/patch_realtek.c
3825     +++ b/sound/pci/hda/patch_realtek.c
3826     @@ -477,12 +477,45 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
3827     set_eapd(codec, *p, on);
3828     }
3829    
3830     +static int find_ext_mic_pin(struct hda_codec *codec);
3831     +
3832     +static void alc_headset_mic_no_shutup(struct hda_codec *codec)
3833     +{
3834     + const struct hda_pincfg *pin;
3835     + int mic_pin = find_ext_mic_pin(codec);
3836     + int i;
3837     +
3838     + /* don't shut up pins when unloading the driver; otherwise it breaks
3839     + * the default pin setup at the next load of the driver
3840     + */
3841     + if (codec->bus->shutdown)
3842     + return;
3843     +
3844     + snd_array_for_each(&codec->init_pins, i, pin) {
3845     + /* use read here for syncing after issuing each verb */
3846     + if (pin->nid != mic_pin)
3847     + snd_hda_codec_read(codec, pin->nid, 0,
3848     + AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
3849     + }
3850     +
3851     + codec->pins_shutup = 1;
3852     +}
3853     +
3854     static void alc_shutup_pins(struct hda_codec *codec)
3855     {
3856     struct alc_spec *spec = codec->spec;
3857    
3858     - if (!spec->no_shutup_pins)
3859     - snd_hda_shutup_pins(codec);
3860     + switch (codec->core.vendor_id) {
3861     + case 0x10ec0286:
3862     + case 0x10ec0288:
3863     + case 0x10ec0298:
3864     + alc_headset_mic_no_shutup(codec);
3865     + break;
3866     + default:
3867     + if (!spec->no_shutup_pins)
3868     + snd_hda_shutup_pins(codec);
3869     + break;
3870     + }
3871     }
3872    
3873     /* generic shutup callback;
3874     @@ -803,11 +836,10 @@ static int alc_init(struct hda_codec *codec)
3875     if (spec->init_hook)
3876     spec->init_hook(codec);
3877    
3878     + snd_hda_gen_init(codec);
3879     alc_fix_pll(codec);
3880     alc_auto_init_amp(codec, spec->init_amp);
3881    
3882     - snd_hda_gen_init(codec);
3883     -
3884     snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
3885    
3886     return 0;
3887     @@ -2924,27 +2956,6 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
3888     return alc_parse_auto_config(codec, alc269_ignore, ssids);
3889     }
3890    
3891     -static int find_ext_mic_pin(struct hda_codec *codec);
3892     -
3893     -static void alc286_shutup(struct hda_codec *codec)
3894     -{
3895     - const struct hda_pincfg *pin;
3896     - int i;
3897     - int mic_pin = find_ext_mic_pin(codec);
3898     - /* don't shut up pins when unloading the driver; otherwise it breaks
3899     - * the default pin setup at the next load of the driver
3900     - */
3901     - if (codec->bus->shutdown)
3902     - return;
3903     - snd_array_for_each(&codec->init_pins, i, pin) {
3904     - /* use read here for syncing after issuing each verb */
3905     - if (pin->nid != mic_pin)
3906     - snd_hda_codec_read(codec, pin->nid, 0,
3907     - AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
3908     - }
3909     - codec->pins_shutup = 1;
3910     -}
3911     -
3912     static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
3913     {
3914     alc_update_coef_idx(codec, 0x04, 1 << 11, power_up ? (1 << 11) : 0);
3915     @@ -6841,6 +6852,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3916     SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
3917     SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
3918     SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
3919     + SND_PCI_QUIRK(0x1558, 0x8550, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
3920     + SND_PCI_QUIRK(0x1558, 0x8551, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
3921     + SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
3922     + SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
3923     SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
3924     SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
3925     SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
3926     @@ -6883,7 +6898,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3927     SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
3928     SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
3929     SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
3930     - SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
3931     + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
3932     SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3933     SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
3934     SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
3935     @@ -7608,7 +7623,6 @@ static int patch_alc269(struct hda_codec *codec)
3936     case 0x10ec0286:
3937     case 0x10ec0288:
3938     spec->codec_variant = ALC269_TYPE_ALC286;
3939     - spec->shutup = alc286_shutup;
3940     break;
3941     case 0x10ec0298:
3942     spec->codec_variant = ALC269_TYPE_ALC298;
3943     diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
3944     index b61d518f4fef..63487240b61e 100644
3945     --- a/sound/soc/codecs/hdac_hdmi.c
3946     +++ b/sound/soc/codecs/hdac_hdmi.c
3947     @@ -1828,6 +1828,17 @@ static int hdmi_codec_probe(struct snd_soc_component *component)
3948     /* Imp: Store the card pointer in hda_codec */
3949     hdmi->card = dapm->card->snd_card;
3950    
3951     + /*
3952     + * Setup a device_link between card device and HDMI codec device.
3953     + * The card device is the consumer and the HDMI codec device is
3954     + * the supplier. With this setting, we can make sure that the audio
3955     + * domain in display power will always be turned on before operating
3956     + * on the HDMI audio codec registers.
3957     + * Let's use the flag DL_FLAG_AUTOREMOVE_CONSUMER. This can make
3958     + * sure the device link is freed when the machine driver is removed.
3959     + */
3960     + device_link_add(component->card->dev, &hdev->dev, DL_FLAG_RPM_ACTIVE |
3961     + DL_FLAG_AUTOREMOVE_CONSUMER);
3962     /*
3963     * hdac_device core already sets the state to active and calls
3964     * get_noresume. So enable runtime and set the device to suspend.
3965     diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
3966     index c97f21836c66..f06ae43650a3 100644
3967     --- a/sound/soc/codecs/max98090.c
3968     +++ b/sound/soc/codecs/max98090.c
3969     @@ -1209,14 +1209,14 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
3970     &max98090_right_rcv_mixer_controls[0],
3971     ARRAY_SIZE(max98090_right_rcv_mixer_controls)),
3972    
3973     - SND_SOC_DAPM_MUX("LINMOD Mux", M98090_REG_LOUTR_MIXER,
3974     - M98090_LINMOD_SHIFT, 0, &max98090_linmod_mux),
3975     + SND_SOC_DAPM_MUX("LINMOD Mux", SND_SOC_NOPM, 0, 0,
3976     + &max98090_linmod_mux),
3977    
3978     - SND_SOC_DAPM_MUX("MIXHPLSEL Mux", M98090_REG_HP_CONTROL,
3979     - M98090_MIXHPLSEL_SHIFT, 0, &max98090_mixhplsel_mux),
3980     + SND_SOC_DAPM_MUX("MIXHPLSEL Mux", SND_SOC_NOPM, 0, 0,
3981     + &max98090_mixhplsel_mux),
3982    
3983     - SND_SOC_DAPM_MUX("MIXHPRSEL Mux", M98090_REG_HP_CONTROL,
3984     - M98090_MIXHPRSEL_SHIFT, 0, &max98090_mixhprsel_mux),
3985     + SND_SOC_DAPM_MUX("MIXHPRSEL Mux", SND_SOC_NOPM, 0, 0,
3986     + &max98090_mixhprsel_mux),
3987    
3988     SND_SOC_DAPM_PGA("HP Left Out", M98090_REG_OUTPUT_ENABLE,
3989     M98090_HPLEN_SHIFT, 0, NULL, 0),
3990     diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
3991     index bd51f3655ee3..06abcd017650 100644
3992     --- a/sound/soc/codecs/rt5677-spi.c
3993     +++ b/sound/soc/codecs/rt5677-spi.c
3994     @@ -58,13 +58,15 @@ static DEFINE_MUTEX(spi_mutex);
3995     * RT5677_SPI_READ/WRITE_32: Transfer 4 bytes
3996     * RT5677_SPI_READ/WRITE_BURST: Transfer any multiples of 8 bytes
3997     *
3998     - * For example, reading 260 bytes at 0x60030002 uses the following commands:
3999     - * 0x60030002 RT5677_SPI_READ_16 2 bytes
4000     + * Note:
4001     + * 16-bit writes and reads are restricted to the address range
4002     + * 0x18020000 ~ 0x18021000
4003     + *
4004     + * For example, reading 256 bytes at 0x60030004 uses the following commands:
4005     * 0x60030004 RT5677_SPI_READ_32 4 bytes
4006     * 0x60030008 RT5677_SPI_READ_BURST 240 bytes
4007     * 0x600300F8 RT5677_SPI_READ_BURST 8 bytes
4008     * 0x60030100 RT5677_SPI_READ_32 4 bytes
4009     - * 0x60030104 RT5677_SPI_READ_16 2 bytes
4010     *
4011     * Input:
4012     * @read: true for read commands; false for write commands
4013     @@ -79,15 +81,13 @@ static u8 rt5677_spi_select_cmd(bool read, u32 align, u32 remain, u32 *len)
4014     {
4015     u8 cmd;
4016    
4017     - if (align == 2 || align == 6 || remain == 2) {
4018     - cmd = RT5677_SPI_READ_16;
4019     - *len = 2;
4020     - } else if (align == 4 || remain <= 6) {
4021     + if (align == 4 || remain <= 4) {
4022     cmd = RT5677_SPI_READ_32;
4023     *len = 4;
4024     } else {
4025     cmd = RT5677_SPI_READ_BURST;
4026     - *len = min_t(u32, remain & ~7, RT5677_SPI_BURST_LEN);
4027     + *len = (((remain - 1) >> 3) + 1) << 3;
4028     + *len = min_t(u32, *len, RT5677_SPI_BURST_LEN);
4029     }
4030     return read ? cmd : cmd + 1;
4031     }
4032     @@ -108,7 +108,7 @@ static void rt5677_spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen)
4033     }
4034     }
4035    
4036     -/* Read DSP address space using SPI. addr and len have to be 2-byte aligned. */
4037     +/* Read DSP address space using SPI. addr and len have to be 4-byte aligned. */
4038     int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
4039     {
4040     u32 offset;
4041     @@ -124,7 +124,7 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
4042     if (!g_spi)
4043     return -ENODEV;
4044    
4045     - if ((addr & 1) || (len & 1)) {
4046     + if ((addr & 3) || (len & 3)) {
4047     dev_err(&g_spi->dev, "Bad read align 0x%x(%zu)\n", addr, len);
4048     return -EACCES;
4049     }
4050     @@ -159,13 +159,13 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
4051     }
4052     EXPORT_SYMBOL_GPL(rt5677_spi_read);
4053    
4054     -/* Write DSP address space using SPI. addr has to be 2-byte aligned.
4055     - * If len is not 2-byte aligned, an extra byte of zero is written at the end
4056     +/* Write DSP address space using SPI. addr has to be 4-byte aligned.
4057     + * If len is not 4-byte aligned, then extra zeros are written at the end
4058     * as padding.
4059     */
4060     int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
4061     {
4062     - u32 offset, len_with_pad = len;
4063     + u32 offset;
4064     int status = 0;
4065     struct spi_transfer t;
4066     struct spi_message m;
4067     @@ -178,22 +178,19 @@ int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
4068     if (!g_spi)
4069     return -ENODEV;
4070    
4071     - if (addr & 1) {
4072     + if (addr & 3) {
4073     dev_err(&g_spi->dev, "Bad write align 0x%x(%zu)\n", addr, len);
4074     return -EACCES;
4075     }
4076    
4077     - if (len & 1)
4078     - len_with_pad = len + 1;
4079     -
4080     memset(&t, 0, sizeof(t));
4081     t.tx_buf = buf;
4082     t.speed_hz = RT5677_SPI_FREQ;
4083     spi_message_init_with_transfers(&m, &t, 1);
4084    
4085     - for (offset = 0; offset < len_with_pad;) {
4086     + for (offset = 0; offset < len;) {
4087     spi_cmd = rt5677_spi_select_cmd(false, (addr + offset) & 7,
4088     - len_with_pad - offset, &t.len);
4089     + len - offset, &t.len);
4090    
4091     /* Construct SPI message header */
4092     buf[0] = spi_cmd;
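
With the 16-bit SPI command gone (per the updated comment, it only works in a narrow address window on this DSP), rt5677_spi_select_cmd() rounds a trailing burst up to the next multiple of 8 instead of falling back to smaller transfers, and the copy path zero-pads the excess as the new write comment states. The round-up arithmetic, worked through:

u32 remain = 20;				/* bytes left to transfer        */
u32 len = (((remain - 1) >> 3) + 1) << 3;	/* ((19>>3)+1)<<3 = 3*8 = 24     */
len = min_t(u32, len, RT5677_SPI_BURST_LEN);	/* still capped per SPI message  */
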
4093     diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
4094     index 38fd32ab443c..ff96db91f818 100644
4095     --- a/sound/soc/fsl/fsl_esai.c
4096     +++ b/sound/soc/fsl/fsl_esai.c
4097     @@ -251,7 +251,7 @@ static int fsl_esai_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
4098     break;
4099     case ESAI_HCKT_EXTAL:
4100     ecr |= ESAI_ECR_ETI;
4101     - /* fall through */
4102     + break;
4103     case ESAI_HCKR_EXTAL:
4104     ecr |= ESAI_ECR_ERI;
4105     break;
4106     diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c
4107     index 19bee725de00..325b07b98b3c 100644
4108     --- a/sound/usb/line6/toneport.c
4109     +++ b/sound/usb/line6/toneport.c
4110     @@ -54,8 +54,8 @@ struct usb_line6_toneport {
4111     /* Firmware version (x 100) */
4112     u8 firmware_version;
4113    
4114     - /* Timer for delayed PCM startup */
4115     - struct timer_list timer;
4116     + /* Work for delayed PCM startup */
4117     + struct delayed_work pcm_work;
4118    
4119     /* Device type */
4120     enum line6_device_type type;
4121     @@ -241,9 +241,10 @@ static int snd_toneport_source_put(struct snd_kcontrol *kcontrol,
4122     return 1;
4123     }
4124    
4125     -static void toneport_start_pcm(struct timer_list *t)
4126     +static void toneport_start_pcm(struct work_struct *work)
4127     {
4128     - struct usb_line6_toneport *toneport = from_timer(toneport, t, timer);
4129     + struct usb_line6_toneport *toneport =
4130     + container_of(work, struct usb_line6_toneport, pcm_work.work);
4131     struct usb_line6 *line6 = &toneport->line6;
4132    
4133     line6_pcm_acquire(line6->line6pcm, LINE6_STREAM_MONITOR, true);
4134     @@ -393,7 +394,8 @@ static int toneport_setup(struct usb_line6_toneport *toneport)
4135     if (toneport_has_led(toneport))
4136     toneport_update_led(toneport);
4137    
4138     - mod_timer(&toneport->timer, jiffies + TONEPORT_PCM_DELAY * HZ);
4139     + schedule_delayed_work(&toneport->pcm_work,
4140     + msecs_to_jiffies(TONEPORT_PCM_DELAY * 1000));
4141     return 0;
4142     }
4143    
4144     @@ -405,7 +407,7 @@ static void line6_toneport_disconnect(struct usb_line6 *line6)
4145     struct usb_line6_toneport *toneport =
4146     (struct usb_line6_toneport *)line6;
4147    
4148     - del_timer_sync(&toneport->timer);
4149     + cancel_delayed_work_sync(&toneport->pcm_work);
4150    
4151     if (toneport_has_led(toneport))
4152     toneport_remove_leds(toneport);
4153     @@ -422,7 +424,7 @@ static int toneport_init(struct usb_line6 *line6,
4154     struct usb_line6_toneport *toneport = (struct usb_line6_toneport *) line6;
4155    
4156     toneport->type = id->driver_info;
4157     - timer_setup(&toneport->timer, toneport_start_pcm, 0);
4158     + INIT_DELAYED_WORK(&toneport->pcm_work, toneport_start_pcm);
4159    
4160     line6->disconnect = line6_toneport_disconnect;
4161    
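
The toneport conversion swaps a timer for a delayed work item because timer callbacks run in atomic (softirq) context while the delayed PCM startup may sleep; a workqueue gives it process context with the same delay. The life cycle in three calls, as used above (TONEPORT_PCM_DELAY assumed to be in seconds):

INIT_DELAYED_WORK(&toneport->pcm_work, toneport_start_pcm);	/* at init */
schedule_delayed_work(&toneport->pcm_work,
		      msecs_to_jiffies(TONEPORT_PCM_DELAY * 1000));
cancel_delayed_work_sync(&toneport->pcm_work);	/* at disconnect: sync, so a
						 * queued start cannot run late */
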
4162     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
4163     index e7d441d0e839..5a10b1b7f6b9 100644
4164     --- a/sound/usb/mixer.c
4165     +++ b/sound/usb/mixer.c
4166     @@ -2679,6 +2679,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
4167     kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval);
4168     if (! kctl) {
4169     usb_audio_err(state->chip, "cannot malloc kcontrol\n");
4170     + for (i = 0; i < desc->bNrInPins; i++)
4171     + kfree(namelist[i]);
4172     kfree(namelist);
4173     kfree(cval);
4174     return -ENOMEM;
4175     diff --git a/tools/objtool/check.c b/tools/objtool/check.c
4176     index ef152daccc33..46be34576620 100644
4177     --- a/tools/objtool/check.c
4178     +++ b/tools/objtool/check.c
4179     @@ -1805,7 +1805,8 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
4180     return 1;
4181     }
4182    
4183     - func = insn->func ? insn->func->pfunc : NULL;
4184     + if (insn->func)
4185     + func = insn->func->pfunc;
4186    
4187     if (func && insn->ignore) {
4188     WARN_FUNC("BUG: why am I validating an ignored function?",