Magellan Linux

Contents of /trunk/kernel-magellan/patches-5.0/0103-5.0.4-all-fixes.patch

Revision 3330
Fri Apr 26 12:20:19 2019 UTC by niro
File size: 363418 bytes
-linux-5.0.4
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index e133ccd60228..acfe3d0f78d1 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -195,6 +195,14 @@ Requesting the required mask does not alter the current mask. If you
 wish to take advantage of it, you should issue a dma_set_mask()
 call to set the mask to the value returned.

+::
+
+	size_t
+	dma_direct_max_mapping_size(struct device *dev);
+
+Returns the maximum size of a mapping for the device. The size parameter
+of the mapping functions like dma_map_single(), dma_map_page() and
+others should not be larger than the returned value.

 Part Id - Streaming DMA mappings
 --------------------------------
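
The hunk above documents dma_direct_max_mapping_size(), which bounds the size argument of dma_map_single() and friends. As a rough illustration, a driver might clamp a buffer length to that limit before mapping. A minimal sketch only, using the helper name documented above (which exact mapping-size helper a driver should call varies by kernel version), with example_map() being a hypothetical wrapper:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical helper: clamp a mapping request to the device's
	 * maximum supported mapping size before calling dma_map_single().
	 */
	static dma_addr_t example_map(struct device *dev, void *buf, size_t *len)
	{
		size_t max = dma_direct_max_mapping_size(dev);

		if (*len > max)
			*len = max;	/* caller must handle the shortened mapping */

		return dma_map_single(dev, buf, *len, DMA_TO_DEVICE);
	}
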
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 1f09d043d086..ddb8ce5333ba 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -44,6 +44,8 @@ stable kernels.

 | Implementor    | Component       | Erratum ID      | Kconfig                     |
 +----------------+-----------------+-----------------+-----------------------------+
+| Allwinner      | A64/R18         | UNKNOWN1        | SUN50I_ERRATUM_UNKNOWN1     |
+|                |                 |                 |                             |
 | ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319        |
 | ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319        |
 | ARM            | Cortex-A53      | #824069         | ARM64_ERRATUM_824069        |
diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
index a10c1f89037d..e1fe02f3e3e9 100644
--- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
@@ -11,11 +11,13 @@ New driver handles the following

 Required properties:
 - compatible:		Must be "samsung,exynos-adc-v1"
-			for exynos4412/5250 controllers.
+			for Exynos5250 controllers.
 			Must be "samsung,exynos-adc-v2" for
 			future controllers.
 			Must be "samsung,exynos3250-adc" for
 			controllers compatible with ADC of Exynos3250.
+			Must be "samsung,exynos4212-adc" for
+			controllers compatible with ADC of Exynos4212 and Exynos4412.
 			Must be "samsung,exynos7-adc" for
 			the ADC in Exynos7 and compatibles
 			Must be "samsung,s3c2410-adc" for
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 0de6f6145cc6..7ba8cd567f84 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -38,6 +38,9 @@ Procedure for submitting patches to the -stable tree
 - If the patch covers files in net/ or drivers/net please follow netdev stable
   submission guidelines as described in
   :ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
+  after first checking the stable networking queue at
+  https://patchwork.ozlabs.org/bundle/davem/stable/?series=&submitter=&state=*&q=&archive=
+  to ensure the requested patch is not already queued up.
 - Security patches should not be handled (solely) by the -stable review
   process but should follow the procedures in
   :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
diff --git a/Makefile b/Makefile
index fb888787e7d1..06fda21614bc 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 0
-SUBLEVEL = 3
+SUBLEVEL = 4
 EXTRAVERSION =
 NAME = Shy Crocodile

diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S
index ce45ba0c0687..16019b5961e7 100644
--- a/arch/arm/crypto/crct10dif-ce-core.S
+++ b/arch/arm/crypto/crct10dif-ce-core.S
@@ -124,10 +124,10 @@ ENTRY(crc_t10dif_pmull)
	vext.8		q10, qzr, q0, #4

	// receive the initial 64B data, xor the initial crc value
-	vld1.64		{q0-q1}, [arg2, :128]!
-	vld1.64		{q2-q3}, [arg2, :128]!
-	vld1.64		{q4-q5}, [arg2, :128]!
-	vld1.64		{q6-q7}, [arg2, :128]!
+	vld1.64		{q0-q1}, [arg2]!
+	vld1.64		{q2-q3}, [arg2]!
+	vld1.64		{q4-q5}, [arg2]!
+	vld1.64		{q6-q7}, [arg2]!
 CPU_LE(	vrev64.8	q0, q0	)
 CPU_LE(	vrev64.8	q1, q1	)
 CPU_LE(	vrev64.8	q2, q2	)
@@ -167,7 +167,7 @@ CPU_LE(	vrev64.8	q7, q7	)
 _fold_64_B_loop:

	.macro		fold64, reg1, reg2
-	vld1.64		{q11-q12}, [arg2, :128]!
+	vld1.64		{q11-q12}, [arg2]!

	vmull.p64	q8, \reg1\()h, d21
	vmull.p64	\reg1, \reg1\()l, d20
@@ -238,7 +238,7 @@ _16B_reduction_loop:
	vmull.p64	q7, d15, d21
	veor.8		q7, q7, q8

-	vld1.64		{q0}, [arg2, :128]!
+	vld1.64		{q0}, [arg2]!
 CPU_LE(	vrev64.8	q0, q0	)
	vswp		d0, d1
	veor.8		q7, q7, q0
@@ -335,7 +335,7 @@ _less_than_128:
	vmov.i8		q0, #0
	vmov		s3, arg1_low32		// get the initial crc value

-	vld1.64		{q7}, [arg2, :128]!
+	vld1.64		{q7}, [arg2]!
 CPU_LE(	vrev64.8	q7, q7	)
	vswp		d14, d15
	veor.8		q7, q7, q0
diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
index d428355cf38d..14c19c70a841 100644
--- a/arch/arm/crypto/crct10dif-ce-glue.c
+++ b/arch/arm/crypto/crct10dif-ce-glue.c
@@ -35,26 +35,15 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
			    unsigned int length)
 {
	u16 *crc = shash_desc_ctx(desc);
-	unsigned int l;

-	if (!may_use_simd()) {
-		*crc = crc_t10dif_generic(*crc, data, length);
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+		kernel_neon_begin();
+		*crc = crc_t10dif_pmull(*crc, data, length);
+		kernel_neon_end();
	} else {
-		if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
-			l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
-				  ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
-
-			*crc = crc_t10dif_generic(*crc, data, l);
-
-			length -= l;
-			data += l;
-		}
-		if (length > 0) {
-			kernel_neon_begin();
-			*crc = crc_t10dif_pmull(*crc, data, length);
-			kernel_neon_end();
-		}
+		*crc = crc_t10dif_generic(*crc, data, length);
	}
+
	return 0;
 }

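Both crct10dif glue rewrites (the ARM one above and the arm64 one later in this patch) reduce to the same dispatch rule: take the PMULL fast path only when the buffer is at least one chunk long, so the kernel_neon_begin()/kernel_neon_end() overhead is amortized, and SIMD may be used in the current context; otherwise fall back to the generic table-driven code. A standalone C sketch of that rule, with crc_generic()/crc_simd()/simd_usable() as stand-ins for the kernel routines:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SIMD_CHUNK_SIZE 16	/* stand-in for CRC_T10DIF_PMULL_CHUNK_SIZE */

	/* Bitwise CRC16 with the T10 DIF polynomial (0x8bb7), standing in
	 * for crc_t10dif_generic(). */
	static uint16_t crc_generic(uint16_t crc, const uint8_t *data, size_t len)
	{
		while (len--) {
			crc ^= (uint16_t)*data++ << 8;
			for (int i = 0; i < 8; i++)
				crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
		}
		return crc;
	}

	static int simd_usable(void) { return 1; }	/* stand-in for may_use_simd() */

	static uint16_t crc_simd(uint16_t crc, const uint8_t *data, size_t len)
	{
		puts("fast path");	/* a real version would use PMULL/PCLMUL */
		return crc_generic(crc, data, len);
	}

	/* The dispatch rule from the patch: fast path only for big-enough input. */
	static uint16_t crc_update(uint16_t crc, const uint8_t *data, size_t len)
	{
		if (len >= SIMD_CHUNK_SIZE && simd_usable())
			return crc_simd(crc, data, len);
		return crc_generic(crc, data, len);
	}

	int main(void)
	{
		const uint8_t buf[32] = { 1, 2, 3 };

		printf("crc=%04x\n", crc_update(0, buf, sizeof(buf)));
		printf("crc=%04x\n", crc_update(0, buf, 4));	/* short: generic path */
		return 0;
	}
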
diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
index 058ce73137e8..5d819b6ea428 100644
--- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
+++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
@@ -65,16 +65,16 @@ static int osiris_dvs_notify(struct notifier_block *nb,

	switch (val) {
	case CPUFREQ_PRECHANGE:
-		if (old_dvs & !new_dvs ||
-		    cur_dvs & !new_dvs) {
+		if ((old_dvs && !new_dvs) ||
+		    (cur_dvs && !new_dvs)) {
			pr_debug("%s: exiting dvs\n", __func__);
			cur_dvs = false;
			gpio_set_value(OSIRIS_GPIO_DVS, 1);
		}
		break;
	case CPUFREQ_POSTCHANGE:
-		if (!old_dvs & new_dvs ||
-		    !cur_dvs & new_dvs) {
+		if ((!old_dvs && new_dvs) ||
+		    (!cur_dvs && new_dvs)) {
			pr_debug("entering dvs\n");
			cur_dvs = true;
			gpio_set_value(OSIRIS_GPIO_DVS, 0);
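
The fix above replaces bitwise '&' with logical '&&' and parenthesizes each clause. With strict 0/1 flags the two operators happen to agree, but bitwise AND compares bit patterns, so two "true" values with no overlapping bits make the test silently fail. A small standalone demonstration:

	#include <stdio.h>

	int main(void)
	{
		int a = 2, b = 4;	/* both nonzero, i.e. both "true" as conditions */

		/* Bitwise AND of 2 (0b010) and 4 (0b100) is 0: the test fails. */
		printf("a & b  -> %d\n", a & b);	/* prints 0 */
		/* Logical AND only asks whether both operands are nonzero. */
		printf("a && b -> %d\n", a && b);	/* prints 1 */
		return 0;
	}
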
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index e3a375c4cb83..1b151442dac1 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data)
	beq	10f
	ext	v0.16b, v0.16b, v0.16b, #1	/* rotate out the mac bytes */
	b	7b
-8:	mov	w7, w8
+8:	cbz	w8, 91f
+	mov	w7, w8
	add	w8, w8, #16
 9:	ext	v1.16b, v1.16b, v1.16b, #1
	adds	w7, w7, #1
	bne	9b
-	eor	v0.16b, v0.16b, v1.16b
+91:	eor	v0.16b, v0.16b, v1.16b
	st1	{v0.16b}, [x0]
 10:	str	w8, [x3]
	ret
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 68b11aa690e4..986191e8c058 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -125,7 +125,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
			abytes -= added;
		}

-		while (abytes > AES_BLOCK_SIZE) {
+		while (abytes >= AES_BLOCK_SIZE) {
			__aes_arm64_encrypt(key->key_enc, mac, mac,
					    num_rounds(key));
			crypto_xor(mac, in, AES_BLOCK_SIZE);
@@ -139,8 +139,6 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
					    num_rounds(key));
			crypto_xor(mac, in, abytes);
			*macp = abytes;
-		} else {
-			*macp = 0;
		}
	}
 }
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index e613a87f8b53..8432c8d0dea6 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -971,18 +971,22 @@ CPU_LE(	rev	x8, x8		)

 8:	next_ctr	v0
	st1	{v0.16b}, [x24]
-	cbz	x23, 0f
+	cbz	x23, .Lctr_done

	cond_yield_neon	98b
	b	99b

-0:	frame_pop
+.Lctr_done:
+	frame_pop
	ret

	/*
	 * If we are handling the tail of the input (x6 != NULL), return the
	 * final keystream block back to the caller.
	 */
+0:	cbz	x25, 8b
+	st1	{v0.16b}, [x25]
+	b	8b
 1:	cbz	x25, 8b
	st1	{v1.16b}, [x25]
	b	8b
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
index b461d62023f2..567c24f3d224 100644
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -39,26 +39,13 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
			    unsigned int length)
 {
	u16 *crc = shash_desc_ctx(desc);
-	unsigned int l;

-	if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
-		l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
-			  ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
-
-		*crc = crc_t10dif_generic(*crc, data, l);
-
-		length -= l;
-		data += l;
-	}
-
-	if (length > 0) {
-		if (may_use_simd()) {
-			kernel_neon_begin();
-			*crc = crc_t10dif_pmull(*crc, data, length);
-			kernel_neon_end();
-		} else {
-			*crc = crc_t10dif_generic(*crc, data, length);
-		}
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+		kernel_neon_begin();
+		*crc = crc_t10dif_pmull(*crc, data, length);
+		kernel_neon_end();
+	} else {
+		*crc = crc_t10dif_generic(*crc, data, length);
	}

	return 0;
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 1473fc2f7ab7..89691c86640a 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -17,8 +17,12 @@
 #define __ASM_HARDIRQ_H

 #include <linux/cache.h>
+#include <linux/percpu.h>
 #include <linux/threads.h>
+#include <asm/barrier.h>
 #include <asm/irq.h>
+#include <asm/kvm_arm.h>
+#include <asm/sysreg.h>

 #define NR_IPI	7

@@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu);

 #define __ARCH_IRQ_EXIT_IRQS_DISABLED	1

+struct nmi_ctx {
+	u64 hcr;
+};
+
+DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
+
+#define arch_nmi_enter()							\
+	do {									\
+		if (is_kernel_in_hyp_mode()) {					\
+			struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts);	\
+			nmi_ctx->hcr = read_sysreg(hcr_el2);			\
+			if (!(nmi_ctx->hcr & HCR_TGE)) {			\
+				write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2);	\
+				isb();						\
+			}							\
+		}								\
+	} while (0)
+
+#define arch_nmi_exit()								\
+	do {									\
+		if (is_kernel_in_hyp_mode()) {					\
+			struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts);	\
+			if (!(nmi_ctx->hcr & HCR_TGE))				\
+				write_sysreg(nmi_ctx->hcr, hcr_el2);		\
+		}								\
+	} while (0)
+
 static inline void ack_bad_irq(unsigned int irq)
 {
	extern unsigned long irq_err_count;
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 780a12f59a8f..92fa81798fb9 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -33,6 +33,9 @@

 unsigned long irq_err_count;

+/* Only access this in an NMI enter/exit */
+DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
+
 DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);

 int arch_show_interrupts(struct seq_file *p, int prec)
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index ce46c4cdf368..691854b77c7f 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -244,27 +244,33 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,

 static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
 {
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
	kgdb_handle_exception(1, SIGTRAP, 0, regs);
-	return 0;
+	return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_brk_fn)

 static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
 {
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
	compiled_break = 1;
	kgdb_handle_exception(1, SIGTRAP, 0, regs);

-	return 0;
+	return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);

 static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
 {
-	if (!kgdb_single_step)
+	if (user_mode(regs) || !kgdb_single_step)
		return DBG_HOOK_ERROR;

	kgdb_handle_exception(1, SIGTRAP, 0, regs);
-	return 0;
+	return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_step_brk_fn);

diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index f17afb99890c..7fb6f3aa5ceb 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -450,6 +450,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int retval;

+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
	/* return error if this is not our step */
	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

@@ -466,6 +469,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
 int __kprobes
 kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
 {
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
 }
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c936aa40c3f4..b6dac3a68508 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1476,7 +1476,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
-	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
+	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
 };

 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index efb7b2cbead5..ef46925096f0 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -824,11 +824,12 @@ void __init hook_debug_fault_code(int nr,
	debug_fault_info[nr].name = name;
 }

-asmlinkage int __exception do_debug_exception(unsigned long addr,
+asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
					      unsigned int esr,
					      struct pt_regs *regs)
 {
	const struct fault_info *inf = esr_to_debug_fault_info(esr);
+	unsigned long pc = instruction_pointer(regs);
	int rv;

	/*
@@ -838,14 +839,14 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
	if (interrupts_enabled(regs))
		trace_hardirqs_off();

-	if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs)))
+	if (user_mode(regs) && !is_ttbr0_addr(pc))
		arm64_apply_bp_hardening();

-	if (!inf->fn(addr, esr, regs)) {
+	if (!inf->fn(addr_if_watchpoint, esr, regs)) {
		rv = 1;
	} else {
		arm64_notify_die(inf->name, regs,
-				 inf->sig, inf->code, (void __user *)addr, esr);
+				 inf->sig, inf->code, (void __user *)pc, esr);
		rv = 0;
	}

diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index f00ca53f8c14..482513b9af2c 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -58,7 +58,10 @@ cpuflags-$(CONFIG_M5206e)	:= $(call cc-option,-mcpu=5206e,-m5200)
 cpuflags-$(CONFIG_M5206)	:= $(call cc-option,-mcpu=5206,-m5200)

 KBUILD_AFLAGS += $(cpuflags-y)
-KBUILD_CFLAGS += $(cpuflags-y) -pipe
+KBUILD_CFLAGS += $(cpuflags-y)
+
+KBUILD_CFLAGS += -pipe -ffreestanding
+
 ifdef CONFIG_MMU
 # without -fno-strength-reduce the 53c7xx.c driver fails ;-(
 KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index d2abd98471e8..41204a49cf95 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -1134,7 +1134,7 @@ static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 5b0177733994..46130ef4941c 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -35,6 +35,14 @@ static inline int hstate_get_psize(struct hstate *hstate)
 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
 static inline bool gigantic_page_supported(void)
 {
+	/*
+	 * We used gigantic page reservation with hypervisor assist in some case.
+	 * We cannot use runtime allocation of gigantic pages in those platforms
+	 * This is hash translation mode LPARs.
+	 */
+	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
+		return false;
+
	return true;
 }
 #endif
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0f98f00da2ea..19693b8add93 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -837,7 +837,7 @@ struct kvm_vcpu_arch {
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_exit(void) {}
diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
index 2f3ff7a27881..d85fcfea32ca 100644
--- a/arch/powerpc/include/asm/powernv.h
+++ b/arch/powerpc/include/asm/powernv.h
@@ -23,6 +23,8 @@ extern int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
			unsigned long *flags, unsigned long *status,
			int count);

+void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val);
+
 void pnv_tm_init(void);
 #else
 static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { }
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 0768dfd8a64e..fdd528cdb2ee 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -745,6 +745,9 @@ fast_exception_return:
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
+	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
+	li	r10, 0
+	stw	r10, 8(r11)
	REST_GPR(10, r11)
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
@@ -982,6 +985,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	mtcrf	0xFF,r10
	mtlr	r11

+	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
+	li	r10, 0
+	stw	r10, 8(r1)
	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
@@ -1021,6 +1027,9 @@ exc_exit_restart_end:
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
+	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
+	li	r10, 0
+	stw	r10, 8(r1)
	REST_2GPRS(9, r1)
 .globl exc_exit_restart
 exc_exit_restart:
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ce393df243aa..71bad4b6f80d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -176,7 +176,7 @@ static void __giveup_fpu(struct task_struct *tsk)

	save_fpu(tsk);
	msr = tsk->thread.regs->msr;
-	msr &= ~MSR_FP;
+	msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
 #ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index cdd5d1d3ae41..53151698bfe0 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -561,6 +561,7 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
		/*
		 * Copy out only the low-order word of vrsave.
		 */
+		int start, end;
		union {
			elf_vrreg_t reg;
			u32 word;
@@ -569,8 +570,10 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,

		vrsave.word = target->thread.vrsave;

+		start = 33 * sizeof(vector128);
+		end = start + sizeof(vrsave);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
-					  33 * sizeof(vector128), -1);
+					  start, end);
	}

	return ret;
@@ -608,6 +611,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
		/*
		 * We use only the first word of vrsave.
		 */
+		int start, end;
		union {
			elf_vrreg_t reg;
			u32 word;
@@ -616,8 +620,10 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,

		vrsave.word = target->thread.vrsave;

+		start = 33 * sizeof(vector128);
+		end = start + sizeof(vrsave);
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
-					 33 * sizeof(vector128), -1);
+					 start, end);
		if (!ret)
			target->thread.vrsave = vrsave.word;
	}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 3f15edf25a0d..6e521a3f67ca 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -358,13 +358,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
- * NMI IPIs are globally single threaded. No more than one in progress at
- * any time.
- *
 * The IPI call waits with interrupts disabled until all targets enter the
- * NMI handler, then the call returns.
+ * NMI handler, then returns. Subsequent IPIs can be issued before targets
+ * have returned from their handlers, so there is no guarantee about
+ * concurrency or re-entrancy.
 *
- * No new NMI can be initiated until targets exit the handler.
+ * A new NMI can be issued before all targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
@@ -375,7 +374,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)

 static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
 static struct cpumask nmi_ipi_pending_mask;
-static int nmi_ipi_busy_count = 0;
+static bool nmi_ipi_busy = false;
 static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

 static void nmi_ipi_lock_start(unsigned long *flags)
@@ -414,7 +413,7 @@ static void nmi_ipi_unlock_end(unsigned long *flags)
 */
 int smp_handle_nmi_ipi(struct pt_regs *regs)
 {
-	void (*fn)(struct pt_regs *);
+	void (*fn)(struct pt_regs *) = NULL;
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;
@@ -425,29 +424,17 @@ int smp_handle_nmi_ipi(struct pt_regs *regs)
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
-	if (!nmi_ipi_busy_count)
-		goto out;
-	if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
-		goto out;
-
-	fn = nmi_ipi_function;
-	if (!fn)
-		goto out;
-
-	cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
-	nmi_ipi_busy_count++;
-	nmi_ipi_unlock();
-
-	ret = 1;
-
-	fn(regs);
-
-	nmi_ipi_lock();
-	if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
-		nmi_ipi_busy_count--;
-out:
+	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
+		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
+		fn = READ_ONCE(nmi_ipi_function);
+		WARN_ON_ONCE(!fn);
+		ret = 1;
+	}
	nmi_ipi_unlock_end(&flags);

+	if (fn)
+		fn(regs);
+
	return ret;
 }

@@ -473,7 +460,7 @@ static void do_smp_send_nmi_ipi(int cpu, bool safe)
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
- *   complete executing the handler, == 0 specifies indefinite delay.
+ *   begin executing the handler, == 0 specifies indefinite delay.
 */
 int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
 {
@@ -487,31 +474,33 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
	if (unlikely(!smp_ops))
		return 0;

-	/* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
	nmi_ipi_lock_start(&flags);
-	while (nmi_ipi_busy_count) {
+	while (nmi_ipi_busy) {
		nmi_ipi_unlock_end(&flags);
-		spin_until_cond(nmi_ipi_busy_count == 0);
+		spin_until_cond(!nmi_ipi_busy);
		nmi_ipi_lock_start(&flags);
	}
-
+	nmi_ipi_busy = true;
	nmi_ipi_function = fn;

+	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
+
	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
-		/* cpumask starts clear */
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}
-	nmi_ipi_busy_count++;
+
	nmi_ipi_unlock();

+	/* Interrupts remain hard disabled */
+
	do_smp_send_nmi_ipi(cpu, safe);

	nmi_ipi_lock();
-	/* nmi_ipi_busy_count is held here, so unlock/lock is okay */
+	/* nmi_ipi_busy is set here, so unlock/lock is okay */
	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		nmi_ipi_unlock();
		udelay(1);
@@ -523,29 +512,15 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
		}
	}

-	while (nmi_ipi_busy_count > 1) {
-		nmi_ipi_unlock();
-		udelay(1);
-		nmi_ipi_lock();
-		if (delay_us) {
-			delay_us--;
-			if (!delay_us)
-				break;
-		}
-	}
-
	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}
-	if (nmi_ipi_busy_count > 1) {
-		/* Timeout waiting for CPUs to execute fn */
-		ret = 0;
-		nmi_ipi_busy_count = 1;
-	}

-	nmi_ipi_busy_count--;
+	nmi_ipi_function = NULL;
+	nmi_ipi_busy = false;
+
	nmi_ipi_unlock_end(&flags);

	return ret;
@@ -613,17 +588,8 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 static void nmi_stop_this_cpu(struct pt_regs *regs)
 {
	/*
-	 * This is a special case because it never returns, so the NMI IPI
-	 * handling would never mark it as done, which makes any later
-	 * smp_send_nmi_ipi() call spin forever. Mark it done now.
-	 *
	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
	 */
-	nmi_ipi_lock();
-	if (nmi_ipi_busy_count > 1)
-		nmi_ipi_busy_count--;
-	nmi_ipi_unlock();
-
	spin_begin();
	while (1)
		spin_cpu_relax();
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 64936b60d521..7a1de34f38c8 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -763,15 +763,15 @@ void machine_check_exception(struct pt_regs *regs)
	if (check_io_access(regs))
		goto bail;

-	/* Must die if the interrupt is not recoverable */
-	if (!(regs->msr & MSR_RI))
-		nmi_panic(regs, "Unrecoverable Machine check");
-
	if (!nested)
		nmi_exit();

	die("Machine check", regs, SIGBUS);

+	/* Must die if the interrupt is not recoverable */
+	if (!(regs->msr & MSR_RI))
+		nmi_panic(regs, "Unrecoverable Machine check");
+
	return;

 bail:
@@ -1542,8 +1542,8 @@ bail:

 void StackOverflow(struct pt_regs *regs)
 {
-	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
-	       current, regs->gpr[1]);
+	pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
+		current->comm, task_pid_nr(current), regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 9b8d50a7cbaf..45b06e239d1f 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -58,6 +58,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 #define STACK_SLOT_DAWR		(SFS-56)
 #define STACK_SLOT_DAWRX	(SFS-64)
 #define STACK_SLOT_HFSCR	(SFS-72)
+#define STACK_SLOT_AMR		(SFS-80)
+#define STACK_SLOT_UAMOR	(SFS-88)
 /* the following is used by the P9 short path */
 #define STACK_SLOT_NVGPRS	(SFS-152)	/* 18 gprs */

@@ -726,11 +728,9 @@ BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
-	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
-	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@@ -738,11 +738,18 @@ BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
+	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
+	std	r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

+	mfspr	r5, SPRN_AMR
+	std	r5, STACK_SLOT_AMR(r1)
+	mfspr	r6, SPRN_UAMOR
+	std	r6, STACK_SLOT_UAMOR(r1)
+
 BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -1631,22 +1638,25 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_WORT, r0
 BEGIN_FTR_SECTION
-	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
-8:

-	/* Save and reset AMR and UAMOR before turning on the MMU */
+	/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
+	ld	r8, STACK_SLOT_IAMR(r1)
+	mtspr	SPRN_IAMR, r8
+
+8:	/* Power7 jumps back in here */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
-	li	r6,0
-	mtspr	SPRN_AMR,r6
+	ld	r5,STACK_SLOT_AMR(r1)
+	ld	r6,STACK_SLOT_UAMOR(r1)
+	mtspr	SPRN_AMR, r5
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
@@ -1746,11 +1756,9 @@ BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
-	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_PID, r7
-	mtspr	SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

 #ifdef CONFIG_PPC_RADIX_MMU
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index bc3914d54e26..5986df48359b 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -69,6 +69,11 @@ static void assert_slb_presence(bool present, unsigned long ea)
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		return;

+	/*
+	 * slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware
+	 * ignores all other bits from 0-27, so just clear them all.
+	 */
+	ea &= ~((1UL << 28) - 1);
	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");

	WARN_ON(present == (tmp == 0));
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
index 3d1ecd211776..8137f77abad5 100644
--- a/arch/powerpc/platforms/83xx/suspend-asm.S
+++ b/arch/powerpc/platforms/83xx/suspend-asm.S
@@ -26,13 +26,13 @@
 #define SS_MSR		0x74
 #define SS_SDR1		0x78
 #define SS_LR		0x7c
-#define SS_SPRG		0x80 /* 4 SPRGs */
-#define SS_DBAT		0x90 /* 8 DBATs */
-#define SS_IBAT		0xd0 /* 8 IBATs */
-#define SS_TB		0x110
-#define SS_CR		0x118
-#define SS_GPREG	0x11c /* r12-r31 */
-#define STATE_SAVE_SIZE 0x16c
+#define SS_SPRG		0x80 /* 8 SPRGs */
+#define SS_DBAT		0xa0 /* 8 DBATs */
+#define SS_IBAT		0xe0 /* 8 IBATs */
+#define SS_TB		0x120
+#define SS_CR		0x128
+#define SS_GPREG	0x12c /* r12-r31 */
+#define STATE_SAVE_SIZE 0x17c

	.section .data
	.align	5
@@ -103,6 +103,16 @@ _GLOBAL(mpc83xx_enter_deep_sleep)
	stw	r7, SS_SPRG+12(r3)
	stw	r8, SS_SDR1(r3)

+	mfspr	r4, SPRN_SPRG4
+	mfspr	r5, SPRN_SPRG5
+	mfspr	r6, SPRN_SPRG6
+	mfspr	r7, SPRN_SPRG7
+
+	stw	r4, SS_SPRG+16(r3)
+	stw	r5, SS_SPRG+20(r3)
+	stw	r6, SS_SPRG+24(r3)
+	stw	r7, SS_SPRG+28(r3)
+
	mfspr	r4, SPRN_DBAT0U
	mfspr	r5, SPRN_DBAT0L
	mfspr	r6, SPRN_DBAT1U
@@ -493,6 +503,16 @@ mpc83xx_deep_resume:
	mtspr	SPRN_IBAT7U, r6
	mtspr	SPRN_IBAT7L, r7

+	lwz	r4, SS_SPRG+16(r3)
+	lwz	r5, SS_SPRG+20(r3)
+	lwz	r6, SS_SPRG+24(r3)
+	lwz	r7, SS_SPRG+28(r3)
+
+	mtspr	SPRN_SPRG4, r4
+	mtspr	SPRN_SPRG5, r5
+	mtspr	SPRN_SPRG6, r6
+	mtspr	SPRN_SPRG7, r7
+
	lwz	r4, SS_SPRG+0(r3)
	lwz	r5, SS_SPRG+4(r3)
	lwz	r6, SS_SPRG+8(r3)
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index ecf703ee3a76..ac4ee88efc80 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -83,6 +83,10 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top)
	/* MEM2 64MB@0x10000000 */
	delta = wii_hole_start + wii_hole_size;
	size = top - delta;
+
+	if (__map_without_bats)
+		return delta;
+
	for (bl = 128<<10; bl < max_size; bl <<= 1) {
		if (bl * 2 > size)
			break;
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 35f699ebb662..e52f9b06dd9c 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -458,7 +458,8 @@ EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
 #endif	/* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

 #ifdef CONFIG_HOTPLUG_CPU
-static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
+
+void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
 {
	u64 pir = get_hard_smp_processor_id(cpu);

@@ -481,20 +482,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
 {
	unsigned long srr1;
	u32 idle_states = pnv_get_supported_cpuidle_states();
-	u64 lpcr_val;
-
-	/*
-	 * We don't want to take decrementer interrupts while we are
-	 * offline, so clear LPCR:PECE1. We keep PECE2 (and
-	 * LPCR_PECE_HVEE on P9) enabled as to let IPIs in.
-	 *
-	 * If the CPU gets woken up by a special wakeup, ensure that
-	 * the SLW engine sets LPCR with decrementer bit cleared, else
-	 * the CPU will come back to the kernel due to a spurious
-	 * wakeup.
-	 */
-	lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
-	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);

	__ppc64_runlatch_off();

@@ -526,16 +513,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu)

	__ppc64_runlatch_on();

-	/*
-	 * Re-enable decrementer interrupts in LPCR.
-	 *
-	 * Further, we want stop states to be woken up by decrementer
-	 * for non-hotplug cases. So program the LPCR via stop api as
-	 * well.
-	 */
-	lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
-	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
-
	return srr1;
 }
 #endif
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index acd3206dfae3..06628c71cef6 100644
--- a/arch/powerpc/platforms/powernv/opal-msglog.c
+++ b/arch/powerpc/platforms/powernv/opal-msglog.c
@@ -98,7 +98,7 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
 }

 static struct bin_attribute opal_msglog_attr = {
-	.attr = {.name = "msglog", .mode = 0444},
+	.attr = {.name = "msglog", .mode = 0400},
	.read = opal_msglog_read
 };

diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 0d354e19ef92..db09c7022635 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -39,6 +39,7 @@
 #include <asm/cpuidle.h>
 #include <asm/kexec.h>
 #include <asm/reg.h>
+#include <asm/powernv.h>

 #include "powernv.h"

@@ -153,6 +154,7 @@ static void pnv_smp_cpu_kill_self(void)
 {
	unsigned int cpu;
	unsigned long srr1, wmask;
+	u64 lpcr_val;

	/* Standard hot unplug procedure */
	/*
@@ -174,6 +176,19 @@ static void pnv_smp_cpu_kill_self(void)
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		wmask = SRR1_WAKEMASK_P8;

+	/*
+	 * We don't want to take decrementer interrupts while we are
+	 * offline, so clear LPCR:PECE1. We keep PECE2 (and
+	 * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in.
+	 *
+	 * If the CPU gets woken up by a special wakeup, ensure that
+	 * the SLW engine sets LPCR with decrementer bit cleared, else
+	 * the CPU will come back to the kernel due to a spurious
+	 * wakeup.
+	 */
+	lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
+	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
+
	while (!generic_check_cpu_restart(cpu)) {
		/*
		 * Clear IPI flag, since we don't handle IPIs while
@@ -246,6 +261,16 @@ static void pnv_smp_cpu_kill_self(void)

	}

+	/*
+	 * Re-enable decrementer interrupts in LPCR.
+	 *
+	 * Further, we want stop states to be woken up by decrementer
+	 * for non-hotplug cases. So program the LPCR via stop api as
+	 * well.
+	 */
+	lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
+	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
+
	DBG("CPU%d coming online...\n", cpu);
 }

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d5d24889c3bc..c2b8c8c6c9be 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -878,7 +878,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
 static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
					struct kvm_memory_slot *slot) {}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7ed90a759135..01a3f4964d57 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -369,7 +369,7 @@ void __init arch_call_rest_init(void)
		: : [_frame] "a" (frame));
 }

-static void __init setup_lowcore(void)
+static void __init setup_lowcore_dat_off(void)
 {
	struct lowcore *lc;

@@ -380,19 +380,16 @@ static void __init setup_lowcore(void)
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
-	lc->external_new_psw.mask = PSW_KERNEL_BITS |
-		PSW_MASK_DAT | PSW_MASK_MCHECK;
+	lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
-		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
-	lc->program_new_psw.mask = PSW_KERNEL_BITS |
-		PSW_MASK_DAT | PSW_MASK_MCHECK;
+	lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
-	lc->io_new_psw.mask = PSW_KERNEL_BITS |
-		PSW_MASK_DAT | PSW_MASK_MCHECK;
+	lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->nodat_stack = ((unsigned long) &init_thread_union)
@@ -452,6 +449,16 @@ static void __init setup_lowcore(void)
	lowcore_ptr[0] = lc;
 }

+static void __init setup_lowcore_dat_on(void)
+{
+	__ctl_clear_bit(0, 28);
+	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
+	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
+	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
+	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
+	__ctl_set_bit(0, 28);
+}
+
 static struct resource code_resource = {
	.name  = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
@@ -1072,7 +1079,7 @@ void __init setup_arch(char **cmdline_p)
 #endif

	setup_resources();
-	setup_lowcore();
+	setup_lowcore_dat_off();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
@@ -1085,6 +1092,12 @@ void __init setup_arch(char **cmdline_p)
	 */
	paging_init();

+	/*
+	 * After paging_init created the kernel page table, the new PSWs
+	 * in lowcore can now run with DAT enabled.
+	 */
+	setup_lowcore_dat_on();
+
	/* Setup default console */
	conmode_default();
	set_preferred_console();
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 2a356b948720..3ea71b871813 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad(
 }

 static void crypto_aegis128_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS128_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
+		ops->crypt_blocks(state,
+				  round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+	}

-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
	}
 }

@@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
 {
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
+	struct skcipher_walk walk;
	struct aegis_state state;

+	ops->skcipher_walk_init(&walk, req, true);
+
	kernel_fpu_begin();

	crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
	crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis128_aesni_process_crypt(&state, req, ops);
+	crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
	crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);

	kernel_fpu_end();
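
The same conversion is applied to each AEGIS/MORUS variant below: instead of one pass that splits a partial block off in the middle of every walk step, the loop consumes whole blocks while at least one full block remains, reports the remainder back through skcipher_walk_done(), and handles the sub-block tail exactly once at the end. The walk is also initialized before kernel_fpu_begin(), so walk setup no longer runs inside the FPU-critical section. The loop shape as a standalone C sketch, with process_blocks()/process_tail() as stand-ins for the ops->crypt_blocks()/ops->crypt_tail() callbacks:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define BLOCK_SIZE 16	/* stand-in for AEGIS128_BLOCK_SIZE */

	/* Stand-ins for the bulk and tail routines: here they just copy,
	 * printing which routine ran so the block/tail split is visible. */
	static void process_blocks(const uint8_t *src, uint8_t *dst, size_t n)
	{
		printf("blocks: %zu bytes\n", n);
		memcpy(dst, src, n);
	}

	static void process_tail(const uint8_t *src, uint8_t *dst, size_t n)
	{
		printf("tail:   %zu bytes\n", n);
		memcpy(dst, src, n);
	}

	/* Two-phase shape from the patch: whole blocks first, one tail call last. */
	static void process_crypt(const uint8_t *src, uint8_t *dst, size_t nbytes)
	{
		size_t full = nbytes - nbytes % BLOCK_SIZE;	/* round_down() in the kernel */

		if (full)
			process_blocks(src, dst, full);
		if (nbytes - full)
			process_tail(src + full, dst + full, nbytes - full);
	}

	int main(void)
	{
		uint8_t in[37] = { 0 }, out[37];

		process_crypt(in, out, sizeof(in));	/* 32-byte bulk + 5-byte tail */
		return 0;
	}
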
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index dbe8bb980da1..1b1b39c66c5e 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad(
 }

 static void crypto_aegis128l_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS128L_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
+		ops->crypt_blocks(state, round_down(walk->nbytes,
+						    AEGIS128L_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
+	}

-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
	}
 }

@@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
 {
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
+	struct skcipher_walk walk;
	struct aegis_state state;

+	ops->skcipher_walk_init(&walk, req, true);
+
	kernel_fpu_begin();

	crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
	crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis128l_aesni_process_crypt(&state, req, ops);
+	crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
	crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);

	kernel_fpu_end();
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 8bebda2de92f..6227ca3220a0 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad(
 }

 static void crypto_aegis256_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS256_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
+		ops->crypt_blocks(state,
+				  round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
+	}

-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
	}
 }

@@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req,
 {
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
+	struct skcipher_walk walk;
	struct aegis_state state;

+	ops->skcipher_walk_init(&walk, req, true);
+
	kernel_fpu_begin();

	crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
	crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis256_aesni_process_crypt(&state, req, ops);
+	crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
	crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);

	kernel_fpu_end();
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 1321700d6647..ae30c8b6ec4d 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -821,11 +821,14 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
	}

-	src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
-	scatterwalk_start(&src_sg_walk, src_sg);
-	if (req->src != req->dst) {
-		dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
-		scatterwalk_start(&dst_sg_walk, dst_sg);
+	if (left) {
+		src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
+		scatterwalk_start(&src_sg_walk, src_sg);
+		if (req->src != req->dst) {
+			dst_sg = scatterwalk_ffwd(dst_start, req->dst,
+						  req->assoclen);
+			scatterwalk_start(&dst_sg_walk, dst_sg);
+		}
	}

	kernel_fpu_begin();
diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
index 0dccdda1eb3a..7e600f8bcdad 100644
--- a/arch/x86/crypto/morus1280_glue.c
+++ b/arch/x86/crypto/morus1280_glue.c
@@ -85,31 +85,20 @@ static void crypto_morus1280_glue_process_ad(

 static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
						struct morus1280_ops ops,
-						struct aead_request *req)
+						struct skcipher_walk *walk)
 {
-	struct skcipher_walk walk;
-	u8 *cursor_src, *cursor_dst;
-	unsigned int chunksize, base;
-
-	ops.skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		cursor_src = walk.src.virt.addr;
-		cursor_dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
-
-		base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1);
-		cursor_src += base;
-		cursor_dst += base;
-		chunksize &= MORUS1280_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops.crypt_tail(state, cursor_src, cursor_dst,
-				       chunksize);
+	while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
+		ops.crypt_blocks(state, walk->src.virt.addr,
+				 walk->dst.virt.addr,
+				 round_down(walk->nbytes,
+					    MORUS1280_BLOCK_SIZE));
+		skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
+	}

-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
+			       walk->nbytes);
+		skcipher_walk_done(walk, 0);
	}
 }

@@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(struct aead_request *req,
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
	struct morus1280_state state;
+	struct skcipher_walk walk;
+
+	ops.skcipher_walk_init(&walk, req, true);

	kernel_fpu_begin();

	ctx->ops->init(&state, &ctx->key, req->iv);
1521     crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
1522     - crypto_morus1280_glue_process_crypt(&state, ops, req);
1523     + crypto_morus1280_glue_process_crypt(&state, ops, &walk);
1524     ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
1525    
1526     kernel_fpu_end();
1527     diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
1528     index 7b58fe4d9bd1..cb3a81732016 100644
1529     --- a/arch/x86/crypto/morus640_glue.c
1530     +++ b/arch/x86/crypto/morus640_glue.c
1531     @@ -85,31 +85,19 @@ static void crypto_morus640_glue_process_ad(
1532    
1533     static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
1534     struct morus640_ops ops,
1535     - struct aead_request *req)
1536     + struct skcipher_walk *walk)
1537     {
1538     - struct skcipher_walk walk;
1539     - u8 *cursor_src, *cursor_dst;
1540     - unsigned int chunksize, base;
1541     -
1542     - ops.skcipher_walk_init(&walk, req, false);
1543     -
1544     - while (walk.nbytes) {
1545     - cursor_src = walk.src.virt.addr;
1546     - cursor_dst = walk.dst.virt.addr;
1547     - chunksize = walk.nbytes;
1548     -
1549     - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
1550     -
1551     - base = chunksize & ~(MORUS640_BLOCK_SIZE - 1);
1552     - cursor_src += base;
1553     - cursor_dst += base;
1554     - chunksize &= MORUS640_BLOCK_SIZE - 1;
1555     -
1556     - if (chunksize > 0)
1557     - ops.crypt_tail(state, cursor_src, cursor_dst,
1558     - chunksize);
1559     + while (walk->nbytes >= MORUS640_BLOCK_SIZE) {
1560     + ops.crypt_blocks(state, walk->src.virt.addr,
1561     + walk->dst.virt.addr,
1562     + round_down(walk->nbytes, MORUS640_BLOCK_SIZE));
1563     + skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE);
1564     + }
1565    
1566     - skcipher_walk_done(&walk, 0);
1567     + if (walk->nbytes) {
1568     + ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
1569     + walk->nbytes);
1570     + skcipher_walk_done(walk, 0);
1571     }
1572     }
1573    
1574     @@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(struct aead_request *req,
1575     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1576     struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
1577     struct morus640_state state;
1578     + struct skcipher_walk walk;
1579     +
1580     + ops.skcipher_walk_init(&walk, req, true);
1581    
1582     kernel_fpu_begin();
1583    
1584     ctx->ops->init(&state, &ctx->key, req->iv);
1585     crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
1586     - crypto_morus640_glue_process_crypt(&state, ops, req);
1587     + crypto_morus640_glue_process_crypt(&state, ops, &walk);
1588     ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
1589    
1590     kernel_fpu_end();
1591     diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
1592     index 27a461414b30..2690135bf83f 100644
1593     --- a/arch/x86/events/intel/uncore.c
1594     +++ b/arch/x86/events/intel/uncore.c
1595     @@ -740,6 +740,7 @@ static int uncore_pmu_event_init(struct perf_event *event)
1596     /* fixed counters have event field hardcoded to zero */
1597     hwc->config = 0ULL;
1598     } else if (is_freerunning_event(event)) {
1599     + hwc->config = event->attr.config;
1600     if (!check_valid_freerunning_event(box, event))
1601     return -EINVAL;
1602     event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
1603     diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
1604     index cb46d602a6b8..853a49a8ccf6 100644
1605     --- a/arch/x86/events/intel/uncore.h
1606     +++ b/arch/x86/events/intel/uncore.h
1607     @@ -292,8 +292,8 @@ static inline
1608     unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
1609     struct perf_event *event)
1610     {
1611     - unsigned int type = uncore_freerunning_type(event->attr.config);
1612     - unsigned int idx = uncore_freerunning_idx(event->attr.config);
1613     + unsigned int type = uncore_freerunning_type(event->hw.config);
1614     + unsigned int idx = uncore_freerunning_idx(event->hw.config);
1615     struct intel_uncore_pmu *pmu = box->pmu;
1616    
1617     return pmu->type->freerunning[type].counter_base +
1618     @@ -377,7 +377,7 @@ static inline
1619     unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
1620     struct perf_event *event)
1621     {
1622     - unsigned int type = uncore_freerunning_type(event->attr.config);
1623     + unsigned int type = uncore_freerunning_type(event->hw.config);
1624    
1625     return box->pmu->type->freerunning[type].bits;
1626     }
1627     @@ -385,7 +385,7 @@ unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
1628     static inline int uncore_num_freerunning(struct intel_uncore_box *box,
1629     struct perf_event *event)
1630     {
1631     - unsigned int type = uncore_freerunning_type(event->attr.config);
1632     + unsigned int type = uncore_freerunning_type(event->hw.config);
1633    
1634     return box->pmu->type->freerunning[type].num_counters;
1635     }
1636     @@ -399,8 +399,8 @@ static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
1637     static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
1638     struct perf_event *event)
1639     {
1640     - unsigned int type = uncore_freerunning_type(event->attr.config);
1641     - unsigned int idx = uncore_freerunning_idx(event->attr.config);
1642     + unsigned int type = uncore_freerunning_type(event->hw.config);
1643     + unsigned int idx = uncore_freerunning_idx(event->hw.config);
1644    
1645     return (type < uncore_num_freerunning_types(box, event)) &&
1646     (idx < uncore_num_freerunning(box, event));
1647     diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
1648     index 2593b0d7aeee..ef7faf486a1a 100644
1649     --- a/arch/x86/events/intel/uncore_snb.c
1650     +++ b/arch/x86/events/intel/uncore_snb.c
1651     @@ -448,9 +448,11 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
1652    
1653     /* must be done before validate_group */
1654     event->hw.event_base = base;
1655     - event->hw.config = cfg;
1656     event->hw.idx = idx;
1657    
1658     + /* Convert to standard encoding format for freerunning counters */
1659     + event->hw.config = ((cfg - 1) << 8) | 0x10ff;
1660     +
1661     /* no group validation needed, we have free running counters */
1662    
1663     return 0;
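
The snb_uncore_imc hunk above converts the driver-private counter id cfg into the standard free-running encoding up front, so the generic helpers in uncore.h (which now read event->hw.config instead of attr.config) can decode type and index uniformly. A toy encode/decode pair; the field position below is an assumption for illustration only, the authoritative bit layout lives in uncore_freerunning_type()/uncore_freerunning_idx():

#include <stdio.h>
#include <stdint.h>

/* Assumed field position, for illustration only. */
#define FR_IDX_SHIFT    8
#define FR_IDX_MASK     0xfULL

/* Mirror of the conversion in the patch: ((cfg - 1) << 8) | 0x10ff */
static uint64_t fr_encode(unsigned int cfg)
{
        return ((uint64_t)(cfg - 1) << FR_IDX_SHIFT) | 0x10ff;
}

static unsigned int fr_decode_idx(uint64_t config)
{
        return (config >> FR_IDX_SHIFT) & FR_IDX_MASK;
}

int main(void)
{
        unsigned int cfg;

        for (cfg = 1; cfg <= 3; cfg++)
                printf("cfg=%u -> config=%#llx, idx=%u\n", cfg,
                       (unsigned long long)fr_encode(cfg),
                       fr_decode_idx(fr_encode(cfg)));
        return 0;
}
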
1664     diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
1665     index 180373360e34..e40be168c73c 100644
1666     --- a/arch/x86/include/asm/kvm_host.h
1667     +++ b/arch/x86/include/asm/kvm_host.h
1668     @@ -1255,7 +1255,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1669     struct kvm_memory_slot *slot,
1670     gfn_t gfn_offset, unsigned long mask);
1671     void kvm_mmu_zap_all(struct kvm *kvm);
1672     -void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
1673     +void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
1674     unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
1675     void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
1676    
1677     diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
1678     index 8257a59704ae..763d4264d16a 100644
1679     --- a/arch/x86/kernel/ftrace.c
1680     +++ b/arch/x86/kernel/ftrace.c
1681     @@ -49,7 +49,7 @@ int ftrace_arch_code_modify_post_process(void)
1682     union ftrace_code_union {
1683     char code[MCOUNT_INSN_SIZE];
1684     struct {
1685     - unsigned char e8;
1686     + unsigned char op;
1687     int offset;
1688     } __attribute__((packed));
1689     };
1690     @@ -59,20 +59,23 @@ static int ftrace_calc_offset(long ip, long addr)
1691     return (int)(addr - ip);
1692     }
1693    
1694     -static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
1695     +static unsigned char *
1696     +ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr)
1697     {
1698     static union ftrace_code_union calc;
1699    
1700     - calc.e8 = 0xe8;
1701     + calc.op = op;
1702     calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
1703    
1704     - /*
1705     - * No locking needed, this must be called via kstop_machine
1706     - * which in essence is like running on a uniprocessor machine.
1707     - */
1708     return calc.code;
1709     }
1710    
1711     +static unsigned char *
1712     +ftrace_call_replace(unsigned long ip, unsigned long addr)
1713     +{
1714     + return ftrace_text_replace(0xe8, ip, addr);
1715     +}
1716     +
1717     static inline int
1718     within(unsigned long addr, unsigned long start, unsigned long end)
1719     {
1720     @@ -664,22 +667,6 @@ int __init ftrace_dyn_arch_init(void)
1721     return 0;
1722     }
1723    
1724     -#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
1725     -static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
1726     -{
1727     - static union ftrace_code_union calc;
1728     -
1729     - /* Jmp not a call (ignore the .e8) */
1730     - calc.e8 = 0xe9;
1731     - calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
1732     -
1733     - /*
1734     - * ftrace external locks synchronize the access to the static variable.
1735     - */
1736     - return calc.code;
1737     -}
1738     -#endif
1739     -
1740     /* Currently only x86_64 supports dynamic trampolines */
1741     #ifdef CONFIG_X86_64
1742    
1743     @@ -891,8 +878,8 @@ static void *addr_from_call(void *ptr)
1744     return NULL;
1745    
1746     /* Make sure this is a call */
1747     - if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
1748     - pr_warn("Expected e8, got %x\n", calc.e8);
1749     + if (WARN_ON_ONCE(calc.op != 0xe8)) {
1750     + pr_warn("Expected e8, got %x\n", calc.op);
1751     return NULL;
1752     }
1753    
1754     @@ -963,6 +950,11 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
1755     #ifdef CONFIG_DYNAMIC_FTRACE
1756     extern void ftrace_graph_call(void);
1757    
1758     +static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
1759     +{
1760     + return ftrace_text_replace(0xe9, ip, addr);
1761     +}
1762     +
1763     static int ftrace_mod_jmp(unsigned long ip, void *func)
1764     {
1765     unsigned char *new;
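
The ftrace refactor above folds the duplicated call/jmp builders into a single ftrace_text_replace() that stamps an opcode byte (0xe8 for call, 0xe9 for jmp) in front of a rel32 displacement measured from the end of the 5-byte instruction. A self-contained sketch of that encoding; the union mirrors the one in the patch, and the addresses in main() are made up:

#include <stdio.h>
#include <stdint.h>

#define MCOUNT_INSN_SIZE 5      /* opcode byte + 32-bit relative offset */

union code_union {
        unsigned char code[MCOUNT_INSN_SIZE];
        struct {
                unsigned char op;       /* 0xe8 = call, 0xe9 = jmp */
                int32_t offset;
        } __attribute__((packed));
};

static void text_replace(union code_union *c, unsigned char op,
                         unsigned long ip, unsigned long addr)
{
        c->op = op;
        /* rel32 is relative to the address of the *next* instruction */
        c->offset = (int32_t)(addr - (ip + MCOUNT_INSN_SIZE));
}

int main(void)
{
        union code_union c;
        int i;

        text_replace(&c, 0xe8, 0x1000, 0x2000); /* call 0x2000 from 0x1000 */
        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printf("%02x ", c.code[i]);
        printf("\n");   /* e8 fb 0f 00 00 on little-endian */
        return 0;
}
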
1766     diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
1767     index 6adf6e6c2933..544bd41a514c 100644
1768     --- a/arch/x86/kernel/kprobes/opt.c
1769     +++ b/arch/x86/kernel/kprobes/opt.c
1770     @@ -141,6 +141,11 @@ asm (
1771    
1772     void optprobe_template_func(void);
1773     STACK_FRAME_NON_STANDARD(optprobe_template_func);
1774     +NOKPROBE_SYMBOL(optprobe_template_func);
1775     +NOKPROBE_SYMBOL(optprobe_template_entry);
1776     +NOKPROBE_SYMBOL(optprobe_template_val);
1777     +NOKPROBE_SYMBOL(optprobe_template_call);
1778     +NOKPROBE_SYMBOL(optprobe_template_end);
1779    
1780     #define TMPL_MOVE_IDX \
1781     ((long)optprobe_template_val - (long)optprobe_template_entry)
1782     diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
1783     index e811d4d1c824..d908a37bf3f3 100644
1784     --- a/arch/x86/kernel/kvmclock.c
1785     +++ b/arch/x86/kernel/kvmclock.c
1786     @@ -104,12 +104,8 @@ static u64 kvm_sched_clock_read(void)
1787    
1788     static inline void kvm_sched_clock_init(bool stable)
1789     {
1790     - if (!stable) {
1791     - pv_ops.time.sched_clock = kvm_clock_read;
1792     + if (!stable)
1793     clear_sched_clock_stable();
1794     - return;
1795     - }
1796     -
1797     kvm_sched_clock_offset = kvm_clock_read();
1798     pv_ops.time.sched_clock = kvm_sched_clock_read;
1799    
1800     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1801     index f2d1d230d5b8..9ab33cab9486 100644
1802     --- a/arch/x86/kvm/mmu.c
1803     +++ b/arch/x86/kvm/mmu.c
1804     @@ -5635,13 +5635,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
1805     {
1806     struct kvm_memslots *slots;
1807     struct kvm_memory_slot *memslot;
1808     - bool flush_tlb = true;
1809     - bool flush = false;
1810     int i;
1811    
1812     - if (kvm_available_flush_tlb_with_range())
1813     - flush_tlb = false;
1814     -
1815     spin_lock(&kvm->mmu_lock);
1816     for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1817     slots = __kvm_memslots(kvm, i);
1818     @@ -5653,17 +5648,12 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
1819     if (start >= end)
1820     continue;
1821    
1822     - flush |= slot_handle_level_range(kvm, memslot,
1823     - kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
1824     - PT_MAX_HUGEPAGE_LEVEL, start,
1825     - end - 1, flush_tlb);
1826     + slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
1827     + PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
1828     + start, end - 1, true);
1829     }
1830     }
1831    
1832     - if (flush)
1833     - kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
1834     - gfn_end - gfn_start + 1);
1835     -
1836     spin_unlock(&kvm->mmu_lock);
1837     }
1838    
1839     @@ -5901,13 +5891,30 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
1840     return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
1841     }
1842    
1843     -void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
1844     +void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
1845     {
1846     + gen &= MMIO_GEN_MASK;
1847     +
1848     + /*
1849     + * Shift to eliminate the "update in-progress" flag, which isn't
1850     + * included in the spte's generation number.
1851     + */
1852     + gen >>= 1;
1853     +
1854     + /*
1855     + * Generation numbers are incremented in multiples of the number of
1856     + * address spaces in order to provide unique generations across all
1857     + * address spaces. Strip what is effectively the address space
1858     + * modifier prior to checking for a wrap of the MMIO generation so
1859     + * that a wrap in any address space is detected.
1860     + */
1861     + gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
1862     +
1863     /*
1864     - * The very rare case: if the generation-number is round,
1865     + * The very rare case: if the MMIO generation number has wrapped,
1866     * zap all shadow pages.
1867     */
1868     - if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
1869     + if (unlikely(gen == 0)) {
1870     kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
1871     kvm_mmu_invalidate_zap_all_pages(kvm);
1872     }
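
The mmu.c hunk above keys the wraparound check on the generation number itself rather than on the memslots pointer; the comments it adds explain the two strip steps. A worked userspace sketch of that arithmetic, with illustrative mask values (the real constants live in arch/x86/kvm/mmu.c):

#include <stdio.h>
#include <stdint.h>

#define MMIO_GEN_MASK           ((1ULL << 19) - 1)      /* illustrative */
#define KVM_ADDRESS_SPACE_NUM   2

/* Mirrors the checks in kvm_mmu_invalidate_mmio_sptes(). */
static int mmio_gen_wrapped(uint64_t gen)
{
        gen &= MMIO_GEN_MASK;
        /* Bit 0 is the "update in-progress" flag, which is not part
         * of the generation stored in the spte, so shift it out. */
        gen >>= 1;
        /* Generations advance once per address space per update, so
         * strip the address-space modifier before testing for a wrap. */
        gen &= ~((uint64_t)KVM_ADDRESS_SPACE_NUM - 1);
        return gen == 0;
}

int main(void)
{
        printf("gen=0: wrapped=%d\n", mmio_gen_wrapped(0)); /* 1 */
        printf("gen=2: wrapped=%d\n", mmio_gen_wrapped(2)); /* 1: only AS bit set */
        printf("gen=4: wrapped=%d\n", mmio_gen_wrapped(4)); /* 0 */
        return 0;
}
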
1873     diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
1874     index d737a51a53ca..f014e1aeee96 100644
1875     --- a/arch/x86/kvm/vmx/nested.c
1876     +++ b/arch/x86/kvm/vmx/nested.c
1877     @@ -2765,7 +2765,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
1878     "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
1879    
1880     /* Check if vmlaunch or vmresume is needed */
1881     - "cmpl $0, %c[launched](%% " _ASM_CX")\n\t"
1882     + "cmpb $0, %c[launched](%% " _ASM_CX")\n\t"
1883    
1884     "call vmx_vmenter\n\t"
1885    
1886     @@ -4035,25 +4035,50 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
1887     /* Addr = segment_base + offset */
1888     /* offset = base + [index * scale] + displacement */
1889     off = exit_qualification; /* holds the displacement */
1890     + if (addr_size == 1)
1891     + off = (gva_t)sign_extend64(off, 31);
1892     + else if (addr_size == 0)
1893     + off = (gva_t)sign_extend64(off, 15);
1894     if (base_is_valid)
1895     off += kvm_register_read(vcpu, base_reg);
1896     if (index_is_valid)
1897     off += kvm_register_read(vcpu, index_reg)<<scaling;
1898     vmx_get_segment(vcpu, &s, seg_reg);
1899     - *ret = s.base + off;
1900    
1901     + /*
1902     + * The effective address, i.e. @off, of a memory operand is truncated
1903     + * based on the address size of the instruction. Note that this is
1904     + * the *effective address*, i.e. the address prior to accounting for
1905     + * the segment's base.
1906     + */
1907     if (addr_size == 1) /* 32 bit */
1908     - *ret &= 0xffffffff;
1909     + off &= 0xffffffff;
1910     + else if (addr_size == 0) /* 16 bit */
1911     + off &= 0xffff;
1912    
1913     /* Checks for #GP/#SS exceptions. */
1914     exn = false;
1915     if (is_long_mode(vcpu)) {
1916     + /*
1917     + * The virtual/linear address is never truncated in 64-bit
1918     + * mode, e.g. a 32-bit address size can yield a 64-bit virtual
1919     + * address when using FS/GS with a non-zero base.
1920     + */
1921     + *ret = s.base + off;
1922     +
1923     /* Long mode: #GP(0)/#SS(0) if the memory address is in a
1924     * non-canonical form. This is the only check on the memory
1925     * destination for long mode!
1926     */
1927     exn = is_noncanonical_address(*ret, vcpu);
1928     } else if (is_protmode(vcpu)) {
1929     + /*
1930     + * When not in long mode, the virtual/linear address is
1931     + * unconditionally truncated to 32 bits regardless of the
1932     + * address size.
1933     + */
1934     + *ret = (s.base + off) & 0xffffffff;
1935     +
1936     /* Protected mode: apply checks for segment validity in the
1937     * following order:
1938     * - segment type check (#GP(0) may be thrown)
1939     @@ -4077,10 +4102,16 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
1940     /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
1941     */
1942     exn = (s.unusable != 0);
1943     - /* Protected mode: #GP(0)/#SS(0) if the memory
1944     - * operand is outside the segment limit.
1945     +
1946     + /*
1947     + * Protected mode: #GP(0)/#SS(0) if the memory operand is
1948     + * outside the segment limit. All CPUs that support VMX ignore
1949     + * limit checks for flat segments, i.e. segments with base==0,
1950     + * limit==0xffffffff and of type expand-up data or code.
1951     */
1952     - exn = exn || (off + sizeof(u64) > s.limit);
1953     + if (!(s.base == 0 && s.limit == 0xffffffff &&
1954     + ((s.type & 8) || !(s.type & 4))))
1955     + exn = exn || (off + sizeof(u64) > s.limit);
1956     }
1957     if (exn) {
1958     kvm_queue_exception_e(vcpu,
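
The get_vmx_mem_address() fix above first sign-extends the raw displacement to the instruction's address size, adds base and index, and only then truncates the effective address (the segment base is added afterwards and, in long mode, never truncated). A sketch of the two arithmetic steps; sign_extend64() is re-implemented here for userspace, and the displacement value is invented:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t gva_t;         /* stand-in for the kernel typedef */

/* Userspace equivalent of the kernel's sign_extend64(): extend the
 * sign from 0-based bit position `index` to all 64 bits. */
static int64_t sign_extend64(uint64_t value, int index)
{
        int shift = 63 - index;

        return (int64_t)(value << shift) >> shift;
}

int main(void)
{
        uint64_t disp = 0xfffffff0;     /* -16 as a 32-bit displacement */
        gva_t off;

        /* addr_size == 1 (32-bit): interpret the displacement as signed... */
        off = (gva_t)sign_extend64(disp, 31);
        printf("sign-extended: %#llx\n", (unsigned long long)off);

        /* ...then, after base/index are added, truncate the effective
         * address back to the instruction's address size. */
        off &= 0xffffffff;
        printf("truncated:     %#llx\n", (unsigned long long)off);
        return 0;
}
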
1959     diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
1960     index 30a6bcd735ec..d86eee07d327 100644
1961     --- a/arch/x86/kvm/vmx/vmx.c
1962     +++ b/arch/x86/kvm/vmx/vmx.c
1963     @@ -6399,7 +6399,7 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
1964     "mov %%" _ASM_AX", %%cr2 \n\t"
1965     "3: \n\t"
1966     /* Check if vmlaunch or vmresume is needed */
1967     - "cmpl $0, %c[launched](%%" _ASM_CX ") \n\t"
1968     + "cmpb $0, %c[launched](%%" _ASM_CX ") \n\t"
1969     /* Load guest registers. Don't clobber flags. */
1970     "mov %c[rax](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
1971     "mov %c[rbx](%%" _ASM_CX "), %%" _ASM_BX " \n\t"
1972     @@ -6449,10 +6449,15 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
1973     "mov %%r13, %c[r13](%%" _ASM_CX ") \n\t"
1974     "mov %%r14, %c[r14](%%" _ASM_CX ") \n\t"
1975     "mov %%r15, %c[r15](%%" _ASM_CX ") \n\t"
1976     +
1977     /*
1978     - * Clear host registers marked as clobbered to prevent
1979     - * speculative use.
1980     - */
1981     + * Clear all general purpose registers (except RSP, which is loaded by
1982     + * the CPU during VM-Exit) to prevent speculative use of the guest's
1983     + * values, even those that are saved/loaded via the stack. In theory,
1984     + * an L1 cache miss when restoring registers could lead to speculative
1985     + * execution with the guest's values. Zeroing XORs are dirt cheap,
1986     + * i.e. the extra paranoia is essentially free.
1987     + */
1988     "xor %%r8d, %%r8d \n\t"
1989     "xor %%r9d, %%r9d \n\t"
1990     "xor %%r10d, %%r10d \n\t"
1991     @@ -6467,8 +6472,11 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
1992    
1993     "xor %%eax, %%eax \n\t"
1994     "xor %%ebx, %%ebx \n\t"
1995     + "xor %%ecx, %%ecx \n\t"
1996     + "xor %%edx, %%edx \n\t"
1997     "xor %%esi, %%esi \n\t"
1998     "xor %%edi, %%edi \n\t"
1999     + "xor %%ebp, %%ebp \n\t"
2000     "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
2001     : ASM_CALL_CONSTRAINT
2002     : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp),
2003     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2004     index 941f932373d0..2bcef72a7c40 100644
2005     --- a/arch/x86/kvm/x86.c
2006     +++ b/arch/x86/kvm/x86.c
2007     @@ -9348,13 +9348,13 @@ out_free:
2008     return -ENOMEM;
2009     }
2010    
2011     -void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
2012     +void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
2013     {
2014     /*
2015     * memslots->generation has been incremented.
2016     * mmio generation may have reached its maximum value.
2017     */
2018     - kvm_mmu_invalidate_mmio_sptes(kvm, slots);
2019     + kvm_mmu_invalidate_mmio_sptes(kvm, gen);
2020     }
2021    
2022     int kvm_arch_prepare_memory_region(struct kvm *kvm,
2023     diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
2024     index 224cd0a47568..20ede17202bf 100644
2025     --- a/arch/x86/kvm/x86.h
2026     +++ b/arch/x86/kvm/x86.h
2027     @@ -181,6 +181,11 @@ static inline bool emul_is_noncanonical_address(u64 la,
2028     static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
2029     gva_t gva, gfn_t gfn, unsigned access)
2030     {
2031     + u64 gen = kvm_memslots(vcpu->kvm)->generation;
2032     +
2033     + if (unlikely(gen & 1))
2034     + return;
2035     +
2036     /*
2037     * If this is a shadow nested page table, the "GVA" is
2038     * actually a nGPA.
2039     @@ -188,7 +193,7 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
2040     vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
2041     vcpu->arch.access = access;
2042     vcpu->arch.mmio_gfn = gfn;
2043     - vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
2044     + vcpu->arch.mmio_gen = gen;
2045     }
2046    
2047     static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
2048     diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
2049     index 0f4fe206dcc2..20701977e6c0 100644
2050     --- a/arch/x86/xen/mmu_pv.c
2051     +++ b/arch/x86/xen/mmu_pv.c
2052     @@ -2114,10 +2114,10 @@ void __init xen_relocate_p2m(void)
2053     pt = early_memremap(pt_phys, PAGE_SIZE);
2054     clear_page(pt);
2055     for (idx_pte = 0;
2056     - idx_pte < min(n_pte, PTRS_PER_PTE);
2057     - idx_pte++) {
2058     - set_pte(pt + idx_pte,
2059     - pfn_pte(p2m_pfn, PAGE_KERNEL));
2060     + idx_pte < min(n_pte, PTRS_PER_PTE);
2061     + idx_pte++) {
2062     + pt[idx_pte] = pfn_pte(p2m_pfn,
2063     + PAGE_KERNEL);
2064     p2m_pfn++;
2065     }
2066     n_pte -= PTRS_PER_PTE;
2067     @@ -2125,8 +2125,7 @@ void __init xen_relocate_p2m(void)
2068     make_lowmem_page_readonly(__va(pt_phys));
2069     pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
2070     PFN_DOWN(pt_phys));
2071     - set_pmd(pmd + idx_pt,
2072     - __pmd(_PAGE_TABLE | pt_phys));
2073     + pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
2074     pt_phys += PAGE_SIZE;
2075     }
2076     n_pt -= PTRS_PER_PMD;
2077     @@ -2134,7 +2133,7 @@ void __init xen_relocate_p2m(void)
2078     make_lowmem_page_readonly(__va(pmd_phys));
2079     pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
2080     PFN_DOWN(pmd_phys));
2081     - set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
2082     + pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
2083     pmd_phys += PAGE_SIZE;
2084     }
2085     n_pmd -= PTRS_PER_PUD;
2086     diff --git a/crypto/aead.c b/crypto/aead.c
2087     index 189c52d1f63a..4908b5e846f0 100644
2088     --- a/crypto/aead.c
2089     +++ b/crypto/aead.c
2090     @@ -61,8 +61,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
2091     else
2092     err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
2093    
2094     - if (err)
2095     + if (unlikely(err)) {
2096     + crypto_aead_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
2097     return err;
2098     + }
2099    
2100     crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
2101     return 0;
2102     diff --git a/crypto/aegis128.c b/crypto/aegis128.c
2103     index c22f4414856d..789716f92e4c 100644
2104     --- a/crypto/aegis128.c
2105     +++ b/crypto/aegis128.c
2106     @@ -290,19 +290,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state,
2107     const struct aegis128_ops *ops)
2108     {
2109     struct skcipher_walk walk;
2110     - u8 *src, *dst;
2111     - unsigned int chunksize;
2112    
2113     ops->skcipher_walk_init(&walk, req, false);
2114    
2115     while (walk.nbytes) {
2116     - src = walk.src.virt.addr;
2117     - dst = walk.dst.virt.addr;
2118     - chunksize = walk.nbytes;
2119     + unsigned int nbytes = walk.nbytes;
2120    
2121     - ops->crypt_chunk(state, dst, src, chunksize);
2122     + if (nbytes < walk.total)
2123     + nbytes = round_down(nbytes, walk.stride);
2124    
2125     - skcipher_walk_done(&walk, 0);
2126     + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
2127     + nbytes);
2128     +
2129     + skcipher_walk_done(&walk, walk.nbytes - nbytes);
2130     }
2131     }
2132    
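
The generic aegis loops (this hunk and the matching aegis128l/aegis256/morus ones below) now round each non-final chunk down to walk.stride and hand the remainder back to skcipher_walk_done(), so only the last chunk of a request can be a partial block. A sketch of that per-iteration decision, with invented lengths:

#include <stdio.h>

/* Sketch of the bookkeeping in the reworked process_crypt() loops;
 * the stride and totals below are made up for the demo. */
static unsigned int chunk_to_crypt(unsigned int nbytes, unsigned int total,
                                   unsigned int stride)
{
        /* Not the final chunk: keep the state block-aligned. */
        if (nbytes < total)
                nbytes -= nbytes % stride;      /* round_down() */
        return nbytes;
}

int main(void)
{
        /* mid-request chunk: 100 of 256 bytes at stride 16 -> crypt 96 */
        printf("%u\n", chunk_to_crypt(100, 256, 16));
        /* final chunk: nbytes == total -> crypt all 100 */
        printf("%u\n", chunk_to_crypt(100, 100, 16));
        return 0;
}
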
2133     diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
2134     index b6fb21ebdc3e..73811448cb6b 100644
2135     --- a/crypto/aegis128l.c
2136     +++ b/crypto/aegis128l.c
2137     @@ -353,19 +353,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state,
2138     const struct aegis128l_ops *ops)
2139     {
2140     struct skcipher_walk walk;
2141     - u8 *src, *dst;
2142     - unsigned int chunksize;
2143    
2144     ops->skcipher_walk_init(&walk, req, false);
2145    
2146     while (walk.nbytes) {
2147     - src = walk.src.virt.addr;
2148     - dst = walk.dst.virt.addr;
2149     - chunksize = walk.nbytes;
2150     + unsigned int nbytes = walk.nbytes;
2151    
2152     - ops->crypt_chunk(state, dst, src, chunksize);
2153     + if (nbytes < walk.total)
2154     + nbytes = round_down(nbytes, walk.stride);
2155    
2156     - skcipher_walk_done(&walk, 0);
2157     + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
2158     + nbytes);
2159     +
2160     + skcipher_walk_done(&walk, walk.nbytes - nbytes);
2161     }
2162     }
2163    
2164     diff --git a/crypto/aegis256.c b/crypto/aegis256.c
2165     index 11f0f8ec9c7c..8a71e9c06193 100644
2166     --- a/crypto/aegis256.c
2167     +++ b/crypto/aegis256.c
2168     @@ -303,19 +303,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state,
2169     const struct aegis256_ops *ops)
2170     {
2171     struct skcipher_walk walk;
2172     - u8 *src, *dst;
2173     - unsigned int chunksize;
2174    
2175     ops->skcipher_walk_init(&walk, req, false);
2176    
2177     while (walk.nbytes) {
2178     - src = walk.src.virt.addr;
2179     - dst = walk.dst.virt.addr;
2180     - chunksize = walk.nbytes;
2181     + unsigned int nbytes = walk.nbytes;
2182    
2183     - ops->crypt_chunk(state, dst, src, chunksize);
2184     + if (nbytes < walk.total)
2185     + nbytes = round_down(nbytes, walk.stride);
2186    
2187     - skcipher_walk_done(&walk, 0);
2188     + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
2189     + nbytes);
2190     +
2191     + skcipher_walk_done(&walk, walk.nbytes - nbytes);
2192     }
2193     }
2194    
2195     diff --git a/crypto/ahash.c b/crypto/ahash.c
2196     index 5d320a811f75..81e2767e2164 100644
2197     --- a/crypto/ahash.c
2198     +++ b/crypto/ahash.c
2199     @@ -86,17 +86,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
2200     int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
2201     {
2202     unsigned int alignmask = walk->alignmask;
2203     - unsigned int nbytes = walk->entrylen;
2204    
2205     walk->data -= walk->offset;
2206    
2207     - if (nbytes && walk->offset & alignmask && !err) {
2208     - walk->offset = ALIGN(walk->offset, alignmask + 1);
2209     - nbytes = min(nbytes,
2210     - ((unsigned int)(PAGE_SIZE)) - walk->offset);
2211     - walk->entrylen -= nbytes;
2212     + if (walk->entrylen && (walk->offset & alignmask) && !err) {
2213     + unsigned int nbytes;
2214    
2215     + walk->offset = ALIGN(walk->offset, alignmask + 1);
2216     + nbytes = min(walk->entrylen,
2217     + (unsigned int)(PAGE_SIZE - walk->offset));
2218     if (nbytes) {
2219     + walk->entrylen -= nbytes;
2220     walk->data += walk->offset;
2221     return nbytes;
2222     }
2223     @@ -116,7 +116,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
2224     if (err)
2225     return err;
2226    
2227     - if (nbytes) {
2228     + if (walk->entrylen) {
2229     walk->offset = 0;
2230     walk->pg++;
2231     return hash_walk_next(walk);
2232     @@ -190,6 +190,21 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
2233     return ret;
2234     }
2235    
2236     +static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
2237     + unsigned int keylen)
2238     +{
2239     + return -ENOSYS;
2240     +}
2241     +
2242     +static void ahash_set_needkey(struct crypto_ahash *tfm)
2243     +{
2244     + const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
2245     +
2246     + if (tfm->setkey != ahash_nosetkey &&
2247     + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
2248     + crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
2249     +}
2250     +
2251     int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2252     unsigned int keylen)
2253     {
2254     @@ -201,20 +216,16 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2255     else
2256     err = tfm->setkey(tfm, key, keylen);
2257    
2258     - if (err)
2259     + if (unlikely(err)) {
2260     + ahash_set_needkey(tfm);
2261     return err;
2262     + }
2263    
2264     crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
2265     return 0;
2266     }
2267     EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
2268    
2269     -static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
2270     - unsigned int keylen)
2271     -{
2272     - return -ENOSYS;
2273     -}
2274     -
2275     static inline unsigned int ahash_align_buffer_size(unsigned len,
2276     unsigned long mask)
2277     {
2278     @@ -489,8 +500,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
2279    
2280     if (alg->setkey) {
2281     hash->setkey = alg->setkey;
2282     - if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
2283     - crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
2284     + ahash_set_needkey(hash);
2285     }
2286    
2287     return 0;
2288     diff --git a/crypto/cfb.c b/crypto/cfb.c
2289     index e81e45673498..4abfe32ff845 100644
2290     --- a/crypto/cfb.c
2291     +++ b/crypto/cfb.c
2292     @@ -77,12 +77,14 @@ static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
2293     do {
2294     crypto_cfb_encrypt_one(tfm, iv, dst);
2295     crypto_xor(dst, src, bsize);
2296     - memcpy(iv, dst, bsize);
2297     + iv = dst;
2298    
2299     src += bsize;
2300     dst += bsize;
2301     } while ((nbytes -= bsize) >= bsize);
2302    
2303     + memcpy(walk->iv, iv, bsize);
2304     +
2305     return nbytes;
2306     }
2307    
2308     @@ -162,7 +164,7 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
2309     const unsigned int bsize = crypto_cfb_bsize(tfm);
2310     unsigned int nbytes = walk->nbytes;
2311     u8 *src = walk->src.virt.addr;
2312     - u8 *iv = walk->iv;
2313     + u8 * const iv = walk->iv;
2314     u8 tmp[MAX_CIPHER_BLOCKSIZE];
2315    
2316     do {
2317     @@ -172,8 +174,6 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
2318     src += bsize;
2319     } while ((nbytes -= bsize) >= bsize);
2320    
2321     - memcpy(walk->iv, iv, bsize);
2322     -
2323     return nbytes;
2324     }
2325    
2326     @@ -298,6 +298,12 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
2327     inst->alg.base.cra_blocksize = 1;
2328     inst->alg.base.cra_alignmask = alg->cra_alignmask;
2329    
2330     + /*
2331     + * To simplify the implementation, configure the skcipher walk to only
2332     + * give a partial block at the very end, never earlier.
2333     + */
2334     + inst->alg.chunksize = alg->cra_blocksize;
2335     +
2336     inst->alg.ivsize = alg->cra_blocksize;
2337     inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
2338     inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
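
In the cfb.c hunks above, each ciphertext block already is the next IV, so the per-block memcpy() is replaced by repointing iv at the block just written and copying into walk->iv once at the end. A toy sketch of the encrypt-segment loop; the "cipher" below is a deliberately fake stand-in, not AES:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BSIZE 16

/* Fake block transform standing in for crypto_cfb_encrypt_one(). */
static void toy_encrypt_block(uint8_t *out, const uint8_t *in)
{
        int i;

        for (i = 0; i < BSIZE; i++)
                out[i] = (uint8_t)(in[i] ^ 0xa5) + 3;
}

static void xor_block(uint8_t *dst, const uint8_t *src)
{
        int i;

        for (i = 0; i < BSIZE; i++)
                dst[i] ^= src[i];
}

/* Mirrors crypto_cfb_encrypt_segment() after the patch; assumes at
 * least one full block, as the kernel caller guarantees. */
static void cfb_encrypt(uint8_t *dst, const uint8_t *src, size_t nblocks,
                        uint8_t *walk_iv)
{
        const uint8_t *iv = walk_iv;
        size_t n;

        for (n = 0; n < nblocks; n++) {
                toy_encrypt_block(dst, iv);     /* dst = E(iv)       */
                xor_block(dst, src);            /* dst ^= src        */
                iv = dst;                       /* ct is the next IV */
                src += BSIZE;
                dst += BSIZE;
        }
        memcpy(walk_iv, iv, BSIZE);             /* one copy, at the end */
}

int main(void)
{
        uint8_t iv[BSIZE] = { 0 };
        uint8_t pt[2 * BSIZE] = "two blocks of plaintext here..";
        uint8_t ct[2 * BSIZE];

        cfb_encrypt(ct, pt, 2, iv);
        printf("last ct byte: %02x\n", ct[2 * BSIZE - 1]);
        return 0;
}
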
2339     diff --git a/crypto/morus1280.c b/crypto/morus1280.c
2340     index 3889c188f266..b83576b4eb55 100644
2341     --- a/crypto/morus1280.c
2342     +++ b/crypto/morus1280.c
2343     @@ -366,18 +366,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
2344     const struct morus1280_ops *ops)
2345     {
2346     struct skcipher_walk walk;
2347     - u8 *dst;
2348     - const u8 *src;
2349    
2350     ops->skcipher_walk_init(&walk, req, false);
2351    
2352     while (walk.nbytes) {
2353     - src = walk.src.virt.addr;
2354     - dst = walk.dst.virt.addr;
2355     + unsigned int nbytes = walk.nbytes;
2356    
2357     - ops->crypt_chunk(state, dst, src, walk.nbytes);
2358     + if (nbytes < walk.total)
2359     + nbytes = round_down(nbytes, walk.stride);
2360    
2361     - skcipher_walk_done(&walk, 0);
2362     + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
2363     + nbytes);
2364     +
2365     + skcipher_walk_done(&walk, walk.nbytes - nbytes);
2366     }
2367     }
2368    
2369     diff --git a/crypto/morus640.c b/crypto/morus640.c
2370     index da06ec2f6a80..b6a477444f6d 100644
2371     --- a/crypto/morus640.c
2372     +++ b/crypto/morus640.c
2373     @@ -365,18 +365,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
2374     const struct morus640_ops *ops)
2375     {
2376     struct skcipher_walk walk;
2377     - u8 *dst;
2378     - const u8 *src;
2379    
2380     ops->skcipher_walk_init(&walk, req, false);
2381    
2382     while (walk.nbytes) {
2383     - src = walk.src.virt.addr;
2384     - dst = walk.dst.virt.addr;
2385     + unsigned int nbytes = walk.nbytes;
2386    
2387     - ops->crypt_chunk(state, dst, src, walk.nbytes);
2388     + if (nbytes < walk.total)
2389     + nbytes = round_down(nbytes, walk.stride);
2390    
2391     - skcipher_walk_done(&walk, 0);
2392     + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
2393     + nbytes);
2394     +
2395     + skcipher_walk_done(&walk, walk.nbytes - nbytes);
2396     }
2397     }
2398    
2399     diff --git a/crypto/ofb.c b/crypto/ofb.c
2400     index 886631708c5e..cab0b80953fe 100644
2401     --- a/crypto/ofb.c
2402     +++ b/crypto/ofb.c
2403     @@ -5,9 +5,6 @@
2404     *
2405     * Copyright (C) 2018 ARM Limited or its affiliates.
2406     * All rights reserved.
2407     - *
2408     - * Based loosely on public domain code gleaned from libtomcrypt
2409     - * (https://github.com/libtom/libtomcrypt).
2410     */
2411    
2412     #include <crypto/algapi.h>
2413     @@ -21,7 +18,6 @@
2414    
2415     struct crypto_ofb_ctx {
2416     struct crypto_cipher *child;
2417     - int cnt;
2418     };
2419    
2420    
2421     @@ -41,58 +37,40 @@ static int crypto_ofb_setkey(struct crypto_skcipher *parent, const u8 *key,
2422     return err;
2423     }
2424    
2425     -static int crypto_ofb_encrypt_segment(struct crypto_ofb_ctx *ctx,
2426     - struct skcipher_walk *walk,
2427     - struct crypto_cipher *tfm)
2428     +static int crypto_ofb_crypt(struct skcipher_request *req)
2429     {
2430     - int bsize = crypto_cipher_blocksize(tfm);
2431     - int nbytes = walk->nbytes;
2432     -
2433     - u8 *src = walk->src.virt.addr;
2434     - u8 *dst = walk->dst.virt.addr;
2435     - u8 *iv = walk->iv;
2436     -
2437     - do {
2438     - if (ctx->cnt == bsize) {
2439     - if (nbytes < bsize)
2440     - break;
2441     - crypto_cipher_encrypt_one(tfm, iv, iv);
2442     - ctx->cnt = 0;
2443     - }
2444     - *dst = *src ^ iv[ctx->cnt];
2445     - src++;
2446     - dst++;
2447     - ctx->cnt++;
2448     - } while (--nbytes);
2449     - return nbytes;
2450     -}
2451     -
2452     -static int crypto_ofb_encrypt(struct skcipher_request *req)
2453     -{
2454     - struct skcipher_walk walk;
2455     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
2456     - unsigned int bsize;
2457     struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
2458     - struct crypto_cipher *child = ctx->child;
2459     - int ret = 0;
2460     + struct crypto_cipher *cipher = ctx->child;
2461     + const unsigned int bsize = crypto_cipher_blocksize(cipher);
2462     + struct skcipher_walk walk;
2463     + int err;
2464    
2465     - bsize = crypto_cipher_blocksize(child);
2466     - ctx->cnt = bsize;
2467     + err = skcipher_walk_virt(&walk, req, false);
2468    
2469     - ret = skcipher_walk_virt(&walk, req, false);
2470     + while (walk.nbytes >= bsize) {
2471     + const u8 *src = walk.src.virt.addr;
2472     + u8 *dst = walk.dst.virt.addr;
2473     + u8 * const iv = walk.iv;
2474     + unsigned int nbytes = walk.nbytes;
2475    
2476     - while (walk.nbytes) {
2477     - ret = crypto_ofb_encrypt_segment(ctx, &walk, child);
2478     - ret = skcipher_walk_done(&walk, ret);
2479     - }
2480     + do {
2481     + crypto_cipher_encrypt_one(cipher, iv, iv);
2482     + crypto_xor_cpy(dst, src, iv, bsize);
2483     + dst += bsize;
2484     + src += bsize;
2485     + } while ((nbytes -= bsize) >= bsize);
2486    
2487     - return ret;
2488     -}
2489     + err = skcipher_walk_done(&walk, nbytes);
2490     + }
2491    
2492     -/* OFB encrypt and decrypt are identical */
2493     -static int crypto_ofb_decrypt(struct skcipher_request *req)
2494     -{
2495     - return crypto_ofb_encrypt(req);
2496     + if (walk.nbytes) {
2497     + crypto_cipher_encrypt_one(cipher, walk.iv, walk.iv);
2498     + crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, walk.iv,
2499     + walk.nbytes);
2500     + err = skcipher_walk_done(&walk, 0);
2501     + }
2502     + return err;
2503     }
2504    
2505     static int crypto_ofb_init_tfm(struct crypto_skcipher *tfm)
2506     @@ -165,13 +143,18 @@ static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
2507     if (err)
2508     goto err_drop_spawn;
2509    
2510     + /* OFB mode is a stream cipher. */
2511     + inst->alg.base.cra_blocksize = 1;
2512     +
2513     + /*
2514     + * To simplify the implementation, configure the skcipher walk to only
2515     + * give a partial block at the very end, never earlier.
2516     + */
2517     + inst->alg.chunksize = alg->cra_blocksize;
2518     +
2519     inst->alg.base.cra_priority = alg->cra_priority;
2520     - inst->alg.base.cra_blocksize = alg->cra_blocksize;
2521     inst->alg.base.cra_alignmask = alg->cra_alignmask;
2522    
2523     - /* We access the data as u32s when xoring. */
2524     - inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
2525     -
2526     inst->alg.ivsize = alg->cra_blocksize;
2527     inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
2528     inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
2529     @@ -182,8 +165,8 @@ static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
2530     inst->alg.exit = crypto_ofb_exit_tfm;
2531    
2532     inst->alg.setkey = crypto_ofb_setkey;
2533     - inst->alg.encrypt = crypto_ofb_encrypt;
2534     - inst->alg.decrypt = crypto_ofb_decrypt;
2535     + inst->alg.encrypt = crypto_ofb_crypt;
2536     + inst->alg.decrypt = crypto_ofb_crypt;
2537    
2538     inst->free = crypto_ofb_free;
2539    
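
The ofb.c rewrite above treats OFB as what it is, a stream cipher: the keystream is the IV encrypted over and over, encryption and decryption are the same operation, and a trailing partial block simply consumes a prefix of one keystream block. A toy model of crypto_ofb_crypt(); the block transform is a fake stand-in, not a real cipher:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BSIZE 16

/* Fake stand-in for crypto_cipher_encrypt_one(). */
static void toy_encrypt_block(uint8_t *out, const uint8_t *in)
{
        int i;

        for (i = 0; i < BSIZE; i++)
                out[i] = (uint8_t)(in[i] ^ 0x5a) + 1;
}

static void ofb_crypt(uint8_t *dst, const uint8_t *src, size_t len,
                      uint8_t *iv)
{
        size_t i;

        while (len >= BSIZE) {
                toy_encrypt_block(iv, iv);      /* next keystream block */
                for (i = 0; i < BSIZE; i++)
                        dst[i] = src[i] ^ iv[i];
                src += BSIZE;
                dst += BSIZE;
                len -= BSIZE;
        }
        if (len) {                              /* final partial block */
                toy_encrypt_block(iv, iv);
                for (i = 0; i < len; i++)
                        dst[i] = src[i] ^ iv[i];
        }
}

int main(void)
{
        uint8_t iv1[BSIZE] = { 0 }, iv2[BSIZE] = { 0 };
        uint8_t pt[23] = "seventeen-plus-bytes..";
        uint8_t ct[23], out[23];

        ofb_crypt(ct, pt, sizeof(pt), iv1);
        ofb_crypt(out, ct, sizeof(ct), iv2);    /* decrypt == encrypt */
        printf("round-trip ok: %d\n", !memcmp(pt, out, sizeof(pt)));
        return 0;
}
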
2540     diff --git a/crypto/pcbc.c b/crypto/pcbc.c
2541     index 8aa10144407c..1b182dfedc94 100644
2542     --- a/crypto/pcbc.c
2543     +++ b/crypto/pcbc.c
2544     @@ -51,7 +51,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
2545     unsigned int nbytes = walk->nbytes;
2546     u8 *src = walk->src.virt.addr;
2547     u8 *dst = walk->dst.virt.addr;
2548     - u8 *iv = walk->iv;
2549     + u8 * const iv = walk->iv;
2550    
2551     do {
2552     crypto_xor(iv, src, bsize);
2553     @@ -72,7 +72,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
2554     int bsize = crypto_cipher_blocksize(tfm);
2555     unsigned int nbytes = walk->nbytes;
2556     u8 *src = walk->src.virt.addr;
2557     - u8 *iv = walk->iv;
2558     + u8 * const iv = walk->iv;
2559     u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
2560    
2561     do {
2562     @@ -84,8 +84,6 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
2563     src += bsize;
2564     } while ((nbytes -= bsize) >= bsize);
2565    
2566     - memcpy(walk->iv, iv, bsize);
2567     -
2568     return nbytes;
2569     }
2570    
2571     @@ -121,7 +119,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
2572     unsigned int nbytes = walk->nbytes;
2573     u8 *src = walk->src.virt.addr;
2574     u8 *dst = walk->dst.virt.addr;
2575     - u8 *iv = walk->iv;
2576     + u8 * const iv = walk->iv;
2577    
2578     do {
2579     crypto_cipher_decrypt_one(tfm, dst, src);
2580     @@ -132,8 +130,6 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
2581     dst += bsize;
2582     } while ((nbytes -= bsize) >= bsize);
2583    
2584     - memcpy(walk->iv, iv, bsize);
2585     -
2586     return nbytes;
2587     }
2588    
2589     @@ -144,7 +140,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
2590     int bsize = crypto_cipher_blocksize(tfm);
2591     unsigned int nbytes = walk->nbytes;
2592     u8 *src = walk->src.virt.addr;
2593     - u8 *iv = walk->iv;
2594     + u8 * const iv = walk->iv;
2595     u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
2596    
2597     do {
2598     @@ -156,8 +152,6 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
2599     src += bsize;
2600     } while ((nbytes -= bsize) >= bsize);
2601    
2602     - memcpy(walk->iv, iv, bsize);
2603     -
2604     return nbytes;
2605     }
2606    
2607     diff --git a/crypto/shash.c b/crypto/shash.c
2608     index 44d297b82a8f..40311ccad3fa 100644
2609     --- a/crypto/shash.c
2610     +++ b/crypto/shash.c
2611     @@ -53,6 +53,13 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
2612     return err;
2613     }
2614    
2615     +static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
2616     +{
2617     + if (crypto_shash_alg_has_setkey(alg) &&
2618     + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
2619     + crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
2620     +}
2621     +
2622     int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
2623     unsigned int keylen)
2624     {
2625     @@ -65,8 +72,10 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
2626     else
2627     err = shash->setkey(tfm, key, keylen);
2628    
2629     - if (err)
2630     + if (unlikely(err)) {
2631     + shash_set_needkey(tfm, shash);
2632     return err;
2633     + }
2634    
2635     crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
2636     return 0;
2637     @@ -373,7 +382,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
2638     crt->final = shash_async_final;
2639     crt->finup = shash_async_finup;
2640     crt->digest = shash_async_digest;
2641     - crt->setkey = shash_async_setkey;
2642     + if (crypto_shash_alg_has_setkey(alg))
2643     + crt->setkey = shash_async_setkey;
2644    
2645     crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
2646     CRYPTO_TFM_NEED_KEY);
2647     @@ -395,9 +405,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
2648    
2649     hash->descsize = alg->descsize;
2650    
2651     - if (crypto_shash_alg_has_setkey(alg) &&
2652     - !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
2653     - crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
2654     + shash_set_needkey(hash, alg);
2655    
2656     return 0;
2657     }
2658     diff --git a/crypto/skcipher.c b/crypto/skcipher.c
2659     index 2a969296bc24..de09ff60991e 100644
2660     --- a/crypto/skcipher.c
2661     +++ b/crypto/skcipher.c
2662     @@ -585,6 +585,12 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
2663     return crypto_alg_extsize(alg);
2664     }
2665    
2666     +static void skcipher_set_needkey(struct crypto_skcipher *tfm)
2667     +{
2668     + if (tfm->keysize)
2669     + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
2670     +}
2671     +
2672     static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
2673     const u8 *key, unsigned int keylen)
2674     {
2675     @@ -598,8 +604,10 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
2676     err = crypto_blkcipher_setkey(blkcipher, key, keylen);
2677     crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
2678     CRYPTO_TFM_RES_MASK);
2679     - if (err)
2680     + if (unlikely(err)) {
2681     + skcipher_set_needkey(tfm);
2682     return err;
2683     + }
2684    
2685     crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
2686     return 0;
2687     @@ -677,8 +685,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
2688     skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
2689     skcipher->keysize = calg->cra_blkcipher.max_keysize;
2690    
2691     - if (skcipher->keysize)
2692     - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
2693     + skcipher_set_needkey(skcipher);
2694    
2695     return 0;
2696     }
2697     @@ -698,8 +705,10 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
2698     crypto_skcipher_set_flags(tfm,
2699     crypto_ablkcipher_get_flags(ablkcipher) &
2700     CRYPTO_TFM_RES_MASK);
2701     - if (err)
2702     + if (unlikely(err)) {
2703     + skcipher_set_needkey(tfm);
2704     return err;
2705     + }
2706    
2707     crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
2708     return 0;
2709     @@ -776,8 +785,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
2710     sizeof(struct ablkcipher_request);
2711     skcipher->keysize = calg->cra_ablkcipher.max_keysize;
2712    
2713     - if (skcipher->keysize)
2714     - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
2715     + skcipher_set_needkey(skcipher);
2716    
2717     return 0;
2718     }
2719     @@ -820,8 +828,10 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
2720     else
2721     err = cipher->setkey(tfm, key, keylen);
2722    
2723     - if (err)
2724     + if (unlikely(err)) {
2725     + skcipher_set_needkey(tfm);
2726     return err;
2727     + }
2728    
2729     crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
2730     return 0;
2731     @@ -852,8 +862,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
2732     skcipher->ivsize = alg->ivsize;
2733     skcipher->keysize = alg->max_keysize;
2734    
2735     - if (skcipher->keysize)
2736     - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
2737     + skcipher_set_needkey(skcipher);
2738    
2739     if (alg->exit)
2740     skcipher->base.exit = crypto_skcipher_exit_tfm;
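
The aead/ahash/shash/skcipher hunks above all repeat one pattern: a failed ->setkey() must leave the transform unusable, so CRYPTO_TFM_NEED_KEY is re-armed on error instead of possibly staying cleared from an earlier, successful key. A minimal model of that flag state machine (the struct and error value are invented for the demo):

#include <stdio.h>

#define NEED_KEY 0x1

struct toy_tfm {
        unsigned int flags;
};

/* Pattern from this patch: re-arm NEED_KEY on failure so a
 * half-configured tfm cannot be used with a stale key. */
static int toy_setkey(struct toy_tfm *tfm, int backend_err)
{
        if (backend_err) {
                tfm->flags |= NEED_KEY;
                return backend_err;
        }
        tfm->flags &= ~NEED_KEY;
        return 0;
}

int main(void)
{
        struct toy_tfm t = { .flags = 0 };      /* a good key was set earlier */

        toy_setkey(&t, -22);                    /* second setkey fails (-EINVAL) */
        printf("need key again: %d\n", !!(t.flags & NEED_KEY));
        return 0;
}
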
2741     diff --git a/crypto/testmgr.c b/crypto/testmgr.c
2742     index 0f684a414acb..b8e4a3ccbfe0 100644
2743     --- a/crypto/testmgr.c
2744     +++ b/crypto/testmgr.c
2745     @@ -1894,14 +1894,21 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
2746    
2747     err = alg_test_hash(desc, driver, type, mask);
2748     if (err)
2749     - goto out;
2750     + return err;
2751    
2752     tfm = crypto_alloc_shash(driver, type, mask);
2753     if (IS_ERR(tfm)) {
2754     + if (PTR_ERR(tfm) == -ENOENT) {
2755     + /*
2756     + * This crc32c implementation is only available through
2757     + * ahash API, not the shash API, so the remaining part
2758     + * of the test is not applicable to it.
2759     + */
2760     + return 0;
2761     + }
2762     printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
2763     "%ld\n", driver, PTR_ERR(tfm));
2764     - err = PTR_ERR(tfm);
2765     - goto out;
2766     + return PTR_ERR(tfm);
2767     }
2768    
2769     do {
2770     @@ -1928,7 +1935,6 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
2771    
2772     crypto_free_shash(tfm);
2773    
2774     -out:
2775     return err;
2776     }
2777    
2778     diff --git a/crypto/testmgr.h b/crypto/testmgr.h
2779     index e8f47d7b92cd..ca8e8ebef309 100644
2780     --- a/crypto/testmgr.h
2781     +++ b/crypto/testmgr.h
2782     @@ -12870,6 +12870,31 @@ static const struct cipher_testvec aes_cfb_tv_template[] = {
2783     "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
2784     "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
2785     .len = 64,
2786     + .also_non_np = 1,
2787     + .np = 2,
2788     + .tap = { 31, 33 },
2789     + }, { /* > 16 bytes, not a multiple of 16 bytes */
2790     + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
2791     + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
2792     + .klen = 16,
2793     + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
2794     + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2795     + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
2796     + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
2797     + "\xae",
2798     + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
2799     + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
2800     + "\xc8",
2801     + .len = 17,
2802     + }, { /* < 16 bytes */
2803     + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
2804     + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
2805     + .klen = 16,
2806     + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
2807     + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2808     + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
2809     + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
2810     + .len = 7,
2811     },
2812     };
2813    
2814     @@ -16656,8 +16681,7 @@ static const struct cipher_testvec aes_ctr_rfc3686_tv_template[] = {
2815     };
2816    
2817     static const struct cipher_testvec aes_ofb_tv_template[] = {
2818     - /* From NIST Special Publication 800-38A, Appendix F.5 */
2819     - {
2820     + { /* From NIST Special Publication 800-38A, Appendix F.5 */
2821     .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
2822     "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
2823     .klen = 16,
2824     @@ -16680,6 +16704,31 @@ static const struct cipher_testvec aes_ofb_tv_template[] = {
2825     "\x30\x4c\x65\x28\xf6\x59\xc7\x78"
2826     "\x66\xa5\x10\xd9\xc1\xd6\xae\x5e",
2827     .len = 64,
2828     + .also_non_np = 1,
2829     + .np = 2,
2830     + .tap = { 31, 33 },
2831     + }, { /* > 16 bytes, not a multiple of 16 bytes */
2832     + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
2833     + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
2834     + .klen = 16,
2835     + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
2836     + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2837     + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
2838     + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
2839     + "\xae",
2840     + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
2841     + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
2842     + "\x77",
2843     + .len = 17,
2844     + }, { /* < 16 bytes */
2845     + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
2846     + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
2847     + .klen = 16,
2848     + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
2849     + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2850     + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
2851     + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
2852     + .len = 7,
2853     }
2854     };
2855    
2856     diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
2857     index 545e91420cde..8940054d6250 100644
2858     --- a/drivers/acpi/device_sysfs.c
2859     +++ b/drivers/acpi/device_sysfs.c
2860     @@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
2861     {
2862     struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
2863     const union acpi_object *of_compatible, *obj;
2864     + acpi_status status;
2865     int len, count;
2866     int i, nval;
2867     char *c;
2868    
2869     - acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
2870     + status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
2871     + if (ACPI_FAILURE(status))
2872     + return -ENODEV;
2873     +
2874     /* DT strings are all in lower case */
2875     for (c = buf.pointer; *c != '\0'; c++)
2876     *c = tolower(*c);
2877     diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
2878     index e18ade5d74e9..f75f8f870ce3 100644
2879     --- a/drivers/acpi/nfit/core.c
2880     +++ b/drivers/acpi/nfit/core.c
2881     @@ -415,7 +415,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
2882     if (call_pkg) {
2883     int i;
2884    
2885     - if (nfit_mem->family != call_pkg->nd_family)
2886     + if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
2887     return -ENOTTY;
2888    
2889     for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
2890     @@ -424,6 +424,10 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
2891     return call_pkg->nd_command;
2892     }
2893    
2894     + /* In the !call_pkg case, bus commands == bus functions */
2895     + if (!nfit_mem)
2896     + return cmd;
2897     +
2898     /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
2899     if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
2900     return cmd;
2901     @@ -454,17 +458,18 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
2902     if (cmd_rc)
2903     *cmd_rc = -EINVAL;
2904    
2905     + if (cmd == ND_CMD_CALL)
2906     + call_pkg = buf;
2907     + func = cmd_to_func(nfit_mem, cmd, call_pkg);
2908     + if (func < 0)
2909     + return func;
2910     +
2911     if (nvdimm) {
2912     struct acpi_device *adev = nfit_mem->adev;
2913    
2914     if (!adev)
2915     return -ENOTTY;
2916    
2917     - if (cmd == ND_CMD_CALL)
2918     - call_pkg = buf;
2919     - func = cmd_to_func(nfit_mem, cmd, call_pkg);
2920     - if (func < 0)
2921     - return func;
2922     dimm_name = nvdimm_name(nvdimm);
2923     cmd_name = nvdimm_cmd_name(cmd);
2924     cmd_mask = nvdimm_cmd_mask(nvdimm);
2925     @@ -475,12 +480,9 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
2926     } else {
2927     struct acpi_device *adev = to_acpi_dev(acpi_desc);
2928    
2929     - func = cmd;
2930     cmd_name = nvdimm_bus_cmd_name(cmd);
2931     cmd_mask = nd_desc->cmd_mask;
2932     - dsm_mask = cmd_mask;
2933     - if (cmd == ND_CMD_CALL)
2934     - dsm_mask = nd_desc->bus_dsm_mask;
2935     + dsm_mask = nd_desc->bus_dsm_mask;
2936     desc = nd_cmd_bus_desc(cmd);
2937     guid = to_nfit_uuid(NFIT_DEV_BUS);
2938     handle = adev->handle;
2939     @@ -554,6 +556,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
2940     return -EINVAL;
2941     }
2942    
2943     + if (out_obj->type != ACPI_TYPE_BUFFER) {
2944     + dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
2945     + dimm_name, cmd_name, out_obj->type);
2946     + rc = -EINVAL;
2947     + goto out;
2948     + }
2949     +
2950     if (call_pkg) {
2951     call_pkg->nd_fw_size = out_obj->buffer.length;
2952     memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
2953     @@ -572,13 +581,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
2954     return 0;
2955     }
2956    
2957     - if (out_obj->package.type != ACPI_TYPE_BUFFER) {
2958     - dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
2959     - dimm_name, cmd_name, out_obj->type);
2960     - rc = -EINVAL;
2961     - goto out;
2962     - }
2963     -
2964     dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
2965     cmd_name, out_obj->buffer.length);
2966     print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
2967     @@ -1759,14 +1761,14 @@ static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
2968    
2969     __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
2970     {
2971     + struct device *dev = &nfit_mem->adev->dev;
2972     struct nd_intel_smart smart = { 0 };
2973     union acpi_object in_buf = {
2974     - .type = ACPI_TYPE_BUFFER,
2975     - .buffer.pointer = (char *) &smart,
2976     - .buffer.length = sizeof(smart),
2977     + .buffer.type = ACPI_TYPE_BUFFER,
2978     + .buffer.length = 0,
2979     };
2980     union acpi_object in_obj = {
2981     - .type = ACPI_TYPE_PACKAGE,
2982     + .package.type = ACPI_TYPE_PACKAGE,
2983     .package.count = 1,
2984     .package.elements = &in_buf,
2985     };
2986     @@ -1781,8 +1783,15 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
2987     return;
2988    
2989     out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
2990     - if (!out_obj)
2991     + if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
2992     + || out_obj->buffer.length < sizeof(smart)) {
2993     + dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
2994     + dev_name(dev));
2995     + ACPI_FREE(out_obj);
2996     return;
2997     + }
2998     + memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
2999     + ACPI_FREE(out_obj);
3000    
3001     if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
3002     if (smart.shutdown_state)
3003     @@ -1793,7 +1802,6 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
3004     set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
3005     nfit_mem->dirty_shutdown = smart.shutdown_count;
3006     }
3007     - ACPI_FREE(out_obj);
3008     }
3009    
3010     static void populate_shutdown_status(struct nfit_mem *nfit_mem)
3011     @@ -1915,18 +1923,19 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
3012     | 1 << ND_CMD_SET_CONFIG_DATA;
3013     if (family == NVDIMM_FAMILY_INTEL
3014     && (dsm_mask & label_mask) == label_mask)
3015     - return 0;
3016     -
3017     - if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
3018     - && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
3019     - dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
3020     - set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
3021     - }
3022     + /* skip _LS{I,R,W} enabling */;
3023     + else {
3024     + if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
3025     + && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
3026     + dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
3027     + set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
3028     + }
3029    
3030     - if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
3031     - && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
3032     - dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
3033     - set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
3034     + if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
3035     + && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
3036     + dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
3037     + set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
3038     + }
3039     }
3040    
3041     populate_shutdown_status(nfit_mem);
3042     @@ -3004,14 +3013,16 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
3043     {
3044     int rc;
3045    
3046     - if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
3047     + if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3048     return acpi_nfit_register_region(acpi_desc, nfit_spa);
3049    
3050     set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
3051     - set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
3052     + if (!no_init_ars)
3053     + set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
3054    
3055     switch (acpi_nfit_query_poison(acpi_desc)) {
3056     case 0:
3057     + case -ENOSPC:
3058     case -EAGAIN:
3059     rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
3060     /* shouldn't happen, try again later */
3061     @@ -3036,7 +3047,6 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
3062     break;
3063     case -EBUSY:
3064     case -ENOMEM:
3065     - case -ENOSPC:
3066     /*
3067     * BIOS was using ARS, wait for it to complete (or
3068     * resources to become available) and then perform our
3069     diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
3070     index 5fa1898755a3..7c84f64c74f7 100644
3071     --- a/drivers/base/power/wakeup.c
3072     +++ b/drivers/base/power/wakeup.c
3073     @@ -118,7 +118,6 @@ void wakeup_source_drop(struct wakeup_source *ws)
3074     if (!ws)
3075     return;
3076    
3077     - del_timer_sync(&ws->timer);
3078     __pm_relax(ws);
3079     }
3080     EXPORT_SYMBOL_GPL(wakeup_source_drop);
3081     @@ -205,6 +204,13 @@ void wakeup_source_remove(struct wakeup_source *ws)
3082     list_del_rcu(&ws->entry);
3083     raw_spin_unlock_irqrestore(&events_lock, flags);
3084     synchronize_srcu(&wakeup_srcu);
3085     +
3086     + del_timer_sync(&ws->timer);
3087     + /*
3088     + * Clear timer.function to make wakeup_source_not_registered() treat
3089     + * this wakeup source as not registered.
3090     + */
3091     + ws->timer.function = NULL;
3092     }
3093     EXPORT_SYMBOL_GPL(wakeup_source_remove);
3094    
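The two wakeup hunks above move the timer teardown from wakeup_source_drop()
to wakeup_source_remove(), after the SRCU grace period, and then clear
timer.function. The registration check this relies on compares that function
pointer; a paraphrased sketch of the helper (the real one lives in the same
file):

        static bool wakeup_source_not_registered(struct wakeup_source *ws)
        {
                /* The timer is armed with pm_wakeup_timer_fn on add and
                 * cleared on remove, so the handler doubles as a
                 * "registered" flag.
                 */
                return ws->timer.function != pm_wakeup_timer_fn;
        }
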
3095     diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
3096     index 52f6152d1fcb..7ae52c17618e 100644
3097     --- a/drivers/char/ipmi/ipmi_si.h
3098     +++ b/drivers/char/ipmi/ipmi_si.h
3099     @@ -25,7 +25,9 @@ void ipmi_irq_finish_setup(struct si_sm_io *io);
3100     int ipmi_si_remove_by_dev(struct device *dev);
3101     void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
3102     unsigned long addr);
3103     -int ipmi_si_hardcode_find_bmc(void);
3104     +void ipmi_hardcode_init(void);
3105     +void ipmi_si_hardcode_exit(void);
3106     +int ipmi_si_hardcode_match(int addr_type, unsigned long addr);
3107     void ipmi_si_platform_init(void);
3108     void ipmi_si_platform_shutdown(void);
3109    
3110     diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
3111     index 487642809c58..1e5783961b0d 100644
3112     --- a/drivers/char/ipmi/ipmi_si_hardcode.c
3113     +++ b/drivers/char/ipmi/ipmi_si_hardcode.c
3114     @@ -3,6 +3,7 @@
3115     #define pr_fmt(fmt) "ipmi_hardcode: " fmt
3116    
3117     #include <linux/moduleparam.h>
3118     +#include <linux/platform_device.h>
3119     #include "ipmi_si.h"
3120    
3121     /*
3122     @@ -12,23 +13,22 @@
3123    
3124     #define SI_MAX_PARMS 4
3125    
3126     -static char *si_type[SI_MAX_PARMS];
3127     #define MAX_SI_TYPE_STR 30
3128     -static char si_type_str[MAX_SI_TYPE_STR];
3129     +static char si_type_str[MAX_SI_TYPE_STR] __initdata;
3130     static unsigned long addrs[SI_MAX_PARMS];
3131     static unsigned int num_addrs;
3132     static unsigned int ports[SI_MAX_PARMS];
3133     static unsigned int num_ports;
3134     -static int irqs[SI_MAX_PARMS];
3135     -static unsigned int num_irqs;
3136     -static int regspacings[SI_MAX_PARMS];
3137     -static unsigned int num_regspacings;
3138     -static int regsizes[SI_MAX_PARMS];
3139     -static unsigned int num_regsizes;
3140     -static int regshifts[SI_MAX_PARMS];
3141     -static unsigned int num_regshifts;
3142     -static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
3143     -static unsigned int num_slave_addrs;
3144     +static int irqs[SI_MAX_PARMS] __initdata;
3145     +static unsigned int num_irqs __initdata;
3146     +static int regspacings[SI_MAX_PARMS] __initdata;
3147     +static unsigned int num_regspacings __initdata;
3148     +static int regsizes[SI_MAX_PARMS] __initdata;
3149     +static unsigned int num_regsizes __initdata;
3150     +static int regshifts[SI_MAX_PARMS] __initdata;
3151     +static unsigned int num_regshifts __initdata;
3152     +static int slave_addrs[SI_MAX_PARMS] __initdata;
3153     +static unsigned int num_slave_addrs __initdata;
3154    
3155     module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
3156     MODULE_PARM_DESC(type, "Defines the type of each interface, each"
3157     @@ -73,12 +73,133 @@ MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
3158     " overridden by this parm. This is an array indexed"
3159     " by interface number.");
3160    
3161     -int ipmi_si_hardcode_find_bmc(void)
3162     +static struct platform_device *ipmi_hc_pdevs[SI_MAX_PARMS];
3163     +
3164     +static void __init ipmi_hardcode_init_one(const char *si_type_str,
3165     + unsigned int i,
3166     + unsigned long addr,
3167     + unsigned int flags)
3168     {
3169     - int ret = -ENODEV;
3170     - int i;
3171     - struct si_sm_io io;
3172     + struct platform_device *pdev;
3173     + unsigned int num_r = 1, size;
3174     + struct resource r[4];
3175     + struct property_entry p[6];
3176     + enum si_type si_type;
3177     + unsigned int regspacing, regsize;
3178     + int rv;
3179     +
3180     + memset(p, 0, sizeof(p));
3181     + memset(r, 0, sizeof(r));
3182     +
3183     + if (!si_type_str || !*si_type_str || strcmp(si_type_str, "kcs") == 0) {
3184     + size = 2;
3185     + si_type = SI_KCS;
3186     + } else if (strcmp(si_type_str, "smic") == 0) {
3187     + size = 2;
3188     + si_type = SI_SMIC;
3189     + } else if (strcmp(si_type_str, "bt") == 0) {
3190     + size = 3;
3191     + si_type = SI_BT;
3192     + } else if (strcmp(si_type_str, "invalid") == 0) {
3193     + /*
3194     + * Allow a firmware-specified interface to be
3195     + * disabled.
3196     + */
3197     + size = 1;
3198     + si_type = SI_TYPE_INVALID;
3199     + } else {
3200     + pr_warn("Interface type specified for interface %d, was invalid: %s\n",
3201     + i, si_type_str);
3202     + return;
3203     + }
3204     +
3205     + regsize = regsizes[i];
3206     + if (regsize == 0)
3207     + regsize = DEFAULT_REGSIZE;
3208     +
3209     + p[0] = PROPERTY_ENTRY_U8("ipmi-type", si_type);
3210     + p[1] = PROPERTY_ENTRY_U8("slave-addr", slave_addrs[i]);
3211     + p[2] = PROPERTY_ENTRY_U8("addr-source", SI_HARDCODED);
3212     + p[3] = PROPERTY_ENTRY_U8("reg-shift", regshifts[i]);
3213     + p[4] = PROPERTY_ENTRY_U8("reg-size", regsize);
3214     + /* Last entry must be left NULL to terminate it. */
3215     +
3216     + /*
3217     + * Register spacing is derived from the resources in
3218     + * the IPMI platform code.
3219     + */
3220     + regspacing = regspacings[i];
3221     + if (regspacing == 0)
3222     + regspacing = regsize;
3223     +
3224     + r[0].start = addr;
3225     + r[0].end = r[0].start + regsize - 1;
3226     + r[0].name = "IPMI Address 1";
3227     + r[0].flags = flags;
3228     +
3229     + if (size > 1) {
3230     + r[1].start = r[0].start + regspacing;
3231     + r[1].end = r[1].start + regsize - 1;
3232     + r[1].name = "IPMI Address 2";
3233     + r[1].flags = flags;
3234     + num_r++;
3235     + }
3236     +
3237     + if (size > 2) {
3238     + r[2].start = r[1].start + regspacing;
3239     + r[2].end = r[2].start + regsize - 1;
3240     + r[2].name = "IPMI Address 3";
3241     + r[2].flags = flags;
3242     + num_r++;
3243     + }
3244     +
3245     + if (irqs[i]) {
3246     + r[num_r].start = irqs[i];
3247     + r[num_r].end = irqs[i];
3248     + r[num_r].name = "IPMI IRQ";
3249     + r[num_r].flags = IORESOURCE_IRQ;
3250     + num_r++;
3251     + }
3252     +
3253     + pdev = platform_device_alloc("hardcode-ipmi-si", i);
3254     + if (!pdev) {
3255     + pr_err("Error allocating IPMI platform device %d\n", i);
3256     + return;
3257     + }
3258     +
3259     + rv = platform_device_add_resources(pdev, r, num_r);
3260     + if (rv) {
3261     + dev_err(&pdev->dev,
3262     + "Unable to add hard-code resources: %d\n", rv);
3263     + goto err;
3264     + }
3265     +
3266     + rv = platform_device_add_properties(pdev, p);
3267     + if (rv) {
3268     + dev_err(&pdev->dev,
3269     + "Unable to add hard-code properties: %d\n", rv);
3270     + goto err;
3271     + }
3272     +
3273     + rv = platform_device_add(pdev);
3274     + if (rv) {
3275     + dev_err(&pdev->dev,
3276     + "Unable to add hard-code device: %d\n", rv);
3277     + goto err;
3278     + }
3279     +
3280     + ipmi_hc_pdevs[i] = pdev;
3281     + return;
3282     +
3283     +err:
3284     + platform_device_put(pdev);
3285     +}
3286     +
3287     +void __init ipmi_hardcode_init(void)
3288     +{
3289     + unsigned int i;
3290     char *str;
3291     + char *si_type[SI_MAX_PARMS];
3292    
3293     /* Parse out the si_type string into its components. */
3294     str = si_type_str;
3295     @@ -95,54 +216,45 @@ int ipmi_si_hardcode_find_bmc(void)
3296     }
3297     }
3298    
3299     - memset(&io, 0, sizeof(io));
3300     for (i = 0; i < SI_MAX_PARMS; i++) {
3301     - if (!ports[i] && !addrs[i])
3302     - continue;
3303     -
3304     - io.addr_source = SI_HARDCODED;
3305     - pr_info("probing via hardcoded address\n");
3306     -
3307     - if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
3308     - io.si_type = SI_KCS;
3309     - } else if (strcmp(si_type[i], "smic") == 0) {
3310     - io.si_type = SI_SMIC;
3311     - } else if (strcmp(si_type[i], "bt") == 0) {
3312     - io.si_type = SI_BT;
3313     - } else {
3314     - pr_warn("Interface type specified for interface %d, was invalid: %s\n",
3315     - i, si_type[i]);
3316     - continue;
3317     - }
3318     + if (i < num_ports && ports[i])
3319     + ipmi_hardcode_init_one(si_type[i], i, ports[i],
3320     + IORESOURCE_IO);
3321     + if (i < num_addrs && addrs[i])
3322     + ipmi_hardcode_init_one(si_type[i], i, addrs[i],
3323     + IORESOURCE_MEM);
3324     + }
3325     +}
3326    
3327     - if (ports[i]) {
3328     - /* An I/O port */
3329     - io.addr_data = ports[i];
3330     - io.addr_type = IPMI_IO_ADDR_SPACE;
3331     - } else if (addrs[i]) {
3332     - /* A memory port */
3333     - io.addr_data = addrs[i];
3334     - io.addr_type = IPMI_MEM_ADDR_SPACE;
3335     - } else {
3336     - pr_warn("Interface type specified for interface %d, but port and address were not set or set to zero\n",
3337     - i);
3338     - continue;
3339     - }
3340     +void ipmi_si_hardcode_exit(void)
3341     +{
3342     + unsigned int i;
3343    
3344     - io.addr = NULL;
3345     - io.regspacing = regspacings[i];
3346     - if (!io.regspacing)
3347     - io.regspacing = DEFAULT_REGSPACING;
3348     - io.regsize = regsizes[i];
3349     - if (!io.regsize)
3350     - io.regsize = DEFAULT_REGSIZE;
3351     - io.regshift = regshifts[i];
3352     - io.irq = irqs[i];
3353     - if (io.irq)
3354     - io.irq_setup = ipmi_std_irq_setup;
3355     - io.slave_addr = slave_addrs[i];
3356     -
3357     - ret = ipmi_si_add_smi(&io);
3358     + for (i = 0; i < SI_MAX_PARMS; i++) {
3359     + if (ipmi_hc_pdevs[i])
3360     + platform_device_unregister(ipmi_hc_pdevs[i]);
3361     }
3362     - return ret;
3363     +}
3364     +
3365     +/*
3366     + * Returns true if the given address exists as a hardcoded address,
3367     + * false if not.
3368     + */
3369     +int ipmi_si_hardcode_match(int addr_type, unsigned long addr)
3370     +{
3371     + unsigned int i;
3372     +
3373     + if (addr_type == IPMI_IO_ADDR_SPACE) {
3374     + for (i = 0; i < num_ports; i++) {
3375     + if (ports[i] == addr)
3376     + return 1;
3377     + }
3378     + } else {
3379     + for (i = 0; i < num_addrs; i++) {
3380     + if (addrs[i] == addr)
3381     + return 1;
3382     + }
3383     + }
3384     +
3385     + return 0;
3386     }
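With this rework the module parameters no longer feed a si_sm_io directly;
each hardcoded interface becomes a "hardcode-ipmi-si" platform device whose
resources carry the address and IRQ and whose properties carry the type,
register geometry and slave address, all consumed later by
platform_ipmi_probe(). Illustrative usage, with the comma-separated
parameter syntax implied by the MODULE_PARM_DESC strings above (the values
are made up):

        modprobe ipmi_si type=kcs,bt ports=0xca2,0xe4

That would register two platform devices: a KCS interface at I/O port 0xca2
and a BT interface at 0xe4.
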
3387     diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
3388     index dc8603d34320..5294abc4c96c 100644
3389     --- a/drivers/char/ipmi/ipmi_si_intf.c
3390     +++ b/drivers/char/ipmi/ipmi_si_intf.c
3391     @@ -1862,6 +1862,18 @@ int ipmi_si_add_smi(struct si_sm_io *io)
3392     int rv = 0;
3393     struct smi_info *new_smi, *dup;
3394    
3395     + /*
3396     + * If the user gave us a hard-coded device at the same
3397     + * address, they presumably want us to use it and not what is
3398     + * in the firmware.
3399     + */
3400     + if (io->addr_source != SI_HARDCODED &&
3401     + ipmi_si_hardcode_match(io->addr_type, io->addr_data)) {
3402     + dev_info(io->dev,
3403     + "Hard-coded device at this address already exists\n");
3404     + return -ENODEV;
3405     + }
3406     +
3407     if (!io->io_setup) {
3408     if (io->addr_type == IPMI_IO_ADDR_SPACE) {
3409     io->io_setup = ipmi_si_port_setup;
3410     @@ -2085,11 +2097,16 @@ static int try_smi_init(struct smi_info *new_smi)
3411     WARN_ON(new_smi->io.dev->init_name != NULL);
3412    
3413     out_err:
3414     + if (rv && new_smi->io.io_cleanup) {
3415     + new_smi->io.io_cleanup(&new_smi->io);
3416     + new_smi->io.io_cleanup = NULL;
3417     + }
3418     +
3419     kfree(init_name);
3420     return rv;
3421     }
3422    
3423     -static int init_ipmi_si(void)
3424     +static int __init init_ipmi_si(void)
3425     {
3426     struct smi_info *e;
3427     enum ipmi_addr_src type = SI_INVALID;
3428     @@ -2097,11 +2114,9 @@ static int init_ipmi_si(void)
3429     if (initialized)
3430     return 0;
3431    
3432     - pr_info("IPMI System Interface driver\n");
3433     + ipmi_hardcode_init();
3434    
3435     - /* If the user gave us a device, they presumably want us to use it */
3436     - if (!ipmi_si_hardcode_find_bmc())
3437     - goto do_scan;
3438     + pr_info("IPMI System Interface driver\n");
3439    
3440     ipmi_si_platform_init();
3441    
3442     @@ -2113,7 +2128,6 @@ static int init_ipmi_si(void)
3443     with multiple BMCs we assume that there will be several instances
3444     of a given type so if we succeed in registering a type then also
3445     try to register everything else of the same type */
3446     -do_scan:
3447     mutex_lock(&smi_infos_lock);
3448     list_for_each_entry(e, &smi_infos, link) {
3449     /* Try to register a device if it has an IRQ and we either
3450     @@ -2299,6 +2313,8 @@ static void cleanup_ipmi_si(void)
3451     list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3452     cleanup_one_si(e);
3453     mutex_unlock(&smi_infos_lock);
3454     +
3455     + ipmi_si_hardcode_exit();
3456     }
3457     module_exit(cleanup_ipmi_si);
3458    
3459     diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c
3460     index fd0ec8d6bf0e..75583612ab10 100644
3461     --- a/drivers/char/ipmi/ipmi_si_mem_io.c
3462     +++ b/drivers/char/ipmi/ipmi_si_mem_io.c
3463     @@ -81,8 +81,6 @@ int ipmi_si_mem_setup(struct si_sm_io *io)
3464     if (!addr)
3465     return -ENODEV;
3466    
3467     - io->io_cleanup = mem_cleanup;
3468     -
3469     /*
3470     * Figure out the actual readb/readw/readl/etc routine to use based
3471     * upon the register size.
3472     @@ -141,5 +139,8 @@ int ipmi_si_mem_setup(struct si_sm_io *io)
3473     mem_region_cleanup(io, io->io_size);
3474     return -EIO;
3475     }
3476     +
3477     + io->io_cleanup = mem_cleanup;
3478     +
3479     return 0;
3480     }
3481     diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
3482     index 15cf819f884f..8158d03542f4 100644
3483     --- a/drivers/char/ipmi/ipmi_si_platform.c
3484     +++ b/drivers/char/ipmi/ipmi_si_platform.c
3485     @@ -128,8 +128,6 @@ ipmi_get_info_from_resources(struct platform_device *pdev,
3486     if (res_second->start > io->addr_data)
3487     io->regspacing = res_second->start - io->addr_data;
3488     }
3489     - io->regsize = DEFAULT_REGSIZE;
3490     - io->regshift = 0;
3491    
3492     return res;
3493     }
3494     @@ -137,7 +135,7 @@ ipmi_get_info_from_resources(struct platform_device *pdev,
3495     static int platform_ipmi_probe(struct platform_device *pdev)
3496     {
3497     struct si_sm_io io;
3498     - u8 type, slave_addr, addr_source;
3499     + u8 type, slave_addr, addr_source, regsize, regshift;
3500     int rv;
3501    
3502     rv = device_property_read_u8(&pdev->dev, "addr-source", &addr_source);
3503     @@ -149,7 +147,7 @@ static int platform_ipmi_probe(struct platform_device *pdev)
3504     if (addr_source == SI_SMBIOS) {
3505     if (!si_trydmi)
3506     return -ENODEV;
3507     - } else {
3508     + } else if (addr_source != SI_HARDCODED) {
3509     if (!si_tryplatform)
3510     return -ENODEV;
3511     }
3512     @@ -169,11 +167,23 @@ static int platform_ipmi_probe(struct platform_device *pdev)
3513     case SI_BT:
3514     io.si_type = type;
3515     break;
3516     + case SI_TYPE_INVALID: /* User disabled this in hardcode. */
3517     + return -ENODEV;
3518     default:
3519     dev_err(&pdev->dev, "ipmi-type property is invalid\n");
3520     return -EINVAL;
3521     }
3522    
3523     + io.regsize = DEFAULT_REGSIZE;
3524     + rv = device_property_read_u8(&pdev->dev, "reg-size", &regsize);
3525     + if (!rv)
3526     + io.regsize = regsize;
3527     +
3528     + io.regshift = 0;
3529     + rv = device_property_read_u8(&pdev->dev, "reg-shift", &regshift);
3530     + if (!rv)
3531     + io.regshift = regshift;
3532     +
3533     if (!ipmi_get_info_from_resources(pdev, &io))
3534     return -EINVAL;
3535    
3536     @@ -193,7 +203,8 @@ static int platform_ipmi_probe(struct platform_device *pdev)
3537    
3538     io.dev = &pdev->dev;
3539    
3540     - pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
3541     + pr_info("ipmi_si: %s: %s %#lx regsize %d spacing %d irq %d\n",
3542     + ipmi_addr_src_to_str(addr_source),
3543     (io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
3544     io.addr_data, io.regsize, io.regspacing, io.irq);
3545    
3546     @@ -358,6 +369,9 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
3547     goto err_free;
3548     }
3549    
3550     + io.regsize = DEFAULT_REGSIZE;
3551     + io.regshift = 0;
3552     +
3553     res = ipmi_get_info_from_resources(pdev, &io);
3554     if (!res) {
3555     rv = -EINVAL;
3556     @@ -420,8 +434,9 @@ static int ipmi_remove(struct platform_device *pdev)
3557     }
3558    
3559     static const struct platform_device_id si_plat_ids[] = {
3560     - { "dmi-ipmi-si", 0 },
3561     - { }
3562     + { "dmi-ipmi-si", 0 },
3563     + { "hardcode-ipmi-si", 0 },
3564     + { }
3565     };
3566    
3567     struct platform_driver ipmi_platform_driver = {
3568     diff --git a/drivers/char/ipmi/ipmi_si_port_io.c b/drivers/char/ipmi/ipmi_si_port_io.c
3569     index ef6dffcea9fa..03924c32b6e9 100644
3570     --- a/drivers/char/ipmi/ipmi_si_port_io.c
3571     +++ b/drivers/char/ipmi/ipmi_si_port_io.c
3572     @@ -68,8 +68,6 @@ int ipmi_si_port_setup(struct si_sm_io *io)
3573     if (!addr)
3574     return -ENODEV;
3575    
3576     - io->io_cleanup = port_cleanup;
3577     -
3578     /*
3579     * Figure out the actual inb/inw/inl/etc routine to use based
3580     * upon the register size.
3581     @@ -109,5 +107,8 @@ int ipmi_si_port_setup(struct si_sm_io *io)
3582     return -EIO;
3583     }
3584     }
3585     +
3586     + io->io_cleanup = port_cleanup;
3587     +
3588     return 0;
3589     }
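Both ipmi_si_mem_setup() and ipmi_si_port_setup() now install
io->io_cleanup only once every step has succeeded, which pairs with the new
try_smi_init() error path that calls io_cleanup when it is set: a
half-initialized io is undone locally instead of being cleaned up twice.
The shape of the pattern, with hypothetical helper names:

        int setup(struct si_sm_io *io)
        {
                if (claim_region(io))             /* may fail */
                        return -ENODEV;
                if (map_registers(io)) {          /* may fail */
                        release_region_local(io); /* undo locally */
                        return -EIO;
                }
                io->io_cleanup = cleanup;   /* callers may undo us now */
                return 0;
        }
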
3590     diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
3591     index 64dc560859f2..13dc614b7ebc 100644
3592     --- a/drivers/char/tpm/st33zp24/st33zp24.c
3593     +++ b/drivers/char/tpm/st33zp24/st33zp24.c
3594     @@ -436,7 +436,7 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
3595     goto out_err;
3596     }
3597    
3598     - return len;
3599     + return 0;
3600     out_err:
3601     st33zp24_cancel(chip);
3602     release_locality(chip);
3603     diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
3604     index d9439f9abe78..88d2e01a651d 100644
3605     --- a/drivers/char/tpm/tpm-interface.c
3606     +++ b/drivers/char/tpm/tpm-interface.c
3607     @@ -230,10 +230,19 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
3608     if (rc < 0) {
3609     if (rc != -EPIPE)
3610     dev_err(&chip->dev,
3611     - "%s: tpm_send: error %d\n", __func__, rc);
3612     + "%s: send(): error %d\n", __func__, rc);
3613     goto out;
3614     }
3615    
3616     + /* A sanity check. send() should just return zero on success, i.e.
3617     + * not the command length.
3618     + */
3619     + if (rc > 0) {
3620     + dev_warn(&chip->dev,
3621     + "%s: send(): invalid value %d\n", __func__, rc);
3622     + rc = 0;
3623     + }
3624     +
3625     if (chip->flags & TPM_CHIP_FLAG_IRQ)
3626     goto out_recv;
3627    
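This series changes the ->send() contract across all of the TPM drivers
below: success is now 0, never the number of bytes queued, and the core
warns about and normalizes any positive return. A conforming implementation,
sketched with an assumed write_to_device() helper:

        static int example_tpm_send(struct tpm_chip *chip, u8 *buf,
                                    size_t count)
        {
                if (write_to_device(chip, buf, count) < 0) /* assumed helper */
                        return -EIO;    /* -errno on failure */
                return 0;               /* 0 on success, never the count */
        }
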
3628     diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
3629     index 66a14526aaf4..a290b30a0c35 100644
3630     --- a/drivers/char/tpm/tpm_atmel.c
3631     +++ b/drivers/char/tpm/tpm_atmel.c
3632     @@ -105,7 +105,7 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
3633     iowrite8(buf[i], priv->iobase);
3634     }
3635    
3636     - return count;
3637     + return 0;
3638     }
3639    
3640     static void tpm_atml_cancel(struct tpm_chip *chip)
3641     diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
3642     index 36952ef98f90..763fc7e6c005 100644
3643     --- a/drivers/char/tpm/tpm_crb.c
3644     +++ b/drivers/char/tpm/tpm_crb.c
3645     @@ -287,19 +287,29 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
3646     struct crb_priv *priv = dev_get_drvdata(&chip->dev);
3647     unsigned int expected;
3648    
3649     - /* sanity check */
3650     - if (count < 6)
3651     + /* A sanity check that the upper layer wants to get at least the header
3652     + * as that is the minimum size for any TPM response.
3653     + */
3654     + if (count < TPM_HEADER_SIZE)
3655     return -EIO;
3656    
3657     + /* If this bit is set, according to the spec, the TPM is in an
3658     + * unrecoverable condition.
3659     + */
3660     if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR)
3661     return -EIO;
3662    
3663     - memcpy_fromio(buf, priv->rsp, 6);
3664     - expected = be32_to_cpup((__be32 *) &buf[2]);
3665     - if (expected > count || expected < 6)
3666     + /* Read the first 8 bytes in order to get the length of the response.
3667     + * We read exactly a quad word in order to make sure that the remaining
3668     + * reads will be aligned.
3669     + */
3670     + memcpy_fromio(buf, priv->rsp, 8);
3671     +
3672     + expected = be32_to_cpup((__be32 *)&buf[2]);
3673     + if (expected > count || expected < TPM_HEADER_SIZE)
3674     return -EIO;
3675    
3676     - memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
3677     + memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8);
3678    
3679     return expected;
3680     }
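crb_recv() now reads an aligned 8-byte prefix instead of 6 bytes and takes
the total response length from the big-endian field at offset 2 of the TPM
header (2-byte tag, 4-byte length, 4-byte return code, hence a
TPM_HEADER_SIZE of 10). The parse that the bounds check relies on, in
isolation:

        u8 hdr[8];
        u32 total;

        memcpy_fromio(hdr, priv->rsp, 8);          /* quad-word aligned read */
        total = be32_to_cpup((__be32 *)&hdr[2]);   /* length includes header */
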
3681     diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
3682     index 95ce2e9ccdc6..32a8e27c5382 100644
3683     --- a/drivers/char/tpm/tpm_i2c_atmel.c
3684     +++ b/drivers/char/tpm/tpm_i2c_atmel.c
3685     @@ -65,7 +65,11 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
3686     dev_dbg(&chip->dev,
3687     "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
3688     (int)min_t(size_t, 64, len), buf, len, status);
3689     - return status;
3690     +
3691     + if (status < 0)
3692     + return status;
3693     +
3694     + return 0;
3695     }
3696    
3697     static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
3698     diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
3699     index 9086edc9066b..977fd42daa1b 100644
3700     --- a/drivers/char/tpm/tpm_i2c_infineon.c
3701     +++ b/drivers/char/tpm/tpm_i2c_infineon.c
3702     @@ -587,7 +587,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
3703     /* go and do it */
3704     iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1);
3705    
3706     - return len;
3707     + return 0;
3708     out_err:
3709     tpm_tis_i2c_ready(chip);
3710     /* The TPM needs some time to clean up here,
3711     diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
3712     index 217f7f1cbde8..058220edb8b3 100644
3713     --- a/drivers/char/tpm/tpm_i2c_nuvoton.c
3714     +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
3715     @@ -467,7 +467,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
3716     }
3717    
3718     dev_dbg(dev, "%s() -> %zd\n", __func__, len);
3719     - return len;
3720     + return 0;
3721     }
3722    
3723     static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
3724     diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
3725     index 07b5a487d0c8..757ca45b39b8 100644
3726     --- a/drivers/char/tpm/tpm_ibmvtpm.c
3727     +++ b/drivers/char/tpm/tpm_ibmvtpm.c
3728     @@ -139,14 +139,14 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
3729     }
3730    
3731     /**
3732     - * tpm_ibmvtpm_send - Send tpm request
3733     - *
3734     + * tpm_ibmvtpm_send() - Send a TPM command
3735     * @chip: tpm chip struct
3736     * @buf: buffer contains data to send
3737     * @count: size of buffer
3738     *
3739     * Return:
3740     - * Number of bytes sent or < 0 on error.
3741     + * 0 on success,
3742     + * -errno on error
3743     */
3744     static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
3745     {
3746     @@ -192,7 +192,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
3747     rc = 0;
3748     ibmvtpm->tpm_processing_cmd = false;
3749     } else
3750     - rc = count;
3751     + rc = 0;
3752    
3753     spin_unlock(&ibmvtpm->rtce_lock);
3754     return rc;
3755     diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
3756     index d8f10047fbba..97f6d4fe0aee 100644
3757     --- a/drivers/char/tpm/tpm_infineon.c
3758     +++ b/drivers/char/tpm/tpm_infineon.c
3759     @@ -354,7 +354,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
3760     for (i = 0; i < count; i++) {
3761     wait_and_send(chip, buf[i]);
3762     }
3763     - return count;
3764     + return 0;
3765     }
3766    
3767     static void tpm_inf_cancel(struct tpm_chip *chip)
3768     diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
3769     index 5d6cce74cd3f..9bee3c5eb4bf 100644
3770     --- a/drivers/char/tpm/tpm_nsc.c
3771     +++ b/drivers/char/tpm/tpm_nsc.c
3772     @@ -226,7 +226,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
3773     }
3774     outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND);
3775    
3776     - return count;
3777     + return 0;
3778     }
3779    
3780     static void tpm_nsc_cancel(struct tpm_chip *chip)
3781     diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
3782     index bf7e49cfa643..bb0c2e160562 100644
3783     --- a/drivers/char/tpm/tpm_tis_core.c
3784     +++ b/drivers/char/tpm/tpm_tis_core.c
3785     @@ -481,7 +481,7 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
3786     goto out_err;
3787     }
3788     }
3789     - return len;
3790     + return 0;
3791     out_err:
3792     tpm_tis_ready(chip);
3793     return rc;
3794     diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
3795     index 87a0ce47f201..ecbb63f8d231 100644
3796     --- a/drivers/char/tpm/tpm_vtpm_proxy.c
3797     +++ b/drivers/char/tpm/tpm_vtpm_proxy.c
3798     @@ -335,7 +335,6 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
3799     static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
3800     {
3801     struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
3802     - int rc = 0;
3803    
3804     if (count > sizeof(proxy_dev->buffer)) {
3805     dev_err(&chip->dev,
3806     @@ -366,7 +365,7 @@ static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
3807    
3808     wake_up_interruptible(&proxy_dev->wq);
3809    
3810     - return rc;
3811     + return 0;
3812     }
3813    
3814     static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
3815     diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
3816     index b150f87f38f5..5a327eb7f63a 100644
3817     --- a/drivers/char/tpm/xen-tpmfront.c
3818     +++ b/drivers/char/tpm/xen-tpmfront.c
3819     @@ -173,7 +173,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
3820     return -ETIME;
3821     }
3822    
3823     - return count;
3824     + return 0;
3825     }
3826    
3827     static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
3828     diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
3829     index ea846f77750b..0cad5748bf0e 100644
3830     --- a/drivers/clk/clk-twl6040.c
3831     +++ b/drivers/clk/clk-twl6040.c
3832     @@ -41,6 +41,43 @@ static int twl6040_pdmclk_is_prepared(struct clk_hw *hw)
3833     return pdmclk->enabled;
3834     }
3835    
3836     +static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk,
3837     + unsigned int reg)
3838     +{
3839     + const u8 reset_mask = TWL6040_HPLLRST; /* Same for HPPLL and LPPLL */
3840     + int ret;
3841     +
3842     + ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask);
3843     + if (ret < 0)
3844     + return ret;
3845     +
3846     + ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask);
3847     + if (ret < 0)
3848     + return ret;
3849     +
3850     + return 0;
3851     +}
3852     +
3853     +/*
3854     + * TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At
3855     + * Cold Temperature". This seems to affect cold boot and deeper idle
3856     + * states. The workaround consists of resetting HPPLL and LPPLL.
3857     + */
3858     +static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk)
3859     +{
3860     + int ret;
3861     +
3862     + ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL);
3863     + if (ret)
3864     + return ret;
3865     +
3866     + ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL);
3867     + if (ret)
3868     + return ret;
3869     +
3870     + return 0;
3871     +}
3872     +
3873     static int twl6040_pdmclk_prepare(struct clk_hw *hw)
3874     {
3875     struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
3876     @@ -48,8 +85,20 @@ static int twl6040_pdmclk_prepare(struct clk_hw *hw)
3877     int ret;
3878    
3879     ret = twl6040_power(pdmclk->twl6040, 1);
3880     - if (!ret)
3881     - pdmclk->enabled = 1;
3882     + if (ret)
3883     + return ret;
3884     +
3885     + ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk);
3886     + if (ret)
3887     + goto out_err;
3888     +
3889     + pdmclk->enabled = 1;
3890     +
3891     + return 0;
3892     +
3893     +out_err:
3894     + dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret);
3895     + twl6040_power(pdmclk->twl6040, 0);
3896    
3897     return ret;
3898     }
3899     diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
3900     index 5ef7d9ba2195..b40160eb3372 100644
3901     --- a/drivers/clk/ingenic/cgu.c
3902     +++ b/drivers/clk/ingenic/cgu.c
3903     @@ -426,16 +426,16 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
3904     struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
3905     struct ingenic_cgu *cgu = ingenic_clk->cgu;
3906     const struct ingenic_cgu_clk_info *clk_info;
3907     - long rate = *parent_rate;
3908     + unsigned int div = 1;
3909    
3910     clk_info = &cgu->clock_info[ingenic_clk->idx];
3911    
3912     if (clk_info->type & CGU_CLK_DIV)
3913     - rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
3914     + div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
3915     else if (clk_info->type & CGU_CLK_FIXDIV)
3916     - rate /= clk_info->fixdiv.div;
3917     + div = clk_info->fixdiv.div;
3918    
3919     - return rate;
3920     + return DIV_ROUND_UP(*parent_rate, div);
3921     }
3922    
3923     static int
3924     @@ -455,7 +455,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
3925    
3926     if (clk_info->type & CGU_CLK_DIV) {
3927     div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
3928     - rate = parent_rate / div;
3929     + rate = DIV_ROUND_UP(parent_rate, div);
3930    
3931     if (rate != req_rate)
3932     return -EINVAL;
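A worked example of why the rounding direction matters, assuming the divider
itself is computed by rounding up (as ingenic_clk_calc_div() does for
CGU_CLK_DIV clocks): take a 48 MHz parent and a request that yields a
divider of 7. Rounding down, round_rate() returned 48000000 / 7 = 6857142 Hz;
feeding that back into clk_set_rate() recomputes the divider as
ceil(48000000 / 6857142) = 8, giving 6 MHz instead. Rounding up,
round_rate() returns 6857143 Hz, the divider recomputes as
ceil(48000000 / 6857143) = 7, and set_rate() arrives at the same 6857143 Hz,
so clk_set_rate(clk, clk_round_rate(clk, rate)) no longer fails with
-EINVAL on a non-integer division.
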
3933     diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
3934     index 502bcbb61b04..e12716d8ce3c 100644
3935     --- a/drivers/clk/ingenic/cgu.h
3936     +++ b/drivers/clk/ingenic/cgu.h
3937     @@ -80,7 +80,7 @@ struct ingenic_cgu_mux_info {
3938     * @reg: offset of the divider control register within the CGU
3939     * @shift: number of bits to left shift the divide value by (ie. the index of
3940     * the lowest bit of the divide value within its control register)
3941     - * @div: number of bits to divide the divider value by (i.e. if the
3942     + * @div: number to divide the divider value by (i.e. if the
3943     * effective divider value is the value written to the register
3944     * multiplied by some constant)
3945     * @bits: the size of the divide value in bits
3946     diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
3947     index 93306283d764..8ae44b5db4c2 100644
3948     --- a/drivers/clk/samsung/clk-exynos5-subcmu.c
3949     +++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
3950     @@ -136,15 +136,20 @@ static int __init exynos5_clk_register_subcmu(struct device *parent,
3951     {
3952     struct of_phandle_args genpdspec = { .np = pd_node };
3953     struct platform_device *pdev;
3954     + int ret;
3955     +
3956     + pdev = platform_device_alloc("exynos5-subcmu", PLATFORM_DEVID_AUTO);
3957     + if (!pdev)
3958     + return -ENOMEM;
3959    
3960     - pdev = platform_device_alloc(info->pd_name, -1);
3961     pdev->dev.parent = parent;
3962     - pdev->driver_override = "exynos5-subcmu";
3963     platform_set_drvdata(pdev, (void *)info);
3964     of_genpd_add_device(&genpdspec, &pdev->dev);
3965     - platform_device_add(pdev);
3966     + ret = platform_device_add(pdev);
3967     + if (ret)
3968     + platform_device_put(pdev);
3969    
3970     - return 0;
3971     + return ret;
3972     }
3973    
3974     static int __init exynos5_clk_probe(struct platform_device *pdev)
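The subcmu fix applies the standard platform-device ownership rules that the
old code skipped: platform_device_alloc() can return NULL (the old code
dereferenced pdev unconditionally), and a device whose platform_device_add()
failed is still owned by the caller and must be put. The rule in isolation,
with a made-up device name:

        pdev = platform_device_alloc("example-dev", PLATFORM_DEVID_AUTO);
        if (!pdev)
                return -ENOMEM;
        ret = platform_device_add(pdev);
        if (ret)
                platform_device_put(pdev); /* add failed: still ours to free */
        return ret;
        /* after a successful add, tear down with
         * platform_device_unregister(), not _put() */
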
3975     diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c
3976     index ec11f55594ad..5d2d42b7e182 100644
3977     --- a/drivers/clk/uniphier/clk-uniphier-cpugear.c
3978     +++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c
3979     @@ -47,7 +47,7 @@ static int uniphier_clk_cpugear_set_parent(struct clk_hw *hw, u8 index)
3980     return ret;
3981    
3982     ret = regmap_write_bits(gear->regmap,
3983     - gear->regbase + UNIPHIER_CLK_CPUGEAR_SET,
3984     + gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD,
3985     UNIPHIER_CLK_CPUGEAR_UPD_BIT,
3986     UNIPHIER_CLK_CPUGEAR_UPD_BIT);
3987     if (ret)
3988     diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
3989     index a9e26f6a81a1..8dfd3bc448d0 100644
3990     --- a/drivers/clocksource/Kconfig
3991     +++ b/drivers/clocksource/Kconfig
3992     @@ -360,6 +360,16 @@ config ARM64_ERRATUM_858921
3993     The workaround will be dynamically enabled when an affected
3994     core is detected.
3995    
3996     +config SUN50I_ERRATUM_UNKNOWN1
3997     + bool "Workaround for Allwinner A64 erratum UNKNOWN1"
3998     + default y
3999     + depends on ARM_ARCH_TIMER && ARM64 && ARCH_SUNXI
4000     + select ARM_ARCH_TIMER_OOL_WORKAROUND
4001     + help
4002     + This option enables a workaround for instability in the timer on
4003     + the Allwinner A64 SoC. The workaround will only be active if the
4004     + allwinner,erratum-unknown1 property is found in the timer node.
4005     +
4006     config ARM_GLOBAL_TIMER
4007     bool "Support for the ARM global timer" if COMPILE_TEST
4008     select TIMER_OF if OF
4009     diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
4010     index 9a7d4dc00b6e..a8b20b65bd4b 100644
4011     --- a/drivers/clocksource/arm_arch_timer.c
4012     +++ b/drivers/clocksource/arm_arch_timer.c
4013     @@ -326,6 +326,48 @@ static u64 notrace arm64_1188873_read_cntvct_el0(void)
4014     }
4015     #endif
4016    
4017     +#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
4018     +/*
4019     + * The low bits of the counter registers are indeterminate while bit 10 or
4020     + * greater is rolling over. Since the counter value can jump both backward
4021     + * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
4022     + * with all ones or all zeros in the low bits. Bound the loop by the maximum
4023     + * number of CPU cycles in 3 consecutive 24 MHz counter periods.
4024     + */
4025     +#define __sun50i_a64_read_reg(reg) ({ \
4026     + u64 _val; \
4027     + int _retries = 150; \
4028     + \
4029     + do { \
4030     + _val = read_sysreg(reg); \
4031     + _retries--; \
4032     + } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
4033     + \
4034     + WARN_ON_ONCE(!_retries); \
4035     + _val; \
4036     +})
4037     +
4038     +static u64 notrace sun50i_a64_read_cntpct_el0(void)
4039     +{
4040     + return __sun50i_a64_read_reg(cntpct_el0);
4041     +}
4042     +
4043     +static u64 notrace sun50i_a64_read_cntvct_el0(void)
4044     +{
4045     + return __sun50i_a64_read_reg(cntvct_el0);
4046     +}
4047     +
4048     +static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
4049     +{
4050     + return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
4051     +}
4052     +
4053     +static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
4054     +{
4055     + return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
4056     +}
4057     +#endif
4058     +
4059     #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
4060     DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
4061     EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
4062     @@ -423,6 +465,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
4063     .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
4064     },
4065     #endif
4066     +#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
4067     + {
4068     + .match_type = ate_match_dt,
4069     + .id = "allwinner,erratum-unknown1",
4070     + .desc = "Allwinner erratum UNKNOWN1",
4071     + .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
4072     + .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
4073     + .read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
4074     + .read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
4075     + .set_next_event_phys = erratum_set_next_event_tval_phys,
4076     + .set_next_event_virt = erratum_set_next_event_tval_virt,
4077     + },
4078     +#endif
4079     };
4080    
4081     typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
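The loop condition ((_val + 1) & GENMASK(9, 0)) <= 1 catches exactly the two
corrupted patterns the erratum can produce: if the low 10 bits are all ones
(0x3ff), adding 1 clears them and the masked result is 0; if they are all
zeros, adding 1 gives 1. Any other low-bit pattern makes the masked value at
least 2 and the read is accepted on the first pass, so the 150-iteration
bound is only consumed while a rollover is actually in flight.
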
4082     diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
4083     index 7a244b681876..d55c30f6981d 100644
4084     --- a/drivers/clocksource/exynos_mct.c
4085     +++ b/drivers/clocksource/exynos_mct.c
4086     @@ -388,6 +388,13 @@ static void exynos4_mct_tick_start(unsigned long cycles,
4087     exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
4088     }
4089    
4090     +static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
4091     +{
4092     + /* Clear the MCT tick interrupt */
4093     + if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
4094     + exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
4095     +}
4096     +
4097     static int exynos4_tick_set_next_event(unsigned long cycles,
4098     struct clock_event_device *evt)
4099     {
4100     @@ -404,6 +411,7 @@ static int set_state_shutdown(struct clock_event_device *evt)
4101    
4102     mevt = container_of(evt, struct mct_clock_event_device, evt);
4103     exynos4_mct_tick_stop(mevt);
4104     + exynos4_mct_tick_clear(mevt);
4105     return 0;
4106     }
4107    
4108     @@ -420,8 +428,11 @@ static int set_state_periodic(struct clock_event_device *evt)
4109     return 0;
4110     }
4111    
4112     -static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
4113     +static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
4114     {
4115     + struct mct_clock_event_device *mevt = dev_id;
4116     + struct clock_event_device *evt = &mevt->evt;
4117     +
4118     /*
4119     * This is for supporting oneshot mode.
4120     * Mct would generate interrupt periodically
4121     @@ -430,16 +441,6 @@ static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
4122     if (!clockevent_state_periodic(&mevt->evt))
4123     exynos4_mct_tick_stop(mevt);
4124    
4125     - /* Clear the MCT tick interrupt */
4126     - if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
4127     - exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
4128     -}
4129     -
4130     -static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
4131     -{
4132     - struct mct_clock_event_device *mevt = dev_id;
4133     - struct clock_event_device *evt = &mevt->evt;
4134     -
4135     exynos4_mct_tick_clear(mevt);
4136    
4137     evt->event_handler(evt);
4138     diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
4139     index 46254e583982..74e0e0c20c46 100644
4140     --- a/drivers/cpufreq/pxa2xx-cpufreq.c
4141     +++ b/drivers/cpufreq/pxa2xx-cpufreq.c
4142     @@ -143,7 +143,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
4143     return ret;
4144     }
4145    
4146     -static void __init pxa_cpufreq_init_voltages(void)
4147     +static void pxa_cpufreq_init_voltages(void)
4148     {
4149     vcc_core = regulator_get(NULL, "vcc_core");
4150     if (IS_ERR(vcc_core)) {
4151     @@ -159,7 +159,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
4152     return 0;
4153     }
4154    
4155     -static void __init pxa_cpufreq_init_voltages(void) { }
4156     +static void pxa_cpufreq_init_voltages(void) { }
4157     #endif
4158    
4159     static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
4160     diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
4161     index 2a3675c24032..a472b814058f 100644
4162     --- a/drivers/cpufreq/qcom-cpufreq-kryo.c
4163     +++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
4164     @@ -75,7 +75,7 @@ static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
4165    
4166     static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
4167     {
4168     - struct opp_table *opp_tables[NR_CPUS] = {0};
4169     + struct opp_table **opp_tables;
4170     enum _msm8996_version msm8996_version;
4171     struct nvmem_cell *speedbin_nvmem;
4172     struct device_node *np;
4173     @@ -133,6 +133,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
4174     }
4175     kfree(speedbin);
4176    
4177     + opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL);
4178     + if (!opp_tables)
4179     + return -ENOMEM;
4180     +
4181     for_each_possible_cpu(cpu) {
4182     cpu_dev = get_cpu_device(cpu);
4183     if (NULL == cpu_dev) {
4184     @@ -151,8 +155,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
4185    
4186     cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
4187     NULL, 0);
4188     - if (!IS_ERR(cpufreq_dt_pdev))
4189     + if (!IS_ERR(cpufreq_dt_pdev)) {
4190     + platform_set_drvdata(pdev, opp_tables);
4191     return 0;
4192     + }
4193    
4194     ret = PTR_ERR(cpufreq_dt_pdev);
4195     dev_err(cpu_dev, "Failed to register platform device\n");
4196     @@ -163,13 +169,23 @@ free_opp:
4197     break;
4198     dev_pm_opp_put_supported_hw(opp_tables[cpu]);
4199     }
4200     + kfree(opp_tables);
4201    
4202     return ret;
4203     }
4204    
4205     static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
4206     {
4207     + struct opp_table **opp_tables = platform_get_drvdata(pdev);
4208     + unsigned int cpu;
4209     +
4210     platform_device_unregister(cpufreq_dt_pdev);
4211     +
4212     + for_each_possible_cpu(cpu)
4213     + dev_pm_opp_put_supported_hw(opp_tables[cpu]);
4214     +
4215     + kfree(opp_tables);
4216     +
4217     return 0;
4218     }
4219    
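The kryo change replaces an on-stack array with a heap allocation:
struct opp_table *opp_tables[NR_CPUS] occupies NR_CPUS * sizeof(void *)
bytes of kernel stack, and with NR_CPUS configured at 4096 on a 64-bit build
that is 32 KiB, twice a 16 KiB kernel stack.
kcalloc(num_possible_cpus(), ...) sizes the array to the CPUs that can
actually exist, and stashing the pointer in drvdata lets the new remove()
path drop the OPP references and free it.
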
4220     diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
4221     index 43530254201a..4bb154f6c54c 100644
4222     --- a/drivers/cpufreq/tegra124-cpufreq.c
4223     +++ b/drivers/cpufreq/tegra124-cpufreq.c
4224     @@ -134,6 +134,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
4225    
4226     platform_set_drvdata(pdev, priv);
4227    
4228     + of_node_put(np);
4229     +
4230     return 0;
4231    
4232     out_switch_to_pllx:
4233     diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
4234     index bb93e5cf6a4a..9fddf828a76f 100644
4235     --- a/drivers/cpuidle/governor.c
4236     +++ b/drivers/cpuidle/governor.c
4237     @@ -89,6 +89,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
4238     mutex_lock(&cpuidle_lock);
4239     if (__cpuidle_find_governor(gov->name) == NULL) {
4240     ret = 0;
4241     + list_add_tail(&gov->governor_list, &cpuidle_governors);
4242     if (!cpuidle_curr_governor ||
4243     !strncasecmp(param_governor, gov->name, CPUIDLE_NAME_LEN) ||
4244     (cpuidle_curr_governor->rating < gov->rating &&
4245     diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
4246     index 80ae69f906fb..1c4f3a046dc5 100644
4247     --- a/drivers/crypto/caam/caamalg.c
4248     +++ b/drivers/crypto/caam/caamalg.c
4249     @@ -1040,6 +1040,7 @@ static void init_aead_job(struct aead_request *req,
4250     if (unlikely(req->src != req->dst)) {
4251     if (edesc->dst_nents == 1) {
4252     dst_dma = sg_dma_address(req->dst);
4253     + out_options = 0;
4254     } else {
4255     dst_dma = edesc->sec4_sg_dma +
4256     sec4_sg_index *
4257     diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
4258     index bb1a2cdf1951..0f11811a3585 100644
4259     --- a/drivers/crypto/caam/caamhash.c
4260     +++ b/drivers/crypto/caam/caamhash.c
4261     @@ -113,6 +113,7 @@ struct caam_hash_ctx {
4262     struct caam_hash_state {
4263     dma_addr_t buf_dma;
4264     dma_addr_t ctx_dma;
4265     + int ctx_dma_len;
4266     u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
4267     int buflen_0;
4268     u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
4269     @@ -165,6 +166,7 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
4270     struct caam_hash_state *state,
4271     int ctx_len)
4272     {
4273     + state->ctx_dma_len = ctx_len;
4274     state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
4275     ctx_len, DMA_FROM_DEVICE);
4276     if (dma_mapping_error(jrdev, state->ctx_dma)) {
4277     @@ -178,18 +180,6 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
4278     return 0;
4279     }
4280    
4281     -/* Map req->result, and append seq_out_ptr command that points to it */
4282     -static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
4283     - u8 *result, int digestsize)
4284     -{
4285     - dma_addr_t dst_dma;
4286     -
4287     - dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
4288     - append_seq_out_ptr(desc, dst_dma, digestsize, 0);
4289     -
4290     - return dst_dma;
4291     -}
4292     -
4293     /* Map current buffer in state (if length > 0) and put it in link table */
4294     static inline int buf_map_to_sec4_sg(struct device *jrdev,
4295     struct sec4_sg_entry *sec4_sg,
4296     @@ -218,6 +208,7 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
4297     struct caam_hash_state *state, int ctx_len,
4298     struct sec4_sg_entry *sec4_sg, u32 flag)
4299     {
4300     + state->ctx_dma_len = ctx_len;
4301     state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
4302     if (dma_mapping_error(jrdev, state->ctx_dma)) {
4303     dev_err(jrdev, "unable to map ctx\n");
4304     @@ -426,7 +417,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
4305    
4306     /*
4307     * ahash_edesc - s/w-extended ahash descriptor
4308     - * @dst_dma: physical mapped address of req->result
4309     * @sec4_sg_dma: physical mapped address of h/w link table
4310     * @src_nents: number of segments in input scatterlist
4311     * @sec4_sg_bytes: length of dma mapped sec4_sg space
4312     @@ -434,7 +424,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
4313     * @sec4_sg: h/w link table
4314     */
4315     struct ahash_edesc {
4316     - dma_addr_t dst_dma;
4317     dma_addr_t sec4_sg_dma;
4318     int src_nents;
4319     int sec4_sg_bytes;
4320     @@ -450,8 +439,6 @@ static inline void ahash_unmap(struct device *dev,
4321    
4322     if (edesc->src_nents)
4323     dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
4324     - if (edesc->dst_dma)
4325     - dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
4326    
4327     if (edesc->sec4_sg_bytes)
4328     dma_unmap_single(dev, edesc->sec4_sg_dma,
4329     @@ -468,12 +455,10 @@ static inline void ahash_unmap_ctx(struct device *dev,
4330     struct ahash_edesc *edesc,
4331     struct ahash_request *req, int dst_len, u32 flag)
4332     {
4333     - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4334     - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4335     struct caam_hash_state *state = ahash_request_ctx(req);
4336    
4337     if (state->ctx_dma) {
4338     - dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
4339     + dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
4340     state->ctx_dma = 0;
4341     }
4342     ahash_unmap(dev, edesc, req, dst_len);
4343     @@ -486,9 +471,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
4344     struct ahash_edesc *edesc;
4345     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4346     int digestsize = crypto_ahash_digestsize(ahash);
4347     + struct caam_hash_state *state = ahash_request_ctx(req);
4348     #ifdef DEBUG
4349     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4350     - struct caam_hash_state *state = ahash_request_ctx(req);
4351    
4352     dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
4353     #endif
4354     @@ -497,17 +482,14 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
4355     if (err)
4356     caam_jr_strstatus(jrdev, err);
4357    
4358     - ahash_unmap(jrdev, edesc, req, digestsize);
4359     + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
4360     + memcpy(req->result, state->caam_ctx, digestsize);
4361     kfree(edesc);
4362    
4363     #ifdef DEBUG
4364     print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
4365     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
4366     ctx->ctx_len, 1);
4367     - if (req->result)
4368     - print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
4369     - DUMP_PREFIX_ADDRESS, 16, 4, req->result,
4370     - digestsize, 1);
4371     #endif
4372    
4373     req->base.complete(&req->base, err);
4374     @@ -555,9 +537,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
4375     struct ahash_edesc *edesc;
4376     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4377     int digestsize = crypto_ahash_digestsize(ahash);
4378     + struct caam_hash_state *state = ahash_request_ctx(req);
4379     #ifdef DEBUG
4380     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4381     - struct caam_hash_state *state = ahash_request_ctx(req);
4382    
4383     dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
4384     #endif
4385     @@ -566,17 +548,14 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
4386     if (err)
4387     caam_jr_strstatus(jrdev, err);
4388    
4389     - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
4390     + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
4391     + memcpy(req->result, state->caam_ctx, digestsize);
4392     kfree(edesc);
4393    
4394     #ifdef DEBUG
4395     print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
4396     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
4397     ctx->ctx_len, 1);
4398     - if (req->result)
4399     - print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
4400     - DUMP_PREFIX_ADDRESS, 16, 4, req->result,
4401     - digestsize, 1);
4402     #endif
4403    
4404     req->base.complete(&req->base, err);
4405     @@ -837,7 +816,7 @@ static int ahash_final_ctx(struct ahash_request *req)
4406     edesc->sec4_sg_bytes = sec4_sg_bytes;
4407    
4408     ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
4409     - edesc->sec4_sg, DMA_TO_DEVICE);
4410     + edesc->sec4_sg, DMA_BIDIRECTIONAL);
4411     if (ret)
4412     goto unmap_ctx;
4413    
4414     @@ -857,14 +836,7 @@ static int ahash_final_ctx(struct ahash_request *req)
4415    
4416     append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
4417     LDST_SGF);
4418     -
4419     - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
4420     - digestsize);
4421     - if (dma_mapping_error(jrdev, edesc->dst_dma)) {
4422     - dev_err(jrdev, "unable to map dst\n");
4423     - ret = -ENOMEM;
4424     - goto unmap_ctx;
4425     - }
4426     + append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
4427    
4428     #ifdef DEBUG
4429     print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
4430     @@ -877,7 +849,7 @@ static int ahash_final_ctx(struct ahash_request *req)
4431    
4432     return -EINPROGRESS;
4433     unmap_ctx:
4434     - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
4435     + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
4436     kfree(edesc);
4437     return ret;
4438     }
4439     @@ -931,7 +903,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
4440     edesc->src_nents = src_nents;
4441    
4442     ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
4443     - edesc->sec4_sg, DMA_TO_DEVICE);
4444     + edesc->sec4_sg, DMA_BIDIRECTIONAL);
4445     if (ret)
4446     goto unmap_ctx;
4447    
4448     @@ -945,13 +917,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
4449     if (ret)
4450     goto unmap_ctx;
4451    
4452     - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
4453     - digestsize);
4454     - if (dma_mapping_error(jrdev, edesc->dst_dma)) {
4455     - dev_err(jrdev, "unable to map dst\n");
4456     - ret = -ENOMEM;
4457     - goto unmap_ctx;
4458     - }
4459     + append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
4460    
4461     #ifdef DEBUG
4462     print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
4463     @@ -964,7 +930,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
4464    
4465     return -EINPROGRESS;
4466     unmap_ctx:
4467     - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
4468     + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
4469     kfree(edesc);
4470     return ret;
4471     }
4472     @@ -1023,10 +989,8 @@ static int ahash_digest(struct ahash_request *req)
4473    
4474     desc = edesc->hw_desc;
4475    
4476     - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
4477     - digestsize);
4478     - if (dma_mapping_error(jrdev, edesc->dst_dma)) {
4479     - dev_err(jrdev, "unable to map dst\n");
4480     + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
4481     + if (ret) {
4482     ahash_unmap(jrdev, edesc, req, digestsize);
4483     kfree(edesc);
4484     return -ENOMEM;
4485     @@ -1041,7 +1005,7 @@ static int ahash_digest(struct ahash_request *req)
4486     if (!ret) {
4487     ret = -EINPROGRESS;
4488     } else {
4489     - ahash_unmap(jrdev, edesc, req, digestsize);
4490     + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
4491     kfree(edesc);
4492     }
4493    
4494     @@ -1083,12 +1047,9 @@ static int ahash_final_no_ctx(struct ahash_request *req)
4495     append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
4496     }
4497    
4498     - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
4499     - digestsize);
4500     - if (dma_mapping_error(jrdev, edesc->dst_dma)) {
4501     - dev_err(jrdev, "unable to map dst\n");
4502     + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
4503     + if (ret)
4504     goto unmap;
4505     - }
4506    
4507     #ifdef DEBUG
4508     print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
4509     @@ -1099,7 +1060,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
4510     if (!ret) {
4511     ret = -EINPROGRESS;
4512     } else {
4513     - ahash_unmap(jrdev, edesc, req, digestsize);
4514     + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
4515     kfree(edesc);
4516     }
4517    
4518     @@ -1298,12 +1259,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
4519     goto unmap;
4520     }
4521    
4522     - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
4523     - digestsize);
4524     - if (dma_mapping_error(jrdev, edesc->dst_dma)) {
4525     - dev_err(jrdev, "unable to map dst\n");
4526     + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
4527     + if (ret)
4528     goto unmap;
4529     - }
4530    
4531     #ifdef DEBUG
4532     print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
4533     @@ -1314,7 +1272,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
4534     if (!ret) {
4535     ret = -EINPROGRESS;
4536     } else {
4537     - ahash_unmap(jrdev, edesc, req, digestsize);
4538     + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
4539     kfree(edesc);
4540     }
4541    
4542     @@ -1446,6 +1404,7 @@ static int ahash_init(struct ahash_request *req)
4543     state->final = ahash_final_no_ctx;
4544    
4545     state->ctx_dma = 0;
4546     + state->ctx_dma_len = 0;
4547     state->current_buf = 0;
4548     state->buf_dma = 0;
4549     state->buflen_0 = 0;
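
The caamhash rework above stops DMA-mapping req->result directly: the engine now writes the digest into the already-mapped context buffer, and the completion callback unmaps it before copying the digest out, tracking the mapped length per request so the unmap size always matches the map size. A minimal sketch of that unmap-then-copy completion pattern, with illustrative names rather than the driver's:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Hand the buffer back to the CPU before reading it; reading a
 * DMA_FROM_DEVICE buffer while it is still mapped is not guaranteed to
 * observe the device's writes on non-coherent platforms. */
static void hash_done_sketch(struct device *dev, void *ctx_buf,
			     dma_addr_t ctx_dma, size_t ctx_dma_len,
			     u8 *result, int digestsize)
{
	dma_unmap_single(dev, ctx_dma, ctx_dma_len, DMA_FROM_DEVICE);
	memcpy(result, ctx_buf, digestsize);	/* digest -> req->result */
}
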
4550     diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
4551     index dd948e1df9e5..3bcb6bce666e 100644
4552     --- a/drivers/crypto/ccree/cc_buffer_mgr.c
4553     +++ b/drivers/crypto/ccree/cc_buffer_mgr.c
4554     @@ -614,10 +614,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
4555     hw_iv_size, DMA_BIDIRECTIONAL);
4556     }
4557    
4558     - /*In case a pool was set, a table was
4559     - *allocated and should be released
4560     - */
4561     - if (areq_ctx->mlli_params.curr_pool) {
4562     + /* Release pool */
4563     + if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
4564     + areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
4565     + (areq_ctx->mlli_params.mlli_virt_addr)) {
4566     dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
4567     &areq_ctx->mlli_params.mlli_dma_addr,
4568     areq_ctx->mlli_params.mlli_virt_addr);
4569     diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
4570     index cc92b031fad1..4ec93079daaf 100644
4571     --- a/drivers/crypto/ccree/cc_cipher.c
4572     +++ b/drivers/crypto/ccree/cc_cipher.c
4573     @@ -80,6 +80,7 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
4574     default:
4575     break;
4576     }
4577     + break;
4578     case S_DIN_to_DES:
4579     if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
4580     return 0;
4581     @@ -652,6 +653,8 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
4582     unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
4583     unsigned int len;
4584    
4585     + cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
4586     +
4587     switch (ctx_p->cipher_mode) {
4588     case DRV_CIPHER_CBC:
4589     /*
4590     @@ -681,7 +684,6 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
4591     break;
4592     }
4593    
4594     - cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
4595     kzfree(req_ctx->iv);
4596    
4597     skcipher_request_complete(req, err);
4598     @@ -799,7 +801,8 @@ static int cc_cipher_decrypt(struct skcipher_request *req)
4599    
4600     memset(req_ctx, 0, sizeof(*req_ctx));
4601    
4602     - if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
4603     + if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
4604     + (req->cryptlen >= ivsize)) {
4605    
4606     /* Allocate and save the last IV sized bytes of the source,
4607     * which will be lost in case of in-place decryption.
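
Two of the cc_cipher fixes share one CBC rule: the chaining IV is the last full ciphertext block, so it may only be saved when req->cryptlen holds at least one block, and it must be read only after the request's DMA buffers are unmapped. A hedged sketch of the guard using the generic scatterwalk helper (names are illustrative, not ccree internals):

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>

/* Save the last ciphertext block as the next chaining IV, but only if
 * the request actually contains one; the unguarded copy would read
 * before the start of the buffer for short requests. */
static void save_cbc_iv_sketch(struct skcipher_request *req,
			       unsigned int ivsize, u8 *backup_iv)
{
	if (req->cryptlen < ivsize)
		return;

	scatterwalk_map_and_copy(backup_iv, req->src,
				 req->cryptlen - ivsize, ivsize, 0);
}
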
4608     diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
4609     index c9d622abd90c..0ce4a65b95f5 100644
4610     --- a/drivers/crypto/rockchip/rk3288_crypto.c
4611     +++ b/drivers/crypto/rockchip/rk3288_crypto.c
4612     @@ -119,7 +119,7 @@ static int rk_load_data(struct rk_crypto_info *dev,
4613     count = (dev->left_bytes > PAGE_SIZE) ?
4614     PAGE_SIZE : dev->left_bytes;
4615    
4616     - if (!sg_pcopy_to_buffer(dev->first, dev->nents,
4617     + if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
4618     dev->addr_vir, count,
4619     dev->total - dev->left_bytes)) {
4620     dev_err(dev->dev, "[%s:%d] pcopy err\n",
4621     diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
4622     index d5fb4013fb42..54ee5b3ed9db 100644
4623     --- a/drivers/crypto/rockchip/rk3288_crypto.h
4624     +++ b/drivers/crypto/rockchip/rk3288_crypto.h
4625     @@ -207,7 +207,8 @@ struct rk_crypto_info {
4626     void *addr_vir;
4627     int aligned;
4628     int align_size;
4629     - size_t nents;
4630     + size_t src_nents;
4631     + size_t dst_nents;
4632     unsigned int total;
4633     unsigned int count;
4634     dma_addr_t addr_in;
4635     @@ -244,6 +245,7 @@ struct rk_cipher_ctx {
4636     struct rk_crypto_info *dev;
4637     unsigned int keylen;
4638     u32 mode;
4639     + u8 iv[AES_BLOCK_SIZE];
4640     };
4641    
4642     enum alg_type {
4643     diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
4644     index 639c15c5364b..23305f22072f 100644
4645     --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
4646     +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
4647     @@ -242,6 +242,17 @@ static void crypto_dma_start(struct rk_crypto_info *dev)
4648     static int rk_set_data_start(struct rk_crypto_info *dev)
4649     {
4650     int err;
4651     + struct ablkcipher_request *req =
4652     + ablkcipher_request_cast(dev->async_req);
4653     + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
4654     + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
4655     + u32 ivsize = crypto_ablkcipher_ivsize(tfm);
4656     + u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
4657     + dev->sg_src->offset + dev->sg_src->length - ivsize;
4658     +
4659     + /* store the IV that needs to be updated in chain mode */
4660     + if (ctx->mode & RK_CRYPTO_DEC)
4661     + memcpy(ctx->iv, src_last_blk, ivsize);
4662    
4663     err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
4664     if (!err)
4665     @@ -260,8 +271,9 @@ static int rk_ablk_start(struct rk_crypto_info *dev)
4666     dev->total = req->nbytes;
4667     dev->sg_src = req->src;
4668     dev->first = req->src;
4669     - dev->nents = sg_nents(req->src);
4670     + dev->src_nents = sg_nents(req->src);
4671     dev->sg_dst = req->dst;
4672     + dev->dst_nents = sg_nents(req->dst);
4673     dev->aligned = 1;
4674    
4675     spin_lock_irqsave(&dev->lock, flags);
4676     @@ -285,6 +297,28 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
4677     memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
4678     }
4679    
4680     +static void rk_update_iv(struct rk_crypto_info *dev)
4681     +{
4682     + struct ablkcipher_request *req =
4683     + ablkcipher_request_cast(dev->async_req);
4684     + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
4685     + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
4686     + u32 ivsize = crypto_ablkcipher_ivsize(tfm);
4687     + u8 *new_iv = NULL;
4688     +
4689     + if (ctx->mode & RK_CRYPTO_DEC) {
4690     + new_iv = ctx->iv;
4691     + } else {
4692     + new_iv = page_address(sg_page(dev->sg_dst)) +
4693     + dev->sg_dst->offset + dev->sg_dst->length - ivsize;
4694     + }
4695     +
4696     + if (ivsize == DES_BLOCK_SIZE)
4697     + memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
4698     + else if (ivsize == AES_BLOCK_SIZE)
4699     + memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
4700     +}
4701     +
4702     /* return:
4703     * true: an error occurred
4704     * false: no error, continue
4705     @@ -297,7 +331,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
4706    
4707     dev->unload_data(dev);
4708     if (!dev->aligned) {
4709     - if (!sg_pcopy_from_buffer(req->dst, dev->nents,
4710     + if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
4711     dev->addr_vir, dev->count,
4712     dev->total - dev->left_bytes -
4713     dev->count)) {
4714     @@ -306,6 +340,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
4715     }
4716     }
4717     if (dev->left_bytes) {
4718     + rk_update_iv(dev);
4719     if (dev->aligned) {
4720     if (sg_is_last(dev->sg_src)) {
4721     dev_err(dev->dev, "[%s:%d] Lack of data\n",
4722     diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
4723     index 821a506b9e17..c336ae75e361 100644
4724     --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
4725     +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
4726     @@ -206,7 +206,7 @@ static int rk_ahash_start(struct rk_crypto_info *dev)
4727     dev->sg_dst = NULL;
4728     dev->sg_src = req->src;
4729     dev->first = req->src;
4730     - dev->nents = sg_nents(req->src);
4731     + dev->src_nents = sg_nents(req->src);
4732     rctx = ahash_request_ctx(req);
4733     rctx->mode = 0;
4734    
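
The rockchip hunks above implement CBC chaining across the driver's page-sized chunks, and split the single nents counter into src_nents/dst_nents so each copy uses the right scatterlist length. For decryption the next IV is the last ciphertext block of the current source chunk, which has to be saved before an in-place operation overwrites it; for encryption it is the tail of the output chunk. A sketch of that selection, assuming one scatterlist entry per chunk as in the driver:

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static void cbc_next_iv_sketch(bool decrypt, struct scatterlist *dst,
			       const u8 *saved_src_tail, u8 *iv,
			       unsigned int ivsize)
{
	if (decrypt) {
		/* saved from the source before in-place decryption */
		memcpy(iv, saved_src_tail, ivsize);
	} else {
		/* last ciphertext block is the tail of the output chunk */
		const u8 *tail = page_address(sg_page(dst)) + dst->offset +
				 dst->length - ivsize;
		memcpy(iv, tail, ivsize);
	}
}
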
4735     diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
4736     index 7f7184c3cf95..59403f6d008a 100644
4737     --- a/drivers/dma/sh/usb-dmac.c
4738     +++ b/drivers/dma/sh/usb-dmac.c
4739     @@ -694,6 +694,8 @@ static int usb_dmac_runtime_resume(struct device *dev)
4740     #endif /* CONFIG_PM */
4741    
4742     static const struct dev_pm_ops usb_dmac_pm = {
4743     + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
4744     + pm_runtime_force_resume)
4745     SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
4746     NULL)
4747     };
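
The usb-dmac hunk adds system-sleep support by routing it through the existing runtime-PM callbacks at the noirq phase. A sketch of that dev_pm_ops shape; the two stubs stand in for a driver's real runtime callbacks:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int ex_runtime_suspend(struct device *dev) { return 0; }
static int ex_runtime_resume(struct device *dev)  { return 0; }

/* pm_runtime_force_suspend()/resume() run the runtime callbacks at
 * system suspend even for a runtime-active device; doing it at noirq
 * time orders it after the DMA clients' own suspend hooks. */
static const struct dev_pm_ops ex_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(ex_runtime_suspend, ex_runtime_resume, NULL)
};
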
4748     diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
4749     index 0dc96419efe3..d8a985fc6a5d 100644
4750     --- a/drivers/gpio/gpio-pca953x.c
4751     +++ b/drivers/gpio/gpio-pca953x.c
4752     @@ -587,7 +587,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
4753    
4754     static void pca953x_irq_shutdown(struct irq_data *d)
4755     {
4756     - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
4757     + struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
4758     + struct pca953x_chip *chip = gpiochip_get_data(gc);
4759     u8 mask = 1 << (d->hwirq % BANK_SZ);
4760    
4761     chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
4762     diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
4763     index 43e4a2be0fa6..57cc11d0e9a5 100644
4764     --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
4765     +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
4766     @@ -1355,12 +1355,12 @@ void dcn_bw_update_from_pplib(struct dc *dc)
4767     struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
4768     bool res;
4769    
4770     - kernel_fpu_begin();
4771     -
4772     /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
4773     res = dm_pp_get_clock_levels_by_type_with_voltage(
4774     ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
4775    
4776     + kernel_fpu_begin();
4777     +
4778     if (res)
4779     res = verify_clock_values(&fclks);
4780    
4781     @@ -1379,9 +1379,13 @@ void dcn_bw_update_from_pplib(struct dc *dc)
4782     } else
4783     BREAK_TO_DEBUGGER();
4784    
4785     + kernel_fpu_end();
4786     +
4787     res = dm_pp_get_clock_levels_by_type_with_voltage(
4788     ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
4789    
4790     + kernel_fpu_begin();
4791     +
4792     if (res)
4793     res = verify_clock_values(&dcfclks);
4794    
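
The dcn_calcs hunks shrink the kernel_fpu_begin()/end() windows so the pplib clock queries run outside them: kernel_fpu_begin() disables preemption, and the queries can sleep. The resulting shape, with hypothetical helpers standing in for the driver calls:

#include <asm/fpu/api.h>

int query_clock_levels(void);	/* hypothetical: may sleep */
void crunch_floats(int levels);	/* hypothetical: FP math only */

static void fpu_window_sketch(void)
{
	int levels = query_clock_levels();	/* outside: may sleep */

	kernel_fpu_begin();		/* non-preemptible from here on */
	crunch_floats(levels);		/* no sleeping inside */
	kernel_fpu_end();
}
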
4795     diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
4796     index c8f5c00dd1e7..86e3fb27c125 100644
4797     --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
4798     +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
4799     @@ -3491,14 +3491,14 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
4800    
4801     smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
4802     cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4803     - ixSMU_PM_STATUS_94, 0);
4804     + ixSMU_PM_STATUS_95, 0);
4805    
4806     for (i = 0; i < 10; i++) {
4807     - mdelay(1);
4808     + mdelay(500);
4809     smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
4810     tmp = cgs_read_ind_register(hwmgr->device,
4811     CGS_IND_REG__SMC,
4812     - ixSMU_PM_STATUS_94);
4813     + ixSMU_PM_STATUS_95);
4814     if (tmp != 0)
4815     break;
4816     }
4817     diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
4818     index d73703a695e8..70fc8e356b18 100644
4819     --- a/drivers/gpu/drm/drm_fb_helper.c
4820     +++ b/drivers/gpu/drm/drm_fb_helper.c
4821     @@ -3170,9 +3170,7 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client)
4822    
4823     static int drm_fbdev_client_restore(struct drm_client_dev *client)
4824     {
4825     - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
4826     -
4827     - drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
4828     + drm_fb_helper_lastclose(client->dev);
4829    
4830     return 0;
4831     }
4832     diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
4833     index f471537c852f..1e14c6921454 100644
4834     --- a/drivers/gpu/drm/radeon/evergreen_cs.c
4835     +++ b/drivers/gpu/drm/radeon/evergreen_cs.c
4836     @@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
4837     return -EINVAL;
4838     }
4839     ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
4840     + break;
4841     case CB_TARGET_MASK:
4842     track->cb_target_mask = radeon_get_ib_value(p, idx);
4843     track->cb_dirty = true;
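
This one-line fix, like the ccree validate_keys_sizes and GIC ITS its_alloc_tables hunks elsewhere in this patch, closes an unintended switch fall-through. A generic sketch of the bug class:

enum baser_type { BASER_TYPE_DEVICE, BASER_TYPE_VCPU };

static int parse_indirect(int ids) { return ids; }	/* stub */

static int alloc_sketch(enum baser_type type, int dev_ids, int vcpu_ids)
{
	int indirect = 0;

	switch (type) {
	case BASER_TYPE_DEVICE:
		indirect = parse_indirect(dev_ids);
		break;	/* without this, control falls into the VCPU
			 * case and silently overwrites the result */
	case BASER_TYPE_VCPU:
		indirect = parse_indirect(vcpu_ids);
		break;
	}
	return indirect;
}
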
4844     diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
4845     index 8426b7970c14..cc287cf6eb29 100644
4846     --- a/drivers/hwtracing/intel_th/gth.c
4847     +++ b/drivers/hwtracing/intel_th/gth.c
4848     @@ -607,6 +607,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
4849     {
4850     struct gth_device *gth = dev_get_drvdata(&thdev->dev);
4851     int port = othdev->output.port;
4852     + int master;
4853    
4854     if (thdev->host_mode)
4855     return;
4856     @@ -615,6 +616,9 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
4857     othdev->output.port = -1;
4858     othdev->output.active = false;
4859     gth->output[port].output = NULL;
4860     + for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
4861     + if (gth->master[master] == port)
4862     + gth->master[master] = -1;
4863     spin_unlock(&gth->gth_lock);
4864     }
4865    
4866     diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
4867     index 93ce3aa740a9..c7ba8acfd4d5 100644
4868     --- a/drivers/hwtracing/stm/core.c
4869     +++ b/drivers/hwtracing/stm/core.c
4870     @@ -244,6 +244,9 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
4871     ;
4872     if (i == width)
4873     return pos;
4874     +
4875     + /* step over [pos..pos+i) to continue search */
4876     + pos += i;
4877     }
4878    
4879     return -1;
4880     @@ -732,7 +735,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
4881     struct stm_device *stm = stmf->stm;
4882     struct stp_policy_id *id;
4883     char *ids[] = { NULL, NULL };
4884     - int ret = -EINVAL;
4885     + int ret = -EINVAL, wlimit = 1;
4886     u32 size;
4887    
4888     if (stmf->output.nr_chans)
4889     @@ -760,8 +763,10 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
4890     if (id->__reserved_0 || id->__reserved_1)
4891     goto err_free;
4892    
4893     - if (id->width < 1 ||
4894     - id->width > PAGE_SIZE / stm->data->sw_mmiosz)
4895     + if (stm->data->sw_mmiosz)
4896     + wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
4897     +
4898     + if (id->width < 1 || id->width > wlimit)
4899     goto err_free;
4900    
4901     ids[0] = id->id;
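
The stm width check above also guards against stm->data->sw_mmiosz being zero: the old bound PAGE_SIZE / sw_mmiosz divided by zero for devices without an MMIO write window. The guarded check, reduced to a sketch:

#include <linux/errno.h>
#include <linux/mm.h>	/* PAGE_SIZE */

static int check_width_sketch(unsigned int width, unsigned int sw_mmiosz)
{
	unsigned int wlimit = 1;	/* no MMIO window: one channel */

	if (sw_mmiosz)
		wlimit = PAGE_SIZE / sw_mmiosz;

	return (width < 1 || width > wlimit) ? -EINVAL : 0;
}
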
4902     diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
4903     index c77adbbea0c7..e85dc8583896 100644
4904     --- a/drivers/i2c/busses/i2c-tegra.c
4905     +++ b/drivers/i2c/busses/i2c-tegra.c
4906     @@ -118,6 +118,9 @@
4907     #define I2C_MST_FIFO_STATUS_TX_MASK 0xff0000
4908     #define I2C_MST_FIFO_STATUS_TX_SHIFT 16
4909    
4910     +/* Packet header size in bytes */
4911     +#define I2C_PACKET_HEADER_SIZE 12
4912     +
4913     /*
4914     * msg_end_type: The bus control which needs to be sent at the end of a transfer.
4915     * @MSG_END_STOP: Send stop pulse at end of transfer.
4916     @@ -836,12 +839,13 @@ static const struct i2c_algorithm tegra_i2c_algo = {
4917     /* payload size is only 12 bit */
4918     static const struct i2c_adapter_quirks tegra_i2c_quirks = {
4919     .flags = I2C_AQ_NO_ZERO_LEN,
4920     - .max_read_len = 4096,
4921     - .max_write_len = 4096,
4922     + .max_read_len = SZ_4K,
4923     + .max_write_len = SZ_4K - I2C_PACKET_HEADER_SIZE,
4924     };
4925    
4926     static const struct i2c_adapter_quirks tegra194_i2c_quirks = {
4927     .flags = I2C_AQ_NO_ZERO_LEN,
4928     + .max_write_len = SZ_64K - I2C_PACKET_HEADER_SIZE,
4929     };
4930    
4931     static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
4932     diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
4933     index fa2d2b5767f3..1ca2c4d39f87 100644
4934     --- a/drivers/iio/adc/exynos_adc.c
4935     +++ b/drivers/iio/adc/exynos_adc.c
4936     @@ -115,6 +115,7 @@
4937     #define MAX_ADC_V2_CHANNELS 10
4938     #define MAX_ADC_V1_CHANNELS 8
4939     #define MAX_EXYNOS3250_ADC_CHANNELS 2
4940     +#define MAX_EXYNOS4212_ADC_CHANNELS 4
4941     #define MAX_S5PV210_ADC_CHANNELS 10
4942    
4943     /* Bit definitions common for ADC_V1 and ADC_V2 */
4944     @@ -271,6 +272,19 @@ static void exynos_adc_v1_start_conv(struct exynos_adc *info,
4945     writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
4946     }
4947    
4948     +/* Exynos4212 and 4412 are like ADCv1 but with four channels only */
4949     +static const struct exynos_adc_data exynos4212_adc_data = {
4950     + .num_channels = MAX_EXYNOS4212_ADC_CHANNELS,
4951     + .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
4952     + .needs_adc_phy = true,
4953     + .phy_offset = EXYNOS_ADCV1_PHY_OFFSET,
4954     +
4955     + .init_hw = exynos_adc_v1_init_hw,
4956     + .exit_hw = exynos_adc_v1_exit_hw,
4957     + .clear_irq = exynos_adc_v1_clear_irq,
4958     + .start_conv = exynos_adc_v1_start_conv,
4959     +};
4960     +
4961     static const struct exynos_adc_data exynos_adc_v1_data = {
4962     .num_channels = MAX_ADC_V1_CHANNELS,
4963     .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
4964     @@ -492,6 +506,9 @@ static const struct of_device_id exynos_adc_match[] = {
4965     }, {
4966     .compatible = "samsung,s5pv210-adc",
4967     .data = &exynos_adc_s5pv210_data,
4968     + }, {
4969     + .compatible = "samsung,exynos4212-adc",
4970     + .data = &exynos4212_adc_data,
4971     }, {
4972     .compatible = "samsung,exynos-adc-v1",
4973     .data = &exynos_adc_v1_data,
4974     @@ -929,7 +946,7 @@ static int exynos_adc_remove(struct platform_device *pdev)
4975     struct iio_dev *indio_dev = platform_get_drvdata(pdev);
4976     struct exynos_adc *info = iio_priv(indio_dev);
4977    
4978     - if (IS_REACHABLE(CONFIG_INPUT)) {
4979     + if (IS_REACHABLE(CONFIG_INPUT) && info->input) {
4980     free_irq(info->tsirq, info);
4981     input_unregister_device(info->input);
4982     }
4983     diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
4984     index 6db2276f5c13..15ec3e1feb09 100644
4985     --- a/drivers/infiniband/hw/hfi1/hfi.h
4986     +++ b/drivers/infiniband/hw/hfi1/hfi.h
4987     @@ -1435,7 +1435,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
4988     struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
4989     void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
4990     int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
4991     -void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
4992     +int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
4993     struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
4994     u16 ctxt);
4995     struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
4996     diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
4997     index 7835eb52e7c5..c532ceb0bb9a 100644
4998     --- a/drivers/infiniband/hw/hfi1/init.c
4999     +++ b/drivers/infiniband/hw/hfi1/init.c
5000     @@ -215,12 +215,12 @@ static void hfi1_rcd_free(struct kref *kref)
5001     struct hfi1_ctxtdata *rcd =
5002     container_of(kref, struct hfi1_ctxtdata, kref);
5003    
5004     - hfi1_free_ctxtdata(rcd->dd, rcd);
5005     -
5006     spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
5007     rcd->dd->rcd[rcd->ctxt] = NULL;
5008     spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);
5009    
5010     + hfi1_free_ctxtdata(rcd->dd, rcd);
5011     +
5012     kfree(rcd);
5013     }
5014    
5015     @@ -243,10 +243,13 @@ int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
5016     * @rcd: pointer to an initialized rcd data structure
5017     *
5018     * Use this to get a reference after the init.
5019     + *
5020     + * Return: reflects kref_get_unless_zero(), which returns non-zero on
5021     + * increment, otherwise 0.
5022     */
5023     -void hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
5024     +int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
5025     {
5026     - kref_get(&rcd->kref);
5027     + return kref_get_unless_zero(&rcd->kref);
5028     }
5029    
5030     /**
5031     @@ -326,7 +329,8 @@ struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
5032     spin_lock_irqsave(&dd->uctxt_lock, flags);
5033     if (dd->rcd[ctxt]) {
5034     rcd = dd->rcd[ctxt];
5035     - hfi1_rcd_get(rcd);
5036     + if (!hfi1_rcd_get(rcd))
5037     + rcd = NULL;
5038     }
5039     spin_unlock_irqrestore(&dd->uctxt_lock, flags);
5040    
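
The two hfi1 changes work together: the release function now clears the context table slot under uctxt_lock before freeing, and lookups take their reference with kref_get_unless_zero(), so a lookup that races with the final put fails cleanly instead of resurrecting a dying object. The general lookup-under-lock pattern, sketched:

#include <linux/kref.h>
#include <linux/spinlock.h>

struct obj_sketch {
	struct kref kref;
};

static struct obj_sketch *table[16];
static DEFINE_SPINLOCK(table_lock);

static struct obj_sketch *obj_lookup_sketch(unsigned int i)
{
	struct obj_sketch *o = NULL;
	unsigned long flags;

	spin_lock_irqsave(&table_lock, flags);
	/* refuse the reference if the refcount already hit zero */
	if (table[i] && kref_get_unless_zero(&table[i]->kref))
		o = table[i];
	spin_unlock_irqrestore(&table_lock, flags);
	return o;
}

The release callback must clear table[i] under table_lock before freeing, which is exactly the reordering in hfi1_rcd_free() above.
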
5041     diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
5042     index c6cc3e4ab71d..c45b8359b389 100644
5043     --- a/drivers/infiniband/sw/rdmavt/qp.c
5044     +++ b/drivers/infiniband/sw/rdmavt/qp.c
5045     @@ -2785,6 +2785,18 @@ again:
5046     }
5047     EXPORT_SYMBOL(rvt_copy_sge);
5048    
5049     +static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
5050     + struct rvt_qp *sqp)
5051     +{
5052     + rvp->n_pkt_drops++;
5053     + /*
5054     + * For RC, the requester would timeout and retry so
5055     + * For RC, the requester would time out and retry, so
5056     + */
5057     + return sqp->ibqp.qp_type == IB_QPT_RC ?
5058     + IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
5059     +}
5060     +
5061     /**
5062     * ruc_loopback - handle UC and RC loopback requests
5063     * @sqp: the sending QP
5064     @@ -2857,17 +2869,14 @@ again:
5065     }
5066     spin_unlock_irqrestore(&sqp->s_lock, flags);
5067    
5068     - if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
5069     + if (!qp) {
5070     + send_status = loopback_qp_drop(rvp, sqp);
5071     + goto serr_no_r_lock;
5072     + }
5073     + spin_lock_irqsave(&qp->r_lock, flags);
5074     + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
5075     qp->ibqp.qp_type != sqp->ibqp.qp_type) {
5076     - rvp->n_pkt_drops++;
5077     - /*
5078     - * For RC, the requester would timeout and retry so
5079     - * shortcut the timeouts and just signal too many retries.
5080     - */
5081     - if (sqp->ibqp.qp_type == IB_QPT_RC)
5082     - send_status = IB_WC_RETRY_EXC_ERR;
5083     - else
5084     - send_status = IB_WC_SUCCESS;
5085     + send_status = loopback_qp_drop(rvp, sqp);
5086     goto serr;
5087     }
5088    
5089     @@ -2893,18 +2902,8 @@ again:
5090     goto send_comp;
5091    
5092     case IB_WR_SEND_WITH_INV:
5093     - if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
5094     - wc.wc_flags = IB_WC_WITH_INVALIDATE;
5095     - wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
5096     - }
5097     - goto send;
5098     -
5099     case IB_WR_SEND_WITH_IMM:
5100     - wc.wc_flags = IB_WC_WITH_IMM;
5101     - wc.ex.imm_data = wqe->wr.ex.imm_data;
5102     - /* FALLTHROUGH */
5103     case IB_WR_SEND:
5104     -send:
5105     ret = rvt_get_rwqe(qp, false);
5106     if (ret < 0)
5107     goto op_err;
5108     @@ -2912,6 +2911,22 @@ send:
5109     goto rnr_nak;
5110     if (wqe->length > qp->r_len)
5111     goto inv_err;
5112     + switch (wqe->wr.opcode) {
5113     + case IB_WR_SEND_WITH_INV:
5114     + if (!rvt_invalidate_rkey(qp,
5115     + wqe->wr.ex.invalidate_rkey)) {
5116     + wc.wc_flags = IB_WC_WITH_INVALIDATE;
5117     + wc.ex.invalidate_rkey =
5118     + wqe->wr.ex.invalidate_rkey;
5119     + }
5120     + break;
5121     + case IB_WR_SEND_WITH_IMM:
5122     + wc.wc_flags = IB_WC_WITH_IMM;
5123     + wc.ex.imm_data = wqe->wr.ex.imm_data;
5124     + break;
5125     + default:
5126     + break;
5127     + }
5128     break;
5129    
5130     case IB_WR_RDMA_WRITE_WITH_IMM:
5131     @@ -3041,6 +3056,7 @@ do_write:
5132     wqe->wr.send_flags & IB_SEND_SOLICITED);
5133    
5134     send_comp:
5135     + spin_unlock_irqrestore(&qp->r_lock, flags);
5136     spin_lock_irqsave(&sqp->s_lock, flags);
5137     rvp->n_loop_pkts++;
5138     flush_send:
5139     @@ -3067,6 +3083,7 @@ rnr_nak:
5140     }
5141     if (sqp->s_rnr_retry_cnt < 7)
5142     sqp->s_rnr_retry--;
5143     + spin_unlock_irqrestore(&qp->r_lock, flags);
5144     spin_lock_irqsave(&sqp->s_lock, flags);
5145     if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
5146     goto clr_busy;
5147     @@ -3095,6 +3112,8 @@ err:
5148     rvt_rc_error(qp, wc.status);
5149    
5150     serr:
5151     + spin_unlock_irqrestore(&qp->r_lock, flags);
5152     +serr_no_r_lock:
5153     spin_lock_irqsave(&sqp->s_lock, flags);
5154     rvt_send_complete(sqp, wqe, send_status);
5155     if (sqp->ibqp.qp_type == IB_QPT_RC) {
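
The ruc_loopback rework takes the destination QP's r_lock before inspecting its state, holds it across the receive-side work, and drops it before reacquiring the sender's s_lock, so the two QPs' locks are never nested. The ordering, reduced to a sketch with illustrative names:

#include <linux/spinlock.h>

static void loopback_order_sketch(spinlock_t *rq_lock, spinlock_t *sq_lock,
				  const bool *recv_ok)
{
	unsigned long flags;

	spin_lock_irqsave(rq_lock, flags);
	if (*recv_ok) {
		/* ... deliver the work request to the receive queue ... */
	}
	spin_unlock_irqrestore(rq_lock, flags);	/* never held past here */

	spin_lock_irqsave(sq_lock, flags);
	/* ... generate the send-side completion ... */
	spin_unlock_irqrestore(sq_lock, flags);
}
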
5156     diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
5157     index 0e65f609352e..83364fedbf0a 100644
5158     --- a/drivers/irqchip/irq-brcmstb-l2.c
5159     +++ b/drivers/irqchip/irq-brcmstb-l2.c
5160     @@ -129,8 +129,9 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
5161     struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
5162     struct irq_chip_type *ct = irq_data_get_chip_type(d);
5163     struct brcmstb_l2_intc_data *b = gc->private;
5164     + unsigned long flags;
5165    
5166     - irq_gc_lock(gc);
5167     + irq_gc_lock_irqsave(gc, flags);
5168     /* Save the current mask */
5169     b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
5170    
5171     @@ -139,7 +140,7 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
5172     irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
5173     irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
5174     }
5175     - irq_gc_unlock(gc);
5176     + irq_gc_unlock_irqrestore(gc, flags);
5177     }
5178    
5179     static void brcmstb_l2_intc_resume(struct irq_data *d)
5180     @@ -147,8 +148,9 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
5181     struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
5182     struct irq_chip_type *ct = irq_data_get_chip_type(d);
5183     struct brcmstb_l2_intc_data *b = gc->private;
5184     + unsigned long flags;
5185    
5186     - irq_gc_lock(gc);
5187     + irq_gc_lock_irqsave(gc, flags);
5188     if (ct->chip.irq_ack) {
5189     /* Clear unmasked non-wakeup interrupts */
5190     irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
5191     @@ -158,7 +160,7 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
5192     /* Restore the saved mask */
5193     irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
5194     irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
5195     - irq_gc_unlock(gc);
5196     + irq_gc_unlock_irqrestore(gc, flags);
5197     }
5198    
5199     static int __init brcmstb_l2_intc_of_init(struct device_node *np,
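
The brcmstb-l2 suspend/resume paths may be entered with interrupts enabled, while plain irq_gc_lock() assumes they are already off; the irqsave variants are safe in either context. Sketch:

#include <linux/irq.h>

static void suspend_sketch(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	irq_gc_lock_irqsave(gc, flags);
	/* ... save and rewrite the mask registers ... */
	irq_gc_unlock_irqrestore(gc, flags);
}
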
5200     diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
5201     index c3aba3fc818d..f867d41b0aa1 100644
5202     --- a/drivers/irqchip/irq-gic-v3-its.c
5203     +++ b/drivers/irqchip/irq-gic-v3-its.c
5204     @@ -1955,6 +1955,8 @@ static int its_alloc_tables(struct its_node *its)
5205     indirect = its_parse_indirect_baser(its, baser,
5206     psz, &order,
5207     its->device_ids);
5208     + break;
5209     +
5210     case GITS_BASER_TYPE_VCPU:
5211     indirect = its_parse_indirect_baser(its, baser,
5212     psz, &order,
5213     diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
5214     index 956004366699..886710043025 100644
5215     --- a/drivers/md/bcache/extents.c
5216     +++ b/drivers/md/bcache/extents.c
5217     @@ -538,6 +538,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
5218     {
5219     struct btree *b = container_of(bk, struct btree, keys);
5220     unsigned int i, stale;
5221     + char buf[80];
5222    
5223     if (!KEY_PTRS(k) ||
5224     bch_extent_invalid(bk, k))
5225     @@ -547,19 +548,19 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
5226     if (!ptr_available(b->c, k, i))
5227     return true;
5228    
5229     - if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
5230     - return false;
5231     -
5232     for (i = 0; i < KEY_PTRS(k); i++) {
5233     stale = ptr_stale(b->c, k, i);
5234    
5235     + if (stale && KEY_DIRTY(k)) {
5236     + bch_extent_to_text(buf, sizeof(buf), k);
5237     + pr_info("stale dirty pointer, stale %u, key: %s",
5238     + stale, buf);
5239     + }
5240     +
5241     btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
5242     "key too stale: %i, need_gc %u",
5243     stale, b->c->need_gc);
5244    
5245     - btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
5246     - b, "stale dirty pointer");
5247     -
5248     if (stale)
5249     return true;
5250    
5251     diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
5252     index 15070412a32e..f101bfe8657a 100644
5253     --- a/drivers/md/bcache/request.c
5254     +++ b/drivers/md/bcache/request.c
5255     @@ -392,10 +392,11 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
5256    
5257     /*
5258     * Flag for bypass if the IO is for read-ahead or background,
5259     - * unless the read-ahead request is for metadata (eg, for gfs2).
5260     + * unless the read-ahead request is for metadata
5261     + * (eg, for gfs2 or xfs).
5262     */
5263     if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
5264     - !(bio->bi_opf & REQ_PRIO))
5265     + !(bio->bi_opf & (REQ_META|REQ_PRIO)))
5266     goto skip;
5267    
5268     if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
5269     @@ -877,7 +878,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
5270     }
5271    
5272     if (!(bio->bi_opf & REQ_RAHEAD) &&
5273     - !(bio->bi_opf & REQ_PRIO) &&
5274     + !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
5275     s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
5276     reada = min_t(sector_t, dc->readahead >> 9,
5277     get_capacity(bio->bi_disk) - bio_end_sector(bio));
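
Both bcache request.c hunks widen the metadata test from REQ_PRIO alone to REQ_META|REQ_PRIO, since filesystems such as gfs2 and xfs tag metadata read-ahead with REQ_META. The bypass predicate, as a sketch:

#include <linux/blk_types.h>

/* Bypass the cache for read-ahead/background IO unless the filesystem
 * marked it as metadata or high priority. */
static bool bypass_readahead_sketch(const struct bio *bio)
{
	return (bio->bi_opf & (REQ_RAHEAD | REQ_BACKGROUND)) &&
	       !(bio->bi_opf & (REQ_META | REQ_PRIO));
}
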
5278     diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
5279     index 6a743d3bb338..4e4c6810dc3c 100644
5280     --- a/drivers/md/bcache/writeback.h
5281     +++ b/drivers/md/bcache/writeback.h
5282     @@ -71,6 +71,9 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
5283     in_use > bch_cutoff_writeback_sync)
5284     return false;
5285    
5286     + if (bio_op(bio) == REQ_OP_DISCARD)
5287     + return false;
5288     +
5289     if (dc->partial_stripes_expensive &&
5290     bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
5291     bio_sectors(bio)))
5292     diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
5293     index 457200ca6287..2e823252d797 100644
5294     --- a/drivers/md/dm-integrity.c
5295     +++ b/drivers/md/dm-integrity.c
5296     @@ -1368,8 +1368,8 @@ again:
5297     checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
5298     if (unlikely(r)) {
5299     if (r > 0) {
5300     - DMERR("Checksum failed at sector 0x%llx",
5301     - (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
5302     + DMERR_LIMIT("Checksum failed at sector 0x%llx",
5303     + (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
5304     r = -EILSEQ;
5305     atomic64_inc(&ic->number_of_mismatches);
5306     }
5307     @@ -1561,8 +1561,8 @@ retry_kmap:
5308    
5309     integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
5310     if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
5311     - DMERR("Checksum failed when reading from journal, at sector 0x%llx",
5312     - (unsigned long long)logical_sector);
5313     + DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
5314     + (unsigned long long)logical_sector);
5315     }
5316     }
5317     #endif
5318     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
5319     index ecef42bfe19d..3b6880dd648d 100644
5320     --- a/drivers/md/raid10.c
5321     +++ b/drivers/md/raid10.c
5322     @@ -3939,6 +3939,8 @@ static int raid10_run(struct mddev *mddev)
5323     set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5324     mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5325     "reshape");
5326     + if (!mddev->sync_thread)
5327     + goto out_free_conf;
5328     }
5329    
5330     return 0;
5331     diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
5332     index cecea901ab8c..5b68f2d0da60 100644
5333     --- a/drivers/md/raid5.c
5334     +++ b/drivers/md/raid5.c
5335     @@ -7402,6 +7402,8 @@ static int raid5_run(struct mddev *mddev)
5336     set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5337     mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5338     "reshape");
5339     + if (!mddev->sync_thread)
5340     + goto abort;
5341     }
5342    
5343     /* Ok, everything is just fine now */
5344     diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
5345     index 96807e134886..8abb1a510a81 100644
5346     --- a/drivers/media/dvb-frontends/lgdt330x.c
5347     +++ b/drivers/media/dvb-frontends/lgdt330x.c
5348     @@ -783,7 +783,7 @@ static int lgdt3303_read_status(struct dvb_frontend *fe,
5349    
5350     if ((buf[0] & 0x02) == 0x00)
5351     *status |= FE_HAS_SYNC;
5352     - if ((buf[0] & 0xfd) == 0x01)
5353     + if ((buf[0] & 0x01) == 0x01)
5354     *status |= FE_HAS_VITERBI | FE_HAS_LOCK;
5355     break;
5356     default:
5357     diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
5358     index b168bf3635b6..8b0b8b5aa531 100644
5359     --- a/drivers/media/i2c/cx25840/cx25840-core.c
5360     +++ b/drivers/media/i2c/cx25840/cx25840-core.c
5361     @@ -5216,8 +5216,9 @@ static int cx25840_probe(struct i2c_client *client,
5362     * those extra inputs. So, let's add it only when needed.
5363     */
5364     state->pads[CX25840_PAD_INPUT].flags = MEDIA_PAD_FL_SINK;
5365     + state->pads[CX25840_PAD_INPUT].sig_type = PAD_SIGNAL_ANALOG;
5366     state->pads[CX25840_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
5367     - state->pads[CX25840_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
5368     + state->pads[CX25840_PAD_VID_OUT].sig_type = PAD_SIGNAL_DV;
5369     sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
5370    
5371     ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(state->pads),
5372     diff --git a/drivers/media/i2c/cx25840/cx25840-core.h b/drivers/media/i2c/cx25840/cx25840-core.h
5373     index c323b1af1f83..9efefa15d090 100644
5374     --- a/drivers/media/i2c/cx25840/cx25840-core.h
5375     +++ b/drivers/media/i2c/cx25840/cx25840-core.h
5376     @@ -40,7 +40,6 @@ enum cx25840_model {
5377     enum cx25840_media_pads {
5378     CX25840_PAD_INPUT,
5379     CX25840_PAD_VID_OUT,
5380     - CX25840_PAD_VBI_OUT,
5381    
5382     CX25840_NUM_PADS
5383     };
5384     diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
5385     index bef3f3aae0ed..9f8fc1ad9b1a 100644
5386     --- a/drivers/media/i2c/ov5640.c
5387     +++ b/drivers/media/i2c/ov5640.c
5388     @@ -1893,7 +1893,7 @@ static void ov5640_reset(struct ov5640_dev *sensor)
5389     usleep_range(1000, 2000);
5390    
5391     gpiod_set_value_cansleep(sensor->reset_gpio, 0);
5392     - usleep_range(5000, 10000);
5393     + usleep_range(20000, 25000);
5394     }
5395    
5396     static int ov5640_set_power_on(struct ov5640_dev *sensor)
5397     diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
5398     index 6950585edb5a..d16f54cdc3b0 100644
5399     --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
5400     +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
5401     @@ -793,7 +793,7 @@ static const struct regmap_config sun6i_csi_regmap_config = {
5402     .reg_bits = 32,
5403     .reg_stride = 4,
5404     .val_bits = 32,
5405     - .max_register = 0x1000,
5406     + .max_register = 0x9c,
5407     };
5408    
5409     static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev,
5410     diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
5411     index 4b2e3de7856e..c4fc8e7d365a 100644
5412     --- a/drivers/media/platform/vimc/Makefile
5413     +++ b/drivers/media/platform/vimc/Makefile
5414     @@ -5,6 +5,7 @@ vimc_common-objs := vimc-common.o
5415     vimc_debayer-objs := vimc-debayer.o
5416     vimc_scaler-objs := vimc-scaler.o
5417     vimc_sensor-objs := vimc-sensor.o
5418     +vimc_streamer-objs := vimc-streamer.o
5419    
5420     obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc-debayer.o \
5421     - vimc_scaler.o vimc_sensor.o
5422     + vimc_scaler.o vimc_sensor.o vimc_streamer.o
5423     diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
5424     index 3f7e9ed56633..80d7515ec420 100644
5425     --- a/drivers/media/platform/vimc/vimc-capture.c
5426     +++ b/drivers/media/platform/vimc/vimc-capture.c
5427     @@ -24,6 +24,7 @@
5428     #include <media/videobuf2-vmalloc.h>
5429    
5430     #include "vimc-common.h"
5431     +#include "vimc-streamer.h"
5432    
5433     #define VIMC_CAP_DRV_NAME "vimc-capture"
5434    
5435     @@ -44,7 +45,7 @@ struct vimc_cap_device {
5436     spinlock_t qlock;
5437     struct mutex lock;
5438     u32 sequence;
5439     - struct media_pipeline pipe;
5440     + struct vimc_stream stream;
5441     };
5442    
5443     static const struct v4l2_pix_format fmt_default = {
5444     @@ -248,14 +249,13 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
5445     vcap->sequence = 0;
5446    
5447     /* Start the media pipeline */
5448     - ret = media_pipeline_start(entity, &vcap->pipe);
5449     + ret = media_pipeline_start(entity, &vcap->stream.pipe);
5450     if (ret) {
5451     vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
5452     return ret;
5453     }
5454    
5455     - /* Enable streaming from the pipe */
5456     - ret = vimc_pipeline_s_stream(&vcap->vdev.entity, 1);
5457     + ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
5458     if (ret) {
5459     media_pipeline_stop(entity);
5460     vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
5461     @@ -273,8 +273,7 @@ static void vimc_cap_stop_streaming(struct vb2_queue *vq)
5462     {
5463     struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
5464    
5465     - /* Disable streaming from the pipe */
5466     - vimc_pipeline_s_stream(&vcap->vdev.entity, 0);
5467     + vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0);
5468    
5469     /* Stop the media pipeline */
5470     media_pipeline_stop(&vcap->vdev.entity);
5471     @@ -355,8 +354,8 @@ static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
5472     kfree(vcap);
5473     }
5474    
5475     -static void vimc_cap_process_frame(struct vimc_ent_device *ved,
5476     - struct media_pad *sink, const void *frame)
5477     +static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
5478     + const void *frame)
5479     {
5480     struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
5481     ved);
5482     @@ -370,7 +369,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
5483     typeof(*vimc_buf), list);
5484     if (!vimc_buf) {
5485     spin_unlock(&vcap->qlock);
5486     - return;
5487     + return ERR_PTR(-EAGAIN);
5488     }
5489    
5490     /* Remove this entry from the list */
5491     @@ -391,6 +390,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
5492     vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0,
5493     vcap->format.sizeimage);
5494     vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);
5495     + return NULL;
5496     }
5497    
5498     static int vimc_cap_comp_bind(struct device *comp, struct device *master,
5499     diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
5500     index 867e24dbd6b5..c1a74bb2df58 100644
5501     --- a/drivers/media/platform/vimc/vimc-common.c
5502     +++ b/drivers/media/platform/vimc/vimc-common.c
5503     @@ -207,41 +207,6 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
5504     }
5505     EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
5506    
5507     -int vimc_propagate_frame(struct media_pad *src, const void *frame)
5508     -{
5509     - struct media_link *link;
5510     -
5511     - if (!(src->flags & MEDIA_PAD_FL_SOURCE))
5512     - return -EINVAL;
5513     -
5514     - /* Send this frame to all sink pads that are direct linked */
5515     - list_for_each_entry(link, &src->entity->links, list) {
5516     - if (link->source == src &&
5517     - (link->flags & MEDIA_LNK_FL_ENABLED)) {
5518     - struct vimc_ent_device *ved = NULL;
5519     - struct media_entity *entity = link->sink->entity;
5520     -
5521     - if (is_media_entity_v4l2_subdev(entity)) {
5522     - struct v4l2_subdev *sd =
5523     - container_of(entity, struct v4l2_subdev,
5524     - entity);
5525     - ved = v4l2_get_subdevdata(sd);
5526     - } else if (is_media_entity_v4l2_video_device(entity)) {
5527     - struct video_device *vdev =
5528     - container_of(entity,
5529     - struct video_device,
5530     - entity);
5531     - ved = video_get_drvdata(vdev);
5532     - }
5533     - if (ved && ved->process_frame)
5534     - ved->process_frame(ved, link->sink, frame);
5535     - }
5536     - }
5537     -
5538     - return 0;
5539     -}
5540     -EXPORT_SYMBOL_GPL(vimc_propagate_frame);
5541     -
5542     /* Helper function to allocate and initialize pads */
5543     struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
5544     {
5545     diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
5546     index 2e9981b18166..6ed969d9efbb 100644
5547     --- a/drivers/media/platform/vimc/vimc-common.h
5548     +++ b/drivers/media/platform/vimc/vimc-common.h
5549     @@ -113,23 +113,12 @@ struct vimc_pix_map {
5550     struct vimc_ent_device {
5551     struct media_entity *ent;
5552     struct media_pad *pads;
5553     - void (*process_frame)(struct vimc_ent_device *ved,
5554     - struct media_pad *sink, const void *frame);
5555     + void * (*process_frame)(struct vimc_ent_device *ved,
5556     + const void *frame);
5557     void (*vdev_get_format)(struct vimc_ent_device *ved,
5558     struct v4l2_pix_format *fmt);
5559     };
5560    
5561     -/**
5562     - * vimc_propagate_frame - propagate a frame through the topology
5563     - *
5564     - * @src: the source pad where the frame is being originated
5565     - * @frame: the frame to be propagated
5566     - *
5567     - * This function will call the process_frame callback from the vimc_ent_device
5568     - * struct of the nodes directly connected to the @src pad
5569     - */
5570     -int vimc_propagate_frame(struct media_pad *src, const void *frame);
5571     -
5572     /**
5573     * vimc_pads_init - initialize pads
5574     *
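
The vimc hunks that follow convert frame delivery from a push model (vimc_propagate_frame() fanning frames out from source pads, plus a kthread inside the sensor) to a pull model: process_frame() now returns the frame it produced, and a dedicated streamer walks the pipeline feeding each stage's output to the next. vimc-streamer.c itself is added by this patch but not quoted in this section, so the loop below is only a plausible shape, with the stream layout and names assumed:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include "vimc-common.h"

#define PIPELINE_MAX 16			/* assumed bound */

struct stream_sketch {
	struct vimc_ent_device *ved_pipeline[PIPELINE_MAX];
	unsigned int pipe_size;
};

static int streamer_thread_sketch(void *data)
{
	struct stream_sketch *stream = data;
	void *frame = NULL;
	int i;

	while (!kthread_should_stop()) {
		/* walk from the source entity towards the capture node */
		for (i = stream->pipe_size - 1; i >= 0; i--) {
			struct vimc_ent_device *ved = stream->ved_pipeline[i];

			frame = ved->process_frame(ved, frame);
			if (IS_ERR_OR_NULL(frame))
				break;	/* no buffer queued or sink done */
		}
		schedule_timeout_uninterruptible(HZ / 60);	/* ~60 fps */
	}
	return 0;
}
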
5575     diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
5576     index 77887f66f323..7d77c63b99d2 100644
5577     --- a/drivers/media/platform/vimc/vimc-debayer.c
5578     +++ b/drivers/media/platform/vimc/vimc-debayer.c
5579     @@ -321,7 +321,6 @@ static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb,
5580     static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
5581     {
5582     struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
5583     - int ret;
5584    
5585     if (enable) {
5586     const struct vimc_pix_map *vpix;
5587     @@ -351,22 +350,10 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
5588     if (!vdeb->src_frame)
5589     return -ENOMEM;
5590    
5591     - /* Turn the stream on in the subdevices directly connected */
5592     - ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 1);
5593     - if (ret) {
5594     - vfree(vdeb->src_frame);
5595     - vdeb->src_frame = NULL;
5596     - return ret;
5597     - }
5598     } else {
5599     if (!vdeb->src_frame)
5600     return 0;
5601    
5602     - /* Disable streaming from the pipe */
5603     - ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 0);
5604     - if (ret)
5605     - return ret;
5606     -
5607     vfree(vdeb->src_frame);
5608     vdeb->src_frame = NULL;
5609     }
5610     @@ -480,9 +467,8 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
5611     }
5612     }
5613    
5614     -static void vimc_deb_process_frame(struct vimc_ent_device *ved,
5615     - struct media_pad *sink,
5616     - const void *sink_frame)
5617     +static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
5618     + const void *sink_frame)
5619     {
5620     struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
5621     ved);
5622     @@ -491,7 +477,7 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved,
5623    
5624     /* If the stream in this node is not active, just return */
5625     if (!vdeb->src_frame)
5626     - return;
5627     + return ERR_PTR(-EINVAL);
5628    
5629     for (i = 0; i < vdeb->sink_fmt.height; i++)
5630     for (j = 0; j < vdeb->sink_fmt.width; j++) {
5631     @@ -499,12 +485,8 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved,
5632     vdeb->set_rgb_src(vdeb, i, j, rgb);
5633     }
5634    
5635     - /* Propagate the frame through all source pads */
5636     - for (i = 1; i < vdeb->sd.entity.num_pads; i++) {
5637     - struct media_pad *pad = &vdeb->sd.entity.pads[i];
5638     + return vdeb->src_frame;
5639    
5640     - vimc_propagate_frame(pad, vdeb->src_frame);
5641     - }
5642     }
5643    
5644     static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
5645     diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
5646     index b0952ee86296..39b2a73dfcc1 100644
5647     --- a/drivers/media/platform/vimc/vimc-scaler.c
5648     +++ b/drivers/media/platform/vimc/vimc-scaler.c
5649     @@ -217,7 +217,6 @@ static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
5650     static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
5651     {
5652     struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
5653     - int ret;
5654    
5655     if (enable) {
5656     const struct vimc_pix_map *vpix;
5657     @@ -245,22 +244,10 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
5658     if (!vsca->src_frame)
5659     return -ENOMEM;
5660    
5661     - /* Turn the stream on in the subdevices directly connected */
5662     - ret = vimc_pipeline_s_stream(&vsca->sd.entity, 1);
5663     - if (ret) {
5664     - vfree(vsca->src_frame);
5665     - vsca->src_frame = NULL;
5666     - return ret;
5667     - }
5668     } else {
5669     if (!vsca->src_frame)
5670     return 0;
5671    
5672     - /* Disable streaming from the pipe */
5673     - ret = vimc_pipeline_s_stream(&vsca->sd.entity, 0);
5674     - if (ret)
5675     - return ret;
5676     -
5677     vfree(vsca->src_frame);
5678     vsca->src_frame = NULL;
5679     }
5680     @@ -346,26 +333,19 @@ static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
5681     vimc_sca_scale_pix(vsca, i, j, sink_frame);
5682     }
5683    
5684     -static void vimc_sca_process_frame(struct vimc_ent_device *ved,
5685     - struct media_pad *sink,
5686     - const void *sink_frame)
5687     +static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
5688     + const void *sink_frame)
5689     {
5690     struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
5691     ved);
5692     - unsigned int i;
5693    
5694     /* If the stream in this node is not active, just return */
5695     if (!vsca->src_frame)
5696     - return;
5697     + return ERR_PTR(-EINVAL);
5698    
5699     vimc_sca_fill_src_frame(vsca, sink_frame);
5700    
5701     - /* Propagate the frame through all source pads */
5702     - for (i = 1; i < vsca->sd.entity.num_pads; i++) {
5703     - struct media_pad *pad = &vsca->sd.entity.pads[i];
5704     -
5705     - vimc_propagate_frame(pad, vsca->src_frame);
5706     - }
5707     + return vsca->src_frame;
5708     };
5709    
5710     static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
5711     diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
5712     index 32ca9c6172b1..93961a1e694f 100644
5713     --- a/drivers/media/platform/vimc/vimc-sensor.c
5714     +++ b/drivers/media/platform/vimc/vimc-sensor.c
5715     @@ -16,8 +16,6 @@
5716     */
5717    
5718     #include <linux/component.h>
5719     -#include <linux/freezer.h>
5720     -#include <linux/kthread.h>
5721     #include <linux/module.h>
5722     #include <linux/mod_devicetable.h>
5723     #include <linux/platform_device.h>
5724     @@ -201,38 +199,27 @@ static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = {
5725     .set_fmt = vimc_sen_set_fmt,
5726     };
5727    
5728     -static int vimc_sen_tpg_thread(void *data)
5729     +static void *vimc_sen_process_frame(struct vimc_ent_device *ved,
5730     + const void *sink_frame)
5731     {
5732     - struct vimc_sen_device *vsen = data;
5733     - unsigned int i;
5734     -
5735     - set_freezable();
5736     - set_current_state(TASK_UNINTERRUPTIBLE);
5737     -
5738     - for (;;) {
5739     - try_to_freeze();
5740     - if (kthread_should_stop())
5741     - break;
5742     -
5743     - tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
5744     + struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device,
5745     + ved);
5746     + const struct vimc_pix_map *vpix;
5747     + unsigned int frame_size;
5748    
5749     - /* Send the frame to all source pads */
5750     - for (i = 0; i < vsen->sd.entity.num_pads; i++)
5751     - vimc_propagate_frame(&vsen->sd.entity.pads[i],
5752     - vsen->frame);
5753     + /* Calculate the frame size */
5754     + vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
5755     + frame_size = vsen->mbus_format.width * vpix->bpp *
5756     + vsen->mbus_format.height;
5757    
5758     - /* 60 frames per second */
5759     - schedule_timeout(HZ/60);
5760     - }
5761     -
5762     - return 0;
5763     + tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
5764     + return vsen->frame;
5765     }
5766    
5767     static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
5768     {
5769     struct vimc_sen_device *vsen =
5770     container_of(sd, struct vimc_sen_device, sd);
5771     - int ret;
5772    
5773     if (enable) {
5774     const struct vimc_pix_map *vpix;
5775     @@ -258,26 +245,8 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
5776     /* configure the test pattern generator */
5777     vimc_sen_tpg_s_format(vsen);
5778    
5779     - /* Initialize the image generator thread */
5780     - vsen->kthread_sen = kthread_run(vimc_sen_tpg_thread, vsen,
5781     - "%s-sen", vsen->sd.v4l2_dev->name);
5782     - if (IS_ERR(vsen->kthread_sen)) {
5783     - dev_err(vsen->dev, "%s: kernel_thread() failed\n",
5784     - vsen->sd.name);
5785     - vfree(vsen->frame);
5786     - vsen->frame = NULL;
5787     - return PTR_ERR(vsen->kthread_sen);
5788     - }
5789     } else {
5790     - if (!vsen->kthread_sen)
5791     - return 0;
5792     -
5793     - /* Stop image generator */
5794     - ret = kthread_stop(vsen->kthread_sen);
5795     - if (ret)
5796     - return ret;
5797    
5798     - vsen->kthread_sen = NULL;
5799     vfree(vsen->frame);
5800     vsen->frame = NULL;
5801     return 0;
5802     @@ -413,6 +382,7 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master,
5803     if (ret)
5804     goto err_free_hdl;
5805    
5806     + vsen->ved.process_frame = vimc_sen_process_frame;
5807     dev_set_drvdata(comp, &vsen->ved);
5808     vsen->dev = comp;
5809    
5810     diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
5811     new file mode 100644
5812     index 000000000000..fcc897fb247b
5813     --- /dev/null
5814     +++ b/drivers/media/platform/vimc/vimc-streamer.c
5815     @@ -0,0 +1,188 @@
5816     +// SPDX-License-Identifier: GPL-2.0+
5817     +/*
5818     + * vimc-streamer.c Virtual Media Controller Driver
5819     + *
5820     + * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
5821     + *
5822     + */
5823     +
5824     +#include <linux/init.h>
5825     +#include <linux/module.h>
5826     +#include <linux/freezer.h>
5827     +#include <linux/kthread.h>
5828     +
5829     +#include "vimc-streamer.h"
5830     +
5831     +/**
5832     + * vimc_get_source_entity - get the entity connected with the first sink pad
5833     + *
5834     + * @ent: reference media_entity
5835     + *
5836     + * Helper function that returns the media entity containing the source pad
5837     + * linked with the first sink pad from the given media entity pad list.
5838     + */
5839     +static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
5840     +{
5841     + struct media_pad *pad;
5842     + int i;
5843     +
5844     + for (i = 0; i < ent->num_pads; i++) {
5845     + if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
5846     + continue;
5847     + pad = media_entity_remote_pad(&ent->pads[i]);
5848     + return pad ? pad->entity : NULL;
5849     + }
5850     + return NULL;
5851     +}
5852     +
5853     +/*
5854     + * vimc_streamer_pipeline_terminate - disable the stream in every ved of the pipeline
5855     + *
5856     + * @stream: the pointer to the stream structure with the pipeline to be
5857     + * disabled.
5858     + *
5859     + * Calls s_stream to disable the stream in each entity of the pipeline.
5860     + *
5861     + */
5862     +static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
5863     +{
5864     + struct media_entity *entity;
5865     + struct v4l2_subdev *sd;
5866     +
5867     + while (stream->pipe_size) {
5868     + stream->pipe_size--;
5869     + entity = stream->ved_pipeline[stream->pipe_size]->ent;
5870     + entity = vimc_get_source_entity(entity);
5871     + stream->ved_pipeline[stream->pipe_size] = NULL;
5872     +
5873     + if (!is_media_entity_v4l2_subdev(entity))
5874     + continue;
5875     +
5876     + sd = media_entity_to_v4l2_subdev(entity);
5877     + v4l2_subdev_call(sd, video, s_stream, 0);
5878     + }
5879     +}
5880     +
5881     +/*
5882     + * vimc_streamer_pipeline_init - initializes the stream structure
5883     + *
5884     + * @stream: the pointer to the stream structure to be initialized
5885     + * @ved: the pointer to the vimc entity initializing the stream
5886     + *
5887     + * Initializes the stream structure. Walks through the entity graph to
5888     + * construct the pipeline used later by the streamer thread.
5889     + * Calls s_stream to enable the stream in all entities of the pipeline.
5890     + */
5891     +static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
5892     + struct vimc_ent_device *ved)
5893     +{
5894     + struct media_entity *entity;
5895     + struct video_device *vdev;
5896     + struct v4l2_subdev *sd;
5897     + int ret = 0;
5898     +
5899     + stream->pipe_size = 0;
5900     + while (stream->pipe_size < VIMC_STREAMER_PIPELINE_MAX_SIZE) {
5901     + if (!ved) {
5902     + vimc_streamer_pipeline_terminate(stream);
5903     + return -EINVAL;
5904     + }
5905     + stream->ved_pipeline[stream->pipe_size++] = ved;
5906     +
5907     + entity = vimc_get_source_entity(ved->ent);
5908     + /* Check if the end of the pipeline was reached */
5909     + if (!entity)
5910     + return 0;
5911     +
5912     + if (is_media_entity_v4l2_subdev(entity)) {
5913     + sd = media_entity_to_v4l2_subdev(entity);
5914     + ret = v4l2_subdev_call(sd, video, s_stream, 1);
5915     + if (ret && ret != -ENOIOCTLCMD) {
5916     + vimc_streamer_pipeline_terminate(stream);
5917     + return ret;
5918     + }
5919     + ved = v4l2_get_subdevdata(sd);
5920     + } else {
5921     + vdev = container_of(entity,
5922     + struct video_device,
5923     + entity);
5924     + ved = video_get_drvdata(vdev);
5925     + }
5926     + }
5927     +
5928     + vimc_streamer_pipeline_terminate(stream);
5929     + return -EINVAL;
5930     +}
5931     +
5932     +static int vimc_streamer_thread(void *data)
5933     +{
5934     + struct vimc_stream *stream = data;
5935     + int i;
5936     +
5937     + set_freezable();
5938     + set_current_state(TASK_UNINTERRUPTIBLE);
5939     +
5940     + for (;;) {
5941     + try_to_freeze();
5942     + if (kthread_should_stop())
5943     + break;
5944     +
5945     + for (i = stream->pipe_size - 1; i >= 0; i--) {
5946     + stream->frame = stream->ved_pipeline[i]->process_frame(
5947     + stream->ved_pipeline[i],
5948     + stream->frame);
5949     + if (!stream->frame)
5950     + break;
5951     + if (IS_ERR(stream->frame))
5952     + break;
5953     + }
5954     + /* Pace the loop to roughly 60 frames per second */
5955     + schedule_timeout(HZ / 60);
5956     + }
5957     +
5958     + return 0;
5959     +}
5960     +
5961     +int vimc_streamer_s_stream(struct vimc_stream *stream,
5962     + struct vimc_ent_device *ved,
5963     + int enable)
5964     +{
5965     + int ret;
5966     +
5967     + if (!stream || !ved)
5968     + return -EINVAL;
5969     +
5970     + if (enable) {
5971     + if (stream->kthread)
5972     + return 0;
5973     +
5974     + ret = vimc_streamer_pipeline_init(stream, ved);
5975     + if (ret)
5976     + return ret;
5977     +
5978     + stream->kthread = kthread_run(vimc_streamer_thread, stream,
5979     + "vimc-streamer thread");
5980     +
5981     + if (IS_ERR(stream->kthread))
5982     + return PTR_ERR(stream->kthread);
5983     +
5984     + } else {
5985     + if (!stream->kthread)
5986     + return 0;
5987     +
5988     + ret = kthread_stop(stream->kthread);
5989     + if (ret)
5990     + return ret;
5991     +
5992     + stream->kthread = NULL;
5993     +
5994     + vimc_streamer_pipeline_terminate(stream);
5995     + }
5996     +
5997     + return 0;
5998     +}
5999     +EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
6000     +
6001     +MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer");
6002     +MODULE_AUTHOR("Lucas A. M. Magalhães <lucmaga@gmail.com>");
6003     +MODULE_LICENSE("GPL");
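For orientation: vimc_streamer_pipeline_init() starts at the entity that requested streaming and walks sink-to-source, so ved_pipeline[0] holds the capture entity and the highest index holds the sensor (the entity with no source above it). The thread then walks the array backwards, so each frame is generated at the sensor and flows toward the capture device. Illustration only, for a hypothetical sensor -> debayer -> capture graph:

/*
 * After vimc_streamer_pipeline_init():
 *
 *   ved_pipeline[0] = capture   (streaming was started here)
 *   ved_pipeline[1] = debayer
 *   ved_pipeline[2] = sensor    (end of the walk: no source entity)
 *
 * One iteration of vimc_streamer_thread() then does, in effect:
 */
frame = sensor->process_frame(sensor, NULL);    /* TPG generates a frame */
frame = debayer->process_frame(debayer, frame); /* convert to RGB        */
frame = capture->process_frame(capture, frame); /* hand off to userspace */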
6004     diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/platform/vimc/vimc-streamer.h
6005     new file mode 100644
6006     index 000000000000..752af2e2d5a2
6007     --- /dev/null
6008     +++ b/drivers/media/platform/vimc/vimc-streamer.h
6009     @@ -0,0 +1,38 @@
6010     +/* SPDX-License-Identifier: GPL-2.0+ */
6011     +/*
6012     + * vimc-streamer.h Virtual Media Controller Driver
6013     + *
6014     + * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
6015     + *
6016     + */
6017     +
6018     +#ifndef _VIMC_STREAMER_H_
6019     +#define _VIMC_STREAMER_H_
6020     +
6021     +#include <media/media-device.h>
6022     +
6023     +#include "vimc-common.h"
6024     +
6025     +#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16
6026     +
6027     +struct vimc_stream {
6028     + struct media_pipeline pipe;
6029     + struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE];
6030     + unsigned int pipe_size;
6031     + u8 *frame;
6032     + struct task_struct *kthread;
6033     +};
6034     +
6035     +/**
6036     + * vimc_streamer_s_stream - start/stop the stream
6037     + *
6038     + * @stream: the pointer to the stream to start or stop
6039     + * @ved: the last entity of the streamer pipeline
6040     + * @enable: any non-zero value starts the stream, zero stops it
6041     + *
6042     + */
6043     +int vimc_streamer_s_stream(struct vimc_stream *stream,
6044     + struct vimc_ent_device *ved,
6045     + int enable);
6046     +
6047     +#endif //_VIMC_STREAMER_H_
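Callers drive this API from their start/stop streaming paths. A minimal caller sketch, assuming a capture device that embeds a struct vimc_stream (the vimc_cap_device layout shown here is an assumption, not part of this patch):

static int vimc_cap_start_streaming_sketch(struct vimc_cap_device *vcap)
{
	/* Builds ved_pipeline[] and spawns the "vimc-streamer thread". */
	return vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
}

static void vimc_cap_stop_streaming_sketch(struct vimc_cap_device *vcap)
{
	/* Stops the kthread and calls s_stream(0) along the pipeline. */
	vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0);
}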
6048     diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
6049     index 84525ff04745..e314657a1843 100644
6050     --- a/drivers/media/usb/uvc/uvc_video.c
6051     +++ b/drivers/media/usb/uvc/uvc_video.c
6052     @@ -676,6 +676,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
6053     if (!uvc_hw_timestamps_param)
6054     return;
6055    
6056     + /*
6057     + * We will get called from __vb2_queue_cancel() if there are buffers
6058     + * done but not dequeued by the user, but the sample array has already
6059     + * been released at that time. Just bail out in that case.
6060     + */
6061     + if (!clock->samples)
6062     + return;
6063     +
6064     spin_lock_irqsave(&clock->lock, flags);
6065    
6066     if (clock->count < clock->size)
6067     diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
6068     index a530972c5a7e..e0173bf4b0dc 100644
6069     --- a/drivers/mfd/sm501.c
6070     +++ b/drivers/mfd/sm501.c
6071     @@ -1145,6 +1145,9 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
6072     lookup = devm_kzalloc(&pdev->dev,
6073     sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
6074     GFP_KERNEL);
6075     + if (!lookup)
6076     + return -ENOMEM;
6077     +
6078     lookup->dev_id = "i2c-gpio";
6079     if (iic->pin_sda < 32)
6080     lookup->table[0].chip_label = "SM501-LOW";
6081     diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
6082     index 5d28d9e454f5..08f4a512afad 100644
6083     --- a/drivers/misc/cxl/guest.c
6084     +++ b/drivers/misc/cxl/guest.c
6085     @@ -267,6 +267,7 @@ static int guest_reset(struct cxl *adapter)
6086     int i, rc;
6087    
6088     pr_devel("Adapter reset request\n");
6089     + spin_lock(&adapter->afu_list_lock);
6090     for (i = 0; i < adapter->slices; i++) {
6091     if ((afu = adapter->afu[i])) {
6092     pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
6093     @@ -283,6 +284,7 @@ static int guest_reset(struct cxl *adapter)
6094     pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
6095     }
6096     }
6097     + spin_unlock(&adapter->afu_list_lock);
6098     return rc;
6099     }
6100    
6101     diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
6102     index c79ba1c699ad..300531d6136f 100644
6103     --- a/drivers/misc/cxl/pci.c
6104     +++ b/drivers/misc/cxl/pci.c
6105     @@ -1805,7 +1805,7 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
6106     /* There should only be one entry, but go through the list
6107     * anyway
6108     */
6109     - if (afu->phb == NULL)
6110     + if (afu == NULL || afu->phb == NULL)
6111     return result;
6112    
6113     list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
6114     @@ -1832,7 +1832,8 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
6115     {
6116     struct cxl *adapter = pci_get_drvdata(pdev);
6117     struct cxl_afu *afu;
6118     - pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result;
6119     + pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
6120     + pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
6121     int i;
6122    
6123     /* At this point, we could still have an interrupt pending.
6124     @@ -1843,6 +1844,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
6125    
6126     /* If we're permanently dead, give up. */
6127     if (state == pci_channel_io_perm_failure) {
6128     + spin_lock(&adapter->afu_list_lock);
6129     for (i = 0; i < adapter->slices; i++) {
6130     afu = adapter->afu[i];
6131     /*
6132     @@ -1851,6 +1853,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
6133     */
6134     cxl_vphb_error_detected(afu, state);
6135     }
6136     + spin_unlock(&adapter->afu_list_lock);
6137     return PCI_ERS_RESULT_DISCONNECT;
6138     }
6139    
6140     @@ -1932,11 +1935,17 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
6141     * * In slot_reset, free the old resources and allocate new ones.
6142     * * In resume, clear the flag to allow things to start.
6143     */
6144     +
6145     + /* Make sure no one else changes the afu list */
6146     + spin_lock(&adapter->afu_list_lock);
6147     +
6148     for (i = 0; i < adapter->slices; i++) {
6149     afu = adapter->afu[i];
6150    
6151     - afu_result = cxl_vphb_error_detected(afu, state);
6152     + if (afu == NULL)
6153     + continue;
6154    
6155     + afu_result = cxl_vphb_error_detected(afu, state);
6156     cxl_context_detach_all(afu);
6157     cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
6158     pci_deconfigure_afu(afu);
6159     @@ -1948,6 +1957,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
6160     (result == PCI_ERS_RESULT_NEED_RESET))
6161     result = PCI_ERS_RESULT_NONE;
6162     }
6163     + spin_unlock(&adapter->afu_list_lock);
6164    
6165     /* should take the context lock here */
6166     if (cxl_adapter_context_lock(adapter) != 0)
6167     @@ -1980,14 +1990,18 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
6168     */
6169     cxl_adapter_context_unlock(adapter);
6170    
6171     + spin_lock(&adapter->afu_list_lock);
6172     for (i = 0; i < adapter->slices; i++) {
6173     afu = adapter->afu[i];
6174    
6175     + if (afu == NULL)
6176     + continue;
6177     +
6178     if (pci_configure_afu(afu, adapter, pdev))
6179     - goto err;
6180     + goto err_unlock;
6181    
6182     if (cxl_afu_select_best_mode(afu))
6183     - goto err;
6184     + goto err_unlock;
6185    
6186     if (afu->phb == NULL)
6187     continue;
6188     @@ -1999,16 +2013,16 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
6189     ctx = cxl_get_context(afu_dev);
6190    
6191     if (ctx && cxl_release_context(ctx))
6192     - goto err;
6193     + goto err_unlock;
6194    
6195     ctx = cxl_dev_context_init(afu_dev);
6196     if (IS_ERR(ctx))
6197     - goto err;
6198     + goto err_unlock;
6199    
6200     afu_dev->dev.archdata.cxl_ctx = ctx;
6201    
6202     if (cxl_ops->afu_check_and_enable(afu))
6203     - goto err;
6204     + goto err_unlock;
6205    
6206     afu_dev->error_state = pci_channel_io_normal;
6207    
6208     @@ -2029,8 +2043,13 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
6209     result = PCI_ERS_RESULT_DISCONNECT;
6210     }
6211     }
6212     +
6213     + spin_unlock(&adapter->afu_list_lock);
6214     return result;
6215    
6216     +err_unlock:
6217     + spin_unlock(&adapter->afu_list_lock);
6218     +
6219     err:
6220     /* All the bits that happen in both error_detected and cxl_remove
6221     * should be idempotent, so we don't need to worry about leaving a mix
6222     @@ -2051,10 +2070,11 @@ static void cxl_pci_resume(struct pci_dev *pdev)
6223     * This is not the place to be checking if everything came back up
6224     * properly, because there's no return value: do that in slot_reset.
6225     */
6226     + spin_lock(&adapter->afu_list_lock);
6227     for (i = 0; i < adapter->slices; i++) {
6228     afu = adapter->afu[i];
6229    
6230     - if (afu->phb == NULL)
6231     + if (afu == NULL || afu->phb == NULL)
6232     continue;
6233    
6234     list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
6235     @@ -2063,6 +2083,7 @@ static void cxl_pci_resume(struct pci_dev *pdev)
6236     afu_dev->driver->err_handler->resume(afu_dev);
6237     }
6238     }
6239     + spin_unlock(&adapter->afu_list_lock);
6240     }
6241    
6242     static const struct pci_error_handlers cxl_err_handler = {
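All of the cxl hunks above apply one rule: any walk of adapter->afu[] during EEH handling now runs under afu_list_lock, and every slot is checked for NULL, because an AFU can be removed while error recovery is in flight. The shape of that pattern, as a sketch with the per-AFU work elided:

spin_lock(&adapter->afu_list_lock);
for (i = 0; i < adapter->slices; i++) {
	struct cxl_afu *afu = adapter->afu[i];

	if (afu == NULL)        /* slot may be empty mid-teardown */
		continue;

	/* ... per-AFU error_detected/slot_reset/resume work ... */
}
spin_unlock(&adapter->afu_list_lock);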
6243     diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
6244     index fc3872fe7b25..c383322ec2ba 100644
6245     --- a/drivers/misc/mei/bus.c
6246     +++ b/drivers/misc/mei/bus.c
6247     @@ -541,17 +541,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
6248     goto out;
6249     }
6250    
6251     - if (!mei_cl_bus_module_get(cldev)) {
6252     - dev_err(&cldev->dev, "get hw module failed");
6253     - ret = -ENODEV;
6254     - goto out;
6255     - }
6256     -
6257     ret = mei_cl_connect(cl, cldev->me_cl, NULL);
6258     - if (ret < 0) {
6259     + if (ret < 0)
6260     dev_err(&cldev->dev, "cannot connect\n");
6261     - mei_cl_bus_module_put(cldev);
6262     - }
6263    
6264     out:
6265     mutex_unlock(&bus->device_lock);
6266     @@ -614,7 +606,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
6267     if (err < 0)
6268     dev_err(bus->dev, "Could not disconnect from the ME client\n");
6269    
6270     - mei_cl_bus_module_put(cldev);
6271     out:
6272     /* Flush queues and remove any pending read */
6273     mei_cl_flush_queues(cl, NULL);
6274     @@ -725,9 +716,16 @@ static int mei_cl_device_probe(struct device *dev)
6275     if (!id)
6276     return -ENODEV;
6277    
6278     + if (!mei_cl_bus_module_get(cldev)) {
6279     + dev_err(&cldev->dev, "get hw module failed");
6280     + return -ENODEV;
6281     + }
6282     +
6283     ret = cldrv->probe(cldev, id);
6284     - if (ret)
6285     + if (ret) {
6286     + mei_cl_bus_module_put(cldev);
6287     return ret;
6288     + }
6289    
6290     __module_get(THIS_MODULE);
6291     return 0;
6292     @@ -755,6 +753,7 @@ static int mei_cl_device_remove(struct device *dev)
6293    
6294     mei_cldev_unregister_callbacks(cldev);
6295    
6296     + mei_cl_bus_module_put(cldev);
6297     module_put(THIS_MODULE);
6298     dev->driver = NULL;
6299     return ret;
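Net effect of the mei/bus.c hunks: the hardware-module reference taken via mei_cl_bus_module_get() moves out of mei_cldev_enable()/mei_cldev_disable() and into the bus probe/remove path, so its lifetime tracks driver binding rather than client connection. Sketched balance (probe_body() is a hypothetical stand-in for cldrv->probe()):

static int mei_probe_sketch(struct mei_cl_device *cldev)
{
	if (!mei_cl_bus_module_get(cldev))      /* pin the HW module  */
		return -ENODEV;

	if (probe_body(cldev)) {                /* driver probe failed */
		mei_cl_bus_module_put(cldev);   /* drop it on failure  */
		return -ENODEV;
	}

	return 0;  /* the matching put now happens in device remove */
}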
6300     diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
6301     index 8f7616557c97..e6207f614816 100644
6302     --- a/drivers/misc/mei/hbm.c
6303     +++ b/drivers/misc/mei/hbm.c
6304     @@ -1029,29 +1029,36 @@ static void mei_hbm_config_features(struct mei_device *dev)
6305     dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
6306     dev->hbm_f_pg_supported = 1;
6307    
6308     + dev->hbm_f_dc_supported = 0;
6309     if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
6310     dev->hbm_f_dc_supported = 1;
6311    
6312     + dev->hbm_f_ie_supported = 0;
6313     if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
6314     dev->hbm_f_ie_supported = 1;
6315    
6316     /* disconnect on connect timeout instead of link reset */
6317     + dev->hbm_f_dot_supported = 0;
6318     if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
6319     dev->hbm_f_dot_supported = 1;
6320    
6321     /* Notification Event Support */
6322     + dev->hbm_f_ev_supported = 0;
6323     if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
6324     dev->hbm_f_ev_supported = 1;
6325    
6326     /* Fixed Address Client Support */
6327     + dev->hbm_f_fa_supported = 0;
6328     if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
6329     dev->hbm_f_fa_supported = 1;
6330    
6331     /* OS ver message Support */
6332     + dev->hbm_f_os_supported = 0;
6333     if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
6334     dev->hbm_f_os_supported = 1;
6335    
6336     /* DMA Ring Support */
6337     + dev->hbm_f_dr_supported = 0;
6338     if (dev->version.major_version > HBM_MAJOR_VERSION_DR ||
6339     (dev->version.major_version == HBM_MAJOR_VERSION_DR &&
6340     dev->version.minor_version >= HBM_MINOR_VERSION_DR))
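The point of the hbm.c change is re-entry: mei_hbm_config_features() can run again after a link reset, possibly against firmware that negotiates an older HBM version, so each hbm_f_*_supported flag is now cleared before its version test instead of keeping a stale value from the previous session. Reduced to a single flag, the idiom is:

/* Recompute, never inherit, a negotiated feature bit. */
dev->hbm_f_dc_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
	dev->hbm_f_dc_supported = 1;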
6341     diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
6342     index f8240b87df22..f69acb5d4a50 100644
6343     --- a/drivers/misc/vmw_balloon.c
6344     +++ b/drivers/misc/vmw_balloon.c
6345     @@ -1287,7 +1287,7 @@ static void vmballoon_reset(struct vmballoon *b)
6346     vmballoon_pop(b);
6347    
6348     if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
6349     - return;
6350     + goto unlock;
6351    
6352     if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
6353     if (vmballoon_init_batching(b)) {
6354     @@ -1298,7 +1298,7 @@ static void vmballoon_reset(struct vmballoon *b)
6355     * The guest will retry in one second.
6356     */
6357     vmballoon_send_start(b, 0);
6358     - return;
6359     + goto unlock;
6360     }
6361     } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
6362     vmballoon_deinit_batching(b);
6363     @@ -1314,6 +1314,7 @@ static void vmballoon_reset(struct vmballoon *b)
6364     if (vmballoon_send_guest_id(b))
6365     pr_err("failed to send guest ID to the host\n");
6366    
6367     +unlock:
6368     up_write(&b->conf_sem);
6369     }
6370    
6371     diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
6372     index b27a1e620233..1e6b07c176dc 100644
6373     --- a/drivers/mmc/core/core.c
6374     +++ b/drivers/mmc/core/core.c
6375     @@ -2381,9 +2381,9 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
6376     return card->pref_erase;
6377    
6378     max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
6379     - if (max_discard && mmc_can_trim(card)) {
6380     + if (mmc_can_trim(card)) {
6381     max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
6382     - if (max_trim < max_discard)
6383     + if (max_trim < max_discard || max_discard == 0)
6384     max_discard = max_trim;
6385     } else if (max_discard < card->erase_size) {
6386     max_discard = 0;
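To see what the mmc_calc_max_discard() hunk buys: previously a zero max_discard (erase timeout too long for the host) skipped the trim path entirely, leaving discard disabled even when TRIM would have worked. A worked case with illustrative numbers, not taken from the patch:

/*
 * mmc_do_calc_max_discard(card, MMC_ERASE_ARG) -> 0     (erase too slow)
 * mmc_do_calc_max_discard(card, MMC_TRIM_ARG)  -> 4096  (trim fits)
 *
 * Old logic: "max_discard && mmc_can_trim(card)" is false,
 *            so max_discard stays 0 and discard is disabled.
 * New logic: mmc_can_trim() is checked first, and
 *            (max_trim < max_discard || max_discard == 0) is true,
 *            so max_discard becomes 4096 and discard keeps working.
 */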
6387     diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
6388     index 31a351a20dc0..7e2a75c4f36f 100644
6389     --- a/drivers/mmc/host/renesas_sdhi_core.c
6390     +++ b/drivers/mmc/host/renesas_sdhi_core.c
6391     @@ -723,6 +723,13 @@ int renesas_sdhi_probe(struct platform_device *pdev,
6392     host->ops.start_signal_voltage_switch =
6393     renesas_sdhi_start_signal_voltage_switch;
6394     host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27;
6395     +
6396     + /* SDR and HS200/400 registers require HW reset */
6397     + if (of_data && of_data->scc_offset) {
6398     + priv->scc_ctl = host->ctl + of_data->scc_offset;
6399     + host->mmc->caps |= MMC_CAP_HW_RESET;
6400     + host->hw_reset = renesas_sdhi_hw_reset;
6401     + }
6402     }
6403    
6404     /* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
6405     @@ -775,8 +782,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
6406     const struct renesas_sdhi_scc *taps = of_data->taps;
6407     bool hit = false;
6408    
6409     - host->mmc->caps |= MMC_CAP_HW_RESET;
6410     -
6411     for (i = 0; i < of_data->taps_num; i++) {
6412     if (taps[i].clk_rate == 0 ||
6413     taps[i].clk_rate == host->mmc->f_max) {
6414     @@ -789,12 +794,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
6415     if (!hit)
6416     dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
6417    
6418     - priv->scc_ctl = host->ctl + of_data->scc_offset;
6419     host->init_tuning = renesas_sdhi_init_tuning;
6420     host->prepare_tuning = renesas_sdhi_prepare_tuning;
6421     host->select_tuning = renesas_sdhi_select_tuning;
6422     host->check_scc_error = renesas_sdhi_check_scc_error;
6423     - host->hw_reset = renesas_sdhi_hw_reset;
6424     host->prepare_hs400_tuning =
6425     renesas_sdhi_prepare_hs400_tuning;
6426     host->hs400_downgrade = renesas_sdhi_disable_scc;
6427     diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
6428     index 00d41b312c79..a6f25c796aed 100644
6429     --- a/drivers/mmc/host/sdhci-esdhc-imx.c
6430     +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
6431     @@ -979,6 +979,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
6432     case MMC_TIMING_UHS_SDR25:
6433     case MMC_TIMING_UHS_SDR50:
6434     case MMC_TIMING_UHS_SDR104:
6435     + case MMC_TIMING_MMC_HS:
6436     case MMC_TIMING_MMC_HS200:
6437     writel(m, host->ioaddr + ESDHC_MIX_CTRL);
6438     break;
6439     diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
6440     index ddc1f9ca8ebc..4543ac97f077 100644
6441     --- a/drivers/net/dsa/lantiq_gswip.c
6442     +++ b/drivers/net/dsa/lantiq_gswip.c
6443     @@ -1069,10 +1069,10 @@ static int gswip_probe(struct platform_device *pdev)
6444     version = gswip_switch_r(priv, GSWIP_VERSION);
6445    
6446     /* bring up the mdio bus */
6447     - gphy_fw_np = of_find_compatible_node(pdev->dev.of_node, NULL,
6448     - "lantiq,gphy-fw");
6449     + gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
6450     if (gphy_fw_np) {
6451     err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
6452     + of_node_put(gphy_fw_np);
6453     if (err) {
6454     dev_err(dev, "gphy fw probe failed\n");
6455     return err;
6456     @@ -1080,13 +1080,12 @@ static int gswip_probe(struct platform_device *pdev)
6457     }
6458    
6459     /* bring up the mdio bus */
6460     - mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
6461     - "lantiq,xrx200-mdio");
6462     + mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
6463     if (mdio_np) {
6464     err = gswip_mdio(priv, mdio_np);
6465     if (err) {
6466     dev_err(dev, "mdio probe failed\n");
6467     - goto gphy_fw;
6468     + goto put_mdio_node;
6469     }
6470     }
6471    
6472     @@ -1099,7 +1098,7 @@ static int gswip_probe(struct platform_device *pdev)
6473     dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
6474     priv->hw_info->cpu_port);
6475     err = -EINVAL;
6476     - goto mdio_bus;
6477     + goto disable_switch;
6478     }
6479    
6480     platform_set_drvdata(pdev, priv);
6481     @@ -1109,10 +1108,14 @@ static int gswip_probe(struct platform_device *pdev)
6482     (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
6483     return 0;
6484    
6485     +disable_switch:
6486     + gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
6487     + dsa_unregister_switch(priv->ds);
6488     mdio_bus:
6489     if (mdio_np)
6490     mdiobus_unregister(priv->ds->slave_mii_bus);
6491     -gphy_fw:
6492     +put_mdio_node:
6493     + of_node_put(mdio_np);
6494     for (i = 0; i < priv->num_gphy_fw; i++)
6495     gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
6496     return err;
6497     @@ -1131,8 +1134,10 @@ static int gswip_remove(struct platform_device *pdev)
6498    
6499     dsa_unregister_switch(priv->ds);
6500    
6501     - if (priv->ds->slave_mii_bus)
6502     + if (priv->ds->slave_mii_bus) {
6503     mdiobus_unregister(priv->ds->slave_mii_bus);
6504     + of_node_put(priv->ds->slave_mii_bus->dev.of_node);
6505     + }
6506    
6507     for (i = 0; i < priv->num_gphy_fw; i++)
6508     gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
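The gswip changes are OF reference-count hygiene: of_get_compatible_child() looks only at direct children and returns a node whose reference the caller owns, so each successful lookup must be matched by of_node_put() on every exit path (here the gphy-fw node is put right after use, while the mdio node is kept until remove because the MII bus still holds it). A simplified sketch of the balanced pattern, with use_child() as a hypothetical consumer:

struct device_node *np;
int err;

np = of_get_compatible_child(dev->of_node, "vendor,block");
if (np) {
	err = use_child(np);    /* consume the node while we hold it */
	of_node_put(np);        /* drop our reference in all cases   */
	if (err)
		return err;
}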
6509     diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
6510     index 789337ea676a..6ede6168bd85 100644
6511     --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
6512     +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
6513     @@ -433,8 +433,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
6514     skb_tail_pointer(skb),
6515     MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
6516    
6517     - cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
6518     -
6519     lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n",
6520     cardp->rx_urb);
6521     ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
6522     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
6523     index c08bf371e527..7c9dfa54fee8 100644
6524     --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
6525     +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
6526     @@ -309,7 +309,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
6527     ccmp_pn[6] = pn >> 32;
6528     ccmp_pn[7] = pn >> 40;
6529     txwi->iv = *((__le32 *)&ccmp_pn[0]);
6530     - txwi->eiv = *((__le32 *)&ccmp_pn[1]);
6531     + txwi->eiv = *((__le32 *)&ccmp_pn[4]);
6532     }
6533    
6534     spin_lock_bh(&dev->mt76.lock);
6535     diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
6536     index a11bf4e6b451..6d6e9a12150b 100644
6537     --- a/drivers/nvdimm/label.c
6538     +++ b/drivers/nvdimm/label.c
6539     @@ -755,7 +755,7 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
6540    
6541     static int __pmem_label_update(struct nd_region *nd_region,
6542     struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
6543     - int pos)
6544     + int pos, unsigned long flags)
6545     {
6546     struct nd_namespace_common *ndns = &nspm->nsio.common;
6547     struct nd_interleave_set *nd_set = nd_region->nd_set;
6548     @@ -796,7 +796,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
6549     memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
6550     if (nspm->alt_name)
6551     memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
6552     - nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
6553     + nd_label->flags = __cpu_to_le32(flags);
6554     nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
6555     nd_label->position = __cpu_to_le16(pos);
6556     nd_label->isetcookie = __cpu_to_le64(cookie);
6557     @@ -1249,13 +1249,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
6558     int nd_pmem_namespace_label_update(struct nd_region *nd_region,
6559     struct nd_namespace_pmem *nspm, resource_size_t size)
6560     {
6561     - int i;
6562     + int i, rc;
6563    
6564     for (i = 0; i < nd_region->ndr_mappings; i++) {
6565     struct nd_mapping *nd_mapping = &nd_region->mapping[i];
6566     struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
6567     struct resource *res;
6568     - int rc, count = 0;
6569     + int count = 0;
6570    
6571     if (size == 0) {
6572     rc = del_labels(nd_mapping, nspm->uuid);
6573     @@ -1273,7 +1273,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
6574     if (rc < 0)
6575     return rc;
6576    
6577     - rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
6578     + rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
6579     + NSLABEL_FLAG_UPDATING);
6580     + if (rc)
6581     + return rc;
6582     + }
6583     +
6584     + if (size == 0)
6585     + return 0;
6586     +
6587     + /* Clear the UPDATING flag per UEFI 2.7 expectations */
6588     + for (i = 0; i < nd_region->ndr_mappings; i++) {
6589     + struct nd_mapping *nd_mapping = &nd_region->mapping[i];
6590     +
6591     + rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
6592     if (rc)
6593     return rc;
6594     }
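The label.c rework turns the namespace label write into the two-phase update that UEFI 2.7 expects for interleave sets: phase one writes each mapping's label with NSLABEL_FLAG_UPDATING set, and only after every mapping succeeds does phase two rewrite them with the flag cleared, so an interrupted update remains detectable. In outline (a sketch with error handling elided, not a literal excerpt):

/* Phase 1: mark every label as in-flight. */
for (i = 0; i < nd_region->ndr_mappings; i++)
	__pmem_label_update(nd_region, &nd_region->mapping[i], nspm,
			    i, NSLABEL_FLAG_UPDATING);

/* Phase 2: all writes landed, commit by clearing the flag. */
for (i = 0; i < nd_region->ndr_mappings; i++)
	__pmem_label_update(nd_region, &nd_region->mapping[i], nspm,
			    i, 0);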
6595     diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
6596     index 4b077555ac70..33a3b23b3db7 100644
6597     --- a/drivers/nvdimm/namespace_devs.c
6598     +++ b/drivers/nvdimm/namespace_devs.c
6599     @@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
6600     bool pmem_should_map_pages(struct device *dev)
6601     {
6602     struct nd_region *nd_region = to_nd_region(dev->parent);
6603     + struct nd_namespace_common *ndns = to_ndns(dev);
6604     struct nd_namespace_io *nsio;
6605    
6606     if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
6607     @@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev)
6608     if (is_nd_pfn(dev) || is_nd_btt(dev))
6609     return false;
6610    
6611     + if (ndns->force_raw)
6612     + return false;
6613     +
6614     nsio = to_nd_namespace_io(dev);
6615     if (region_intersects(nsio->res.start, resource_size(&nsio->res),
6616     IORESOURCE_SYSTEM_RAM,
6617     diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
6618     index 6f22272e8d80..7760c1b91853 100644
6619     --- a/drivers/nvdimm/pfn_devs.c
6620     +++ b/drivers/nvdimm/pfn_devs.c
6621     @@ -593,7 +593,7 @@ static unsigned long init_altmap_base(resource_size_t base)
6622    
6623     static unsigned long init_altmap_reserve(resource_size_t base)
6624     {
6625     - unsigned long reserve = PHYS_PFN(SZ_8K);
6626     + unsigned long reserve = PFN_UP(SZ_8K);
6627     unsigned long base_pfn = PHYS_PFN(base);
6628    
6629     reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
6630     @@ -678,7 +678,7 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trun
6631     if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
6632     IORES_DESC_NONE) == REGION_MIXED
6633     || !IS_ALIGNED(end, nd_pfn->align)
6634     - || nd_region_conflict(nd_region, start, size + adjust))
6635     + || nd_region_conflict(nd_region, start, size))
6636     *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
6637     }
6638    
6639     diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
6640     index f7301bb4ef3b..3ce65927e11c 100644
6641     --- a/drivers/nvmem/core.c
6642     +++ b/drivers/nvmem/core.c
6643     @@ -686,9 +686,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
6644     if (rval)
6645     goto err_remove_cells;
6646    
6647     - rval = blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
6648     - if (rval)
6649     - goto err_remove_cells;
6650     + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
6651    
6652     return nvmem;
6653    
6654     diff --git a/drivers/opp/core.c b/drivers/opp/core.c
6655     index 18f1639dbc4a..f5d2fa195f5f 100644
6656     --- a/drivers/opp/core.c
6657     +++ b/drivers/opp/core.c
6658     @@ -743,7 +743,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
6659     old_freq, freq);
6660    
6661     /* Scaling up? Configure required OPPs before frequency */
6662     - if (freq > old_freq) {
6663     + if (freq >= old_freq) {
6664     ret = _set_required_opps(dev, opp_table, opp);
6665     if (ret)
6666     goto put_opp;
6667     diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
6668     index 9c8249f74479..6296dbb83d47 100644
6669     --- a/drivers/parport/parport_pc.c
6670     +++ b/drivers/parport/parport_pc.c
6671     @@ -1377,7 +1377,7 @@ static struct superio_struct *find_superio(struct parport *p)
6672     {
6673     int i;
6674     for (i = 0; i < NR_SUPERIOS; i++)
6675     - if (superios[i].io != p->base)
6676     + if (superios[i].io == p->base)
6677     return &superios[i];
6678     return NULL;
6679     }
6680     diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
6681     index 721d60a5d9e4..9c5614f21b8e 100644
6682     --- a/drivers/pci/controller/dwc/pcie-designware-host.c
6683     +++ b/drivers/pci/controller/dwc/pcie-designware-host.c
6684     @@ -439,7 +439,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
6685     if (ret)
6686     pci->num_viewport = 2;
6687    
6688     - if (IS_ENABLED(CONFIG_PCI_MSI)) {
6689     + if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
6690     /*
6691     * If a specific SoC driver needs to change the
6692     * default number of vectors, it needs to implement
6693     diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
6694     index d185ea5fe996..a7f703556790 100644
6695     --- a/drivers/pci/controller/dwc/pcie-qcom.c
6696     +++ b/drivers/pci/controller/dwc/pcie-qcom.c
6697     @@ -1228,7 +1228,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
6698    
6699     pcie->ops = of_device_get_match_data(dev);
6700    
6701     - pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
6702     + pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
6703     if (IS_ERR(pcie->reset)) {
6704     ret = PTR_ERR(pcie->reset);
6705     goto err_pm_runtime_put;
6706     diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
6707     index 750081c1cb48..6eecae447af3 100644
6708     --- a/drivers/pci/controller/pci-aardvark.c
6709     +++ b/drivers/pci/controller/pci-aardvark.c
6710     @@ -499,7 +499,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
6711     bridge->data = pcie;
6712     bridge->ops = &advk_pci_bridge_emul_ops;
6713    
6714     - pci_bridge_emul_init(bridge);
6715     + pci_bridge_emul_init(bridge, 0);
6716    
6717     }
6718    
6719     diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
6720     index fa0fc46edb0c..d3a0419e42f2 100644
6721     --- a/drivers/pci/controller/pci-mvebu.c
6722     +++ b/drivers/pci/controller/pci-mvebu.c
6723     @@ -583,7 +583,7 @@ static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
6724     bridge->data = port;
6725     bridge->ops = &mvebu_pci_bridge_emul_ops;
6726    
6727     - pci_bridge_emul_init(bridge);
6728     + pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
6729     }
6730    
6731     static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
6732     diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
6733     index 7dd443aea5a5..c0fb64ace05a 100644
6734     --- a/drivers/pci/hotplug/pciehp_hpc.c
6735     +++ b/drivers/pci/hotplug/pciehp_hpc.c
6736     @@ -736,12 +736,25 @@ void pcie_clear_hotplug_events(struct controller *ctrl)
6737    
6738     void pcie_enable_interrupt(struct controller *ctrl)
6739     {
6740     - pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_HPIE, PCI_EXP_SLTCTL_HPIE);
6741     + u16 mask;
6742     +
6743     + mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
6744     + pcie_write_cmd(ctrl, mask, mask);
6745     }
6746    
6747     void pcie_disable_interrupt(struct controller *ctrl)
6748     {
6749     - pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_HPIE);
6750     + u16 mask;
6751     +
6752     + /*
6753     + * Mask the hot-plug interrupt to prevent it from triggering
6754     + * immediately when the link goes inactive (we still get PME when
6755     + * any of the enabled events is detected). The same goes for the
6756     + * Link Layer State Changed event, which generates PME immediately
6757     + * when the link goes inactive, so mask it as well.
6758     + */
6759     + mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
6760     + pcie_write_cmd(ctrl, 0, mask);
6761     }
6762    
6763     /*
6764     diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
6765     index 129738362d90..83fb077d0b41 100644
6766     --- a/drivers/pci/pci-bridge-emul.c
6767     +++ b/drivers/pci/pci-bridge-emul.c
6768     @@ -24,29 +24,6 @@
6769     #define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
6770     #define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
6771    
6772     -/*
6773     - * Initialize a pci_bridge_emul structure to represent a fake PCI
6774     - * bridge configuration space. The caller needs to have initialized
6775     - * the PCI configuration space with whatever values make sense
6776     - * (typically at least vendor, device, revision), the ->ops pointer,
6777     - * and optionally ->data and ->has_pcie.
6778     - */
6779     -void pci_bridge_emul_init(struct pci_bridge_emul *bridge)
6780     -{
6781     - bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
6782     - bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
6783     - bridge->conf.cache_line_size = 0x10;
6784     - bridge->conf.status = PCI_STATUS_CAP_LIST;
6785     -
6786     - if (bridge->has_pcie) {
6787     - bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
6788     - bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
6789     - /* Set PCIe v2, root port, slot support */
6790     - bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
6791     - PCI_EXP_FLAGS_SLOT;
6792     - }
6793     -}
6794     -
6795     struct pci_bridge_reg_behavior {
6796     /* Read-only bits */
6797     u32 ro;
6798     @@ -283,6 +260,61 @@ const static struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
6799     },
6800     };
6801    
6802     +/*
6803     + * Initialize a pci_bridge_emul structure to represent a fake PCI
6804     + * bridge configuration space. The caller needs to have initialized
6805     + * the PCI configuration space with whatever values make sense
6806     + * (typically at least vendor, device, revision), the ->ops pointer,
6807     + * and optionally ->data and ->has_pcie.
6808     + */
6809     +int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
6810     + unsigned int flags)
6811     +{
6812     + bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
6813     + bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
6814     + bridge->conf.cache_line_size = 0x10;
6815     + bridge->conf.status = PCI_STATUS_CAP_LIST;
6816     + bridge->pci_regs_behavior = kmemdup(pci_regs_behavior,
6817     + sizeof(pci_regs_behavior),
6818     + GFP_KERNEL);
6819     + if (!bridge->pci_regs_behavior)
6820     + return -ENOMEM;
6821     +
6822     + if (bridge->has_pcie) {
6823     + bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
6824     + bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
6825     + /* Set PCIe v2, root port, slot support */
6826     + bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
6827     + PCI_EXP_FLAGS_SLOT;
6828     + bridge->pcie_cap_regs_behavior =
6829     + kmemdup(pcie_cap_regs_behavior,
6830     + sizeof(pcie_cap_regs_behavior),
6831     + GFP_KERNEL);
6832     + if (!bridge->pcie_cap_regs_behavior) {
6833     + kfree(bridge->pci_regs_behavior);
6834     + return -ENOMEM;
6835     + }
6836     + }
6837     +
6838     + if (flags & PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR) {
6839     + bridge->pci_regs_behavior[PCI_PREF_MEMORY_BASE / 4].ro = ~0;
6840     + bridge->pci_regs_behavior[PCI_PREF_MEMORY_BASE / 4].rw = 0;
6841     + }
6842     +
6843     + return 0;
6844     +}
6845     +
6846     +/*
6847     + * Clean up a pci_bridge_emul structure that was previously initialized
6848     + * using pci_bridge_emul_init().
6849     + */
6850     +void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge)
6851     +{
6852     + if (bridge->has_pcie)
6853     + kfree(bridge->pcie_cap_regs_behavior);
6854     + kfree(bridge->pci_regs_behavior);
6855     +}
6856     +
6857     /*
6858     * Should be called by the PCI controller driver when reading the PCI
6859     * configuration space of the fake bridge. It will call back the
6860     @@ -312,11 +344,11 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
6861     reg -= PCI_CAP_PCIE_START;
6862     read_op = bridge->ops->read_pcie;
6863     cfgspace = (u32 *) &bridge->pcie_conf;
6864     - behavior = pcie_cap_regs_behavior;
6865     + behavior = bridge->pcie_cap_regs_behavior;
6866     } else {
6867     read_op = bridge->ops->read_base;
6868     cfgspace = (u32 *) &bridge->conf;
6869     - behavior = pci_regs_behavior;
6870     + behavior = bridge->pci_regs_behavior;
6871     }
6872    
6873     if (read_op)
6874     @@ -383,11 +415,11 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
6875     reg -= PCI_CAP_PCIE_START;
6876     write_op = bridge->ops->write_pcie;
6877     cfgspace = (u32 *) &bridge->pcie_conf;
6878     - behavior = pcie_cap_regs_behavior;
6879     + behavior = bridge->pcie_cap_regs_behavior;
6880     } else {
6881     write_op = bridge->ops->write_base;
6882     cfgspace = (u32 *) &bridge->conf;
6883     - behavior = pci_regs_behavior;
6884     + behavior = bridge->pci_regs_behavior;
6885     }
6886    
6887     /* Keep all bits, except the RW bits */
6888     diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
6889     index 9d510ccf738b..e65b1b79899d 100644
6890     --- a/drivers/pci/pci-bridge-emul.h
6891     +++ b/drivers/pci/pci-bridge-emul.h
6892     @@ -107,15 +107,26 @@ struct pci_bridge_emul_ops {
6893     u32 old, u32 new, u32 mask);
6894     };
6895    
6896     +struct pci_bridge_reg_behavior;
6897     +
6898     struct pci_bridge_emul {
6899     struct pci_bridge_emul_conf conf;
6900     struct pci_bridge_emul_pcie_conf pcie_conf;
6901     struct pci_bridge_emul_ops *ops;
6902     + struct pci_bridge_reg_behavior *pci_regs_behavior;
6903     + struct pci_bridge_reg_behavior *pcie_cap_regs_behavior;
6904     void *data;
6905     bool has_pcie;
6906     };
6907    
6908     -void pci_bridge_emul_init(struct pci_bridge_emul *bridge);
6909     +enum {
6910     + PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR = BIT(0),
6911     +};
6912     +
6913     +int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
6914     + unsigned int flags);
6915     +void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge);
6916     +
6917     int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
6918     int size, u32 *value);
6919     int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
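Because pci_bridge_emul_init() now kmemdup()s the register-behavior tables per bridge, it can fail with -ENOMEM and must be paired with pci_bridge_emul_cleanup(); the new flags argument lets a caller such as pci-mvebu mark the prefetchable-window registers read-only. A hedged caller sketch (my_port and my_bridge_emul_ops are hypothetical):

static int my_bridge_setup(struct my_port *port)
{
	port->bridge.data = port;
	port->bridge.ops = &my_bridge_emul_ops;

	/* May now fail: behavior tables are duplicated per bridge. */
	return pci_bridge_emul_init(&port->bridge,
				    PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
}

static void my_bridge_teardown(struct my_port *port)
{
	pci_bridge_emul_cleanup(&port->bridge); /* frees the tables */
}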
6920     diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
6921     index e435d12e61a0..7b77754a82de 100644
6922     --- a/drivers/pci/pcie/dpc.c
6923     +++ b/drivers/pci/pcie/dpc.c
6924     @@ -202,6 +202,28 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
6925     pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
6926     }
6927    
6928     +static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
6929     + struct aer_err_info *info)
6930     +{
6931     + int pos = dev->aer_cap;
6932     + u32 status, mask, sev;
6933     +
6934     + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
6935     + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
6936     + status &= ~mask;
6937     + if (!status)
6938     + return 0;
6939     +
6940     + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
6941     + status &= sev;
6942     + if (status)
6943     + info->severity = AER_FATAL;
6944     + else
6945     + info->severity = AER_NONFATAL;
6946     +
6947     + return 1;
6948     +}
6949     +
6950     static irqreturn_t dpc_handler(int irq, void *context)
6951     {
6952     struct aer_err_info info;
6953     @@ -229,9 +251,12 @@ static irqreturn_t dpc_handler(int irq, void *context)
6954     /* show RP PIO error detail information */
6955     if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
6956     dpc_process_rp_pio_error(dpc);
6957     - else if (reason == 0 && aer_get_device_error_info(pdev, &info)) {
6958     + else if (reason == 0 &&
6959     + dpc_get_aer_uncorrect_severity(pdev, &info) &&
6960     + aer_get_device_error_info(pdev, &info)) {
6961     aer_print_error(pdev, &info);
6962     pci_cleanup_aer_uncorrect_error_status(pdev);
6963     + pci_aer_clear_fatal_status(pdev);
6964     }
6965    
6966     /* We configure DPC so it only triggers on ERR_FATAL */
6967     diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
6968     index 257b9f6f2ebb..c46a3fcb341e 100644
6969     --- a/drivers/pci/probe.c
6970     +++ b/drivers/pci/probe.c
6971     @@ -2071,11 +2071,8 @@ static void pci_configure_ltr(struct pci_dev *dev)
6972     {
6973     #ifdef CONFIG_PCIEASPM
6974     struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
6975     - u32 cap;
6976     struct pci_dev *bridge;
6977     -
6978     - if (!host->native_ltr)
6979     - return;
6980     + u32 cap, ctl;
6981    
6982     if (!pci_is_pcie(dev))
6983     return;
6984     @@ -2084,22 +2081,35 @@ static void pci_configure_ltr(struct pci_dev *dev)
6985     if (!(cap & PCI_EXP_DEVCAP2_LTR))
6986     return;
6987    
6988     - /*
6989     - * Software must not enable LTR in an Endpoint unless the Root
6990     - * Complex and all intermediate Switches indicate support for LTR.
6991     - * PCIe r3.1, sec 6.18.
6992     - */
6993     - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
6994     - dev->ltr_path = 1;
6995     - else {
6996     + pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl);
6997     + if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
6998     + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
6999     + dev->ltr_path = 1;
7000     + return;
7001     + }
7002     +
7003     bridge = pci_upstream_bridge(dev);
7004     if (bridge && bridge->ltr_path)
7005     dev->ltr_path = 1;
7006     +
7007     + return;
7008     }
7009    
7010     - if (dev->ltr_path)
7011     + if (!host->native_ltr)
7012     + return;
7013     +
7014     + /*
7015     + * Software must not enable LTR in an Endpoint unless the Root
7016     + * Complex and all intermediate Switches indicate support for LTR.
7017     + * PCIe r4.0, sec 6.18.
7018     + */
7019     + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
7020     + ((bridge = pci_upstream_bridge(dev)) &&
7021     + bridge->ltr_path)) {
7022     pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
7023     PCI_EXP_DEVCTL2_LTR_EN);
7024     + dev->ltr_path = 1;
7025     + }
7026     #endif
7027     }
7028    
7029     diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
7030     index c843eaff8ad0..c3ed7b476676 100644
7031     --- a/drivers/power/supply/cpcap-charger.c
7032     +++ b/drivers/power/supply/cpcap-charger.c
7033     @@ -458,6 +458,7 @@ static void cpcap_usb_detect(struct work_struct *work)
7034     goto out_err;
7035     }
7036    
7037     + power_supply_changed(ddata->usb);
7038     return;
7039    
7040     out_err:
7041     diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
7042     index b94e3a721721..cd93cf53e23c 100644
7043     --- a/drivers/regulator/max77620-regulator.c
7044     +++ b/drivers/regulator/max77620-regulator.c
7045     @@ -1,7 +1,7 @@
7046     /*
7047     * Maxim MAX77620 Regulator driver
7048     *
7049     - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
7050     + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
7051     *
7052     * Author: Mallikarjun Kasoju <mkasoju@nvidia.com>
7053     * Laxman Dewangan <ldewangan@nvidia.com>
7054     @@ -803,6 +803,14 @@ static int max77620_regulator_probe(struct platform_device *pdev)
7055     rdesc = &rinfo[id].desc;
7056     pmic->rinfo[id] = &max77620_regs_info[id];
7057     pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL;
7058     + pmic->reg_pdata[id].active_fps_src = -1;
7059     + pmic->reg_pdata[id].active_fps_pd_slot = -1;
7060     + pmic->reg_pdata[id].active_fps_pu_slot = -1;
7061     + pmic->reg_pdata[id].suspend_fps_src = -1;
7062     + pmic->reg_pdata[id].suspend_fps_pd_slot = -1;
7063     + pmic->reg_pdata[id].suspend_fps_pu_slot = -1;
7064     + pmic->reg_pdata[id].power_ok = -1;
7065     + pmic->reg_pdata[id].ramp_rate_setting = -1;
7066    
7067     ret = max77620_read_slew_rate(pmic, id);
7068     if (ret < 0)
7069     diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
7070     index 095d25f3d2ea..58a1fe583a6c 100644
7071     --- a/drivers/regulator/s2mpa01.c
7072     +++ b/drivers/regulator/s2mpa01.c
7073     @@ -298,13 +298,13 @@ static const struct regulator_desc regulators[] = {
7074     regulator_desc_ldo(2, STEP_50_MV),
7075     regulator_desc_ldo(3, STEP_50_MV),
7076     regulator_desc_ldo(4, STEP_50_MV),
7077     - regulator_desc_ldo(5, STEP_50_MV),
7078     + regulator_desc_ldo(5, STEP_25_MV),
7079     regulator_desc_ldo(6, STEP_25_MV),
7080     regulator_desc_ldo(7, STEP_50_MV),
7081     regulator_desc_ldo(8, STEP_50_MV),
7082     regulator_desc_ldo(9, STEP_50_MV),
7083     regulator_desc_ldo(10, STEP_50_MV),
7084     - regulator_desc_ldo(11, STEP_25_MV),
7085     + regulator_desc_ldo(11, STEP_50_MV),
7086     regulator_desc_ldo(12, STEP_50_MV),
7087     regulator_desc_ldo(13, STEP_50_MV),
7088     regulator_desc_ldo(14, STEP_50_MV),
7089     @@ -315,11 +315,11 @@ static const struct regulator_desc regulators[] = {
7090     regulator_desc_ldo(19, STEP_50_MV),
7091     regulator_desc_ldo(20, STEP_50_MV),
7092     regulator_desc_ldo(21, STEP_50_MV),
7093     - regulator_desc_ldo(22, STEP_25_MV),
7094     - regulator_desc_ldo(23, STEP_25_MV),
7095     + regulator_desc_ldo(22, STEP_50_MV),
7096     + regulator_desc_ldo(23, STEP_50_MV),
7097     regulator_desc_ldo(24, STEP_50_MV),
7098     regulator_desc_ldo(25, STEP_50_MV),
7099     - regulator_desc_ldo(26, STEP_50_MV),
7100     + regulator_desc_ldo(26, STEP_25_MV),
7101     regulator_desc_buck1_4(1),
7102     regulator_desc_buck1_4(2),
7103     regulator_desc_buck1_4(3),
7104     diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
7105     index ee4a23ab0663..134c62db36c5 100644
7106     --- a/drivers/regulator/s2mps11.c
7107     +++ b/drivers/regulator/s2mps11.c
7108     @@ -362,7 +362,7 @@ static const struct regulator_desc s2mps11_regulators[] = {
7109     regulator_desc_s2mps11_ldo(32, STEP_50_MV),
7110     regulator_desc_s2mps11_ldo(33, STEP_50_MV),
7111     regulator_desc_s2mps11_ldo(34, STEP_50_MV),
7112     - regulator_desc_s2mps11_ldo(35, STEP_50_MV),
7113     + regulator_desc_s2mps11_ldo(35, STEP_25_MV),
7114     regulator_desc_s2mps11_ldo(36, STEP_50_MV),
7115     regulator_desc_s2mps11_ldo(37, STEP_50_MV),
7116     regulator_desc_s2mps11_ldo(38, STEP_50_MV),
7117     @@ -372,8 +372,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
7118     regulator_desc_s2mps11_buck1_4(4),
7119     regulator_desc_s2mps11_buck5,
7120     regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
7121     - regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
7122     - regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
7123     + regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
7124     + regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
7125     regulator_desc_s2mps11_buck9,
7126     regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
7127     };
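
The s2mpa01 and s2mps11 hunks above only swap step-size macros, but that is enough to make the reported voltage wrong on the affected LDOs and bucks: the regulator core maps a selector to a voltage linearly from the descriptor's minimum and step. A minimal standalone sketch of that mapping, with the driver's macro values restated as plain constants (an assumption for illustration):

#include <stdio.h>

#define MIN_600_MV    600000  /* microvolts */
#define STEP_6_25_MV    6250
#define STEP_12_5_MV   12500

static long sel_to_uV(long min_uV, long step_uV, unsigned sel)
{
        return min_uV + (long)sel * step_uV;
}

int main(void)
{
        unsigned sel = 40;

        /* The halved step under-reports everything above the 600 mV floor. */
        printf("sel=%u  wrong(6.25mV)=%ld uV  right(12.5mV)=%ld uV\n",
               sel,
               sel_to_uV(MIN_600_MV, STEP_6_25_MV, sel),
               sel_to_uV(MIN_600_MV, STEP_12_5_MV, sel));
        return 0;
}
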
7128     diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
7129     index 31c6c847eaca..e9824c35c34f 100644
7130     --- a/drivers/s390/crypto/vfio_ap_drv.c
7131     +++ b/drivers/s390/crypto/vfio_ap_drv.c
7132     @@ -15,7 +15,6 @@
7133     #include "vfio_ap_private.h"
7134    
7135     #define VFIO_AP_ROOT_NAME "vfio_ap"
7136     -#define VFIO_AP_DEV_TYPE_NAME "ap_matrix"
7137     #define VFIO_AP_DEV_NAME "matrix"
7138    
7139     MODULE_AUTHOR("IBM Corporation");
7140     @@ -24,10 +23,6 @@ MODULE_LICENSE("GPL v2");
7141    
7142     static struct ap_driver vfio_ap_drv;
7143    
7144     -static struct device_type vfio_ap_dev_type = {
7145     - .name = VFIO_AP_DEV_TYPE_NAME,
7146     -};
7147     -
7148     struct ap_matrix_dev *matrix_dev;
7149    
7150     /* Only type 10 adapters (CEX4 and later) are supported
7151     @@ -62,6 +57,22 @@ static void vfio_ap_matrix_dev_release(struct device *dev)
7152     kfree(matrix_dev);
7153     }
7154    
7155     +static int matrix_bus_match(struct device *dev, struct device_driver *drv)
7156     +{
7157     + return 1;
7158     +}
7159     +
7160     +static struct bus_type matrix_bus = {
7161     + .name = "matrix",
7162     + .match = &matrix_bus_match,
7163     +};
7164     +
7165     +static struct device_driver matrix_driver = {
7166     + .name = "vfio_ap",
7167     + .bus = &matrix_bus,
7168     + .suppress_bind_attrs = true,
7169     +};
7170     +
7171     static int vfio_ap_matrix_dev_create(void)
7172     {
7173     int ret;
7174     @@ -71,6 +82,10 @@ static int vfio_ap_matrix_dev_create(void)
7175     if (IS_ERR(root_device))
7176     return PTR_ERR(root_device);
7177    
7178     + ret = bus_register(&matrix_bus);
7179     + if (ret)
7180     + goto bus_register_err;
7181     +
7182     matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL);
7183     if (!matrix_dev) {
7184     ret = -ENOMEM;
7185     @@ -87,30 +102,41 @@ static int vfio_ap_matrix_dev_create(void)
7186     mutex_init(&matrix_dev->lock);
7187     INIT_LIST_HEAD(&matrix_dev->mdev_list);
7188    
7189     - matrix_dev->device.type = &vfio_ap_dev_type;
7190     dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
7191     matrix_dev->device.parent = root_device;
7192     + matrix_dev->device.bus = &matrix_bus;
7193     matrix_dev->device.release = vfio_ap_matrix_dev_release;
7194     - matrix_dev->device.driver = &vfio_ap_drv.driver;
7195     + matrix_dev->vfio_ap_drv = &vfio_ap_drv;
7196    
7197     ret = device_register(&matrix_dev->device);
7198     if (ret)
7199     goto matrix_reg_err;
7200    
7201     + ret = driver_register(&matrix_driver);
7202     + if (ret)
7203     + goto matrix_drv_err;
7204     +
7205     return 0;
7206    
7207     +matrix_drv_err:
7208     + device_unregister(&matrix_dev->device);
7209     matrix_reg_err:
7210     put_device(&matrix_dev->device);
7211     matrix_alloc_err:
7212     + bus_unregister(&matrix_bus);
7213     +bus_register_err:
7214     root_device_unregister(root_device);
7215     -
7216     return ret;
7217     }
7218    
7219     static void vfio_ap_matrix_dev_destroy(void)
7220     {
7221     + struct device *root_device = matrix_dev->device.parent;
7222     +
7223     + driver_unregister(&matrix_driver);
7224     device_unregister(&matrix_dev->device);
7225     - root_device_unregister(matrix_dev->device.parent);
7226     + bus_unregister(&matrix_bus);
7227     + root_device_unregister(root_device);
7228     }
7229    
7230     static int __init vfio_ap_init(void)
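
The vfio_ap rework above is a textbook goto-unwind: resources are acquired in the order bus, device, driver, and each error label releases exactly what was acquired before the failing step, in reverse order. A compilable userspace sketch of the same idiom, with mock register/unregister steps standing in for bus_register(), device_register() and driver_register():

#include <stdio.h>

/* Mock setup/teardown steps; names are hypothetical stand-ins. */
static int step_ok(const char *what)    { printf("register %s\n", what); return 0; }
static void step_undo(const char *what) { printf("unregister %s\n", what); }

static int create(void)
{
        int ret;

        if ((ret = step_ok("bus")))
                goto bus_err;
        if ((ret = step_ok("device")))
                goto dev_err;
        if ((ret = step_ok("driver")))
                goto drv_err;
        return 0;

drv_err:                        /* unwind in strict reverse order */
        step_undo("device");
dev_err:
        step_undo("bus");
bus_err:
        return ret;
}

int main(void) { return create(); }
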
7231     diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
7232     index 272ef427dcc0..900b9cf20ca5 100644
7233     --- a/drivers/s390/crypto/vfio_ap_ops.c
7234     +++ b/drivers/s390/crypto/vfio_ap_ops.c
7235     @@ -198,8 +198,8 @@ static int vfio_ap_verify_queue_reserved(unsigned long *apid,
7236     qres.apqi = apqi;
7237     qres.reserved = false;
7238    
7239     - ret = driver_for_each_device(matrix_dev->device.driver, NULL, &qres,
7240     - vfio_ap_has_queue);
7241     + ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
7242     + &qres, vfio_ap_has_queue);
7243     if (ret)
7244     return ret;
7245    
7246     diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
7247     index 5675492233c7..76b7f98e47e9 100644
7248     --- a/drivers/s390/crypto/vfio_ap_private.h
7249     +++ b/drivers/s390/crypto/vfio_ap_private.h
7250     @@ -40,6 +40,7 @@ struct ap_matrix_dev {
7251     struct ap_config_info info;
7252     struct list_head mdev_list;
7253     struct mutex lock;
7254     + struct ap_driver *vfio_ap_drv;
7255     };
7256    
7257     extern struct ap_matrix_dev *matrix_dev;
7258     diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
7259     index ae1d56da671d..1a738fe9f26b 100644
7260     --- a/drivers/s390/virtio/virtio_ccw.c
7261     +++ b/drivers/s390/virtio/virtio_ccw.c
7262     @@ -272,6 +272,8 @@ static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
7263     {
7264     struct virtio_ccw_vq_info *info;
7265    
7266     + if (!vcdev->airq_info)
7267     + return;
7268     list_for_each_entry(info, &vcdev->virtqueues, node)
7269     drop_airq_indicator(info->vq, vcdev->airq_info);
7270     }
7271     @@ -413,7 +415,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
7272     ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
7273     if (ret)
7274     return ret;
7275     - return vcdev->config_block->num;
7276     + return vcdev->config_block->num ?: -ENOENT;
7277     }
7278    
7279     static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
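
The virtio_ccw hunk relies on the GNU C conditional with an omitted middle operand: "a ?: b" means "a ? a : b" with a evaluated only once, so a reported queue size of 0 now surfaces as -ENOENT instead of being returned as success. A tiny demonstration:

#include <stdio.h>
#include <errno.h>

/* GNU C extension: a ?: b  ==  a ? a : b, with a evaluated once. */
static int read_vq_size(int reported)
{
        return reported ?: -ENOENT;
}

int main(void)
{
        printf("%d %d\n", read_vq_size(128), read_vq_size(0)); /* "128 -2" (-ENOENT) */
        return 0;
}
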
7280     diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
7281     index 7e56a11836c1..ccefface7e31 100644
7282     --- a/drivers/scsi/aacraid/linit.c
7283     +++ b/drivers/scsi/aacraid/linit.c
7284     @@ -413,13 +413,16 @@ static int aac_slave_configure(struct scsi_device *sdev)
7285     if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
7286     devtype = aac->hba_map[chn][tid].devtype;
7287    
7288     - if (devtype == AAC_DEVTYPE_NATIVE_RAW)
7289     + if (devtype == AAC_DEVTYPE_NATIVE_RAW) {
7290     depth = aac->hba_map[chn][tid].qd_limit;
7291     - else if (devtype == AAC_DEVTYPE_ARC_RAW)
7292     + set_timeout = 1;
7293     + goto common_config;
7294     + }
7295     + if (devtype == AAC_DEVTYPE_ARC_RAW) {
7296     set_qd_dev_type = true;
7297     -
7298     - set_timeout = 1;
7299     - goto common_config;
7300     + set_timeout = 1;
7301     + goto common_config;
7302     + }
7303     }
7304    
7305     if (aac->jbod && (sdev->type == TYPE_DISK))
7306     diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
7307     index 8d1acc802a67..f44e640229e7 100644
7308     --- a/drivers/scsi/qla2xxx/qla_init.c
7309     +++ b/drivers/scsi/qla2xxx/qla_init.c
7310     @@ -644,11 +644,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
7311     break;
7312     case DSC_LS_PORT_UNAVAIL:
7313     default:
7314     - if (fcport->loop_id != FC_NO_LOOP_ID)
7315     - qla2x00_clear_loop_id(fcport);
7316     -
7317     - fcport->loop_id = loop_id;
7318     - fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
7319     + if (fcport->loop_id == FC_NO_LOOP_ID) {
7320     + qla2x00_find_new_loop_id(vha, fcport);
7321     + fcport->fw_login_state =
7322     + DSC_LS_PORT_UNAVAIL;
7323     + }
7324     + ql_dbg(ql_dbg_disc, vha, 0x20e5,
7325     + "%s %d %8phC\n", __func__, __LINE__,
7326     + fcport->port_name);
7327     qla24xx_fcport_handle_login(vha, fcport);
7328     break;
7329     }
7330     @@ -1471,29 +1474,6 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
7331     return 0;
7332     }
7333    
7334     -static
7335     -void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
7336     -{
7337     - fcport->rscn_gen++;
7338     -
7339     - ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
7340     - "%s %8phC DS %d LS %d\n",
7341     - __func__, fcport->port_name, fcport->disc_state,
7342     - fcport->fw_login_state);
7343     -
7344     - if (fcport->flags & FCF_ASYNC_SENT)
7345     - return;
7346     -
7347     - switch (fcport->disc_state) {
7348     - case DSC_DELETED:
7349     - case DSC_LOGIN_COMPLETE:
7350     - qla24xx_post_gpnid_work(fcport->vha, &ea->id);
7351     - break;
7352     - default:
7353     - break;
7354     - }
7355     -}
7356     -
7357     int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
7358     u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
7359     {
7360     @@ -1560,8 +1540,6 @@ static void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
7361    
7362     void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
7363     {
7364     - fc_port_t *f, *tf;
7365     - uint32_t id = 0, mask, rid;
7366     fc_port_t *fcport;
7367    
7368     switch (ea->event) {
7369     @@ -1574,10 +1552,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
7370     case FCME_RSCN:
7371     if (test_bit(UNLOADING, &vha->dpc_flags))
7372     return;
7373     - switch (ea->id.b.rsvd_1) {
7374     - case RSCN_PORT_ADDR:
7375     -#define BIGSCAN 1
7376     -#if defined BIGSCAN & BIGSCAN > 0
7377     {
7378     unsigned long flags;
7379     fcport = qla2x00_find_fcport_by_nportid
7380     @@ -1596,59 +1570,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
7381     }
7382     spin_unlock_irqrestore(&vha->work_lock, flags);
7383     }
7384     -#else
7385     - {
7386     - int rc;
7387     - fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
7388     - if (!fcport) {
7389     - /* cable moved */
7390     - rc = qla24xx_post_gpnid_work(vha, &ea->id);
7391     - if (rc) {
7392     - ql_log(ql_log_warn, vha, 0xd044,
7393     - "RSCN GPNID work failed %06x\n",
7394     - ea->id.b24);
7395     - }
7396     - } else {
7397     - ea->fcport = fcport;
7398     - fcport->scan_needed = 1;
7399     - qla24xx_handle_rscn_event(fcport, ea);
7400     - }
7401     - }
7402     -#endif
7403     - break;
7404     - case RSCN_AREA_ADDR:
7405     - case RSCN_DOM_ADDR:
7406     - if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
7407     - mask = 0xffff00;
7408     - ql_dbg(ql_dbg_async, vha, 0x5044,
7409     - "RSCN: Area 0x%06x was affected\n",
7410     - ea->id.b24);
7411     - } else {
7412     - mask = 0xff0000;
7413     - ql_dbg(ql_dbg_async, vha, 0x507a,
7414     - "RSCN: Domain 0x%06x was affected\n",
7415     - ea->id.b24);
7416     - }
7417     -
7418     - rid = ea->id.b24 & mask;
7419     - list_for_each_entry_safe(f, tf, &vha->vp_fcports,
7420     - list) {
7421     - id = f->d_id.b24 & mask;
7422     - if (rid == id) {
7423     - ea->fcport = f;
7424     - qla24xx_handle_rscn_event(f, ea);
7425     - }
7426     - }
7427     - break;
7428     - case RSCN_FAB_ADDR:
7429     - default:
7430     - ql_log(ql_log_warn, vha, 0xd045,
7431     - "RSCN: Fabric was affected. Addr format %d\n",
7432     - ea->id.b.rsvd_1);
7433     - qla2x00_mark_all_devices_lost(vha, 1);
7434     - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
7435     - set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
7436     - }
7437     break;
7438     case FCME_GNL_DONE:
7439     qla24xx_handle_gnl_done_event(vha, ea);
7440     @@ -1709,11 +1630,7 @@ void qla_rscn_replay(fc_port_t *fcport)
7441     ea.event = FCME_RSCN;
7442     ea.id = fcport->d_id;
7443     ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
7444     -#if defined BIGSCAN & BIGSCAN > 0
7445     qla2x00_fcport_event_handler(fcport->vha, &ea);
7446     -#else
7447     - qla24xx_post_gpnid_work(fcport->vha, &ea.id);
7448     -#endif
7449     }
7450     }
7451    
7452     diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
7453     index 8507c43b918c..1a20e5d8f057 100644
7454     --- a/drivers/scsi/qla2xxx/qla_isr.c
7455     +++ b/drivers/scsi/qla2xxx/qla_isr.c
7456     @@ -3410,7 +3410,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
7457     min_vecs++;
7458     }
7459    
7460     - if (USER_CTRL_IRQ(ha)) {
7461     + if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
7462     /* user wants to control IRQ setting for target mode */
7463     ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
7464     ha->msix_count, PCI_IRQ_MSIX);
7465     diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
7466     index c6ef83d0d99b..7e35ce2162d0 100644
7467     --- a/drivers/scsi/qla2xxx/qla_os.c
7468     +++ b/drivers/scsi/qla2xxx/qla_os.c
7469     @@ -6936,7 +6936,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
7470     scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
7471     struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
7472    
7473     - if (USER_CTRL_IRQ(vha->hw))
7474     + if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
7475     rc = blk_mq_map_queues(qmap);
7476     else
7477     rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
7478     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
7479     index 5464d467e23e..b84099479fe0 100644
7480     --- a/drivers/scsi/sd.c
7481     +++ b/drivers/scsi/sd.c
7482     @@ -3047,6 +3047,55 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
7483     sdkp->security = 1;
7484     }
7485    
7486     +/*
7487     + * Determine the device's preferred I/O size for reads and writes
7488     + * unless the reported value is unreasonably small, large, not a
7489     + * multiple of the physical block size, or simply garbage.
7490     + */
7491     +static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
7492     + unsigned int dev_max)
7493     +{
7494     + struct scsi_device *sdp = sdkp->device;
7495     + unsigned int opt_xfer_bytes =
7496     + logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
7497     +
7498     + if (sdkp->opt_xfer_blocks > dev_max) {
7499     + sd_first_printk(KERN_WARNING, sdkp,
7500     + "Optimal transfer size %u logical blocks " \
7501     + "> dev_max (%u logical blocks)\n",
7502     + sdkp->opt_xfer_blocks, dev_max);
7503     + return false;
7504     + }
7505     +
7506     + if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
7507     + sd_first_printk(KERN_WARNING, sdkp,
7508     + "Optimal transfer size %u logical blocks " \
7509     + "> sd driver limit (%u logical blocks)\n",
7510     + sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
7511     + return false;
7512     + }
7513     +
7514     + if (opt_xfer_bytes < PAGE_SIZE) {
7515     + sd_first_printk(KERN_WARNING, sdkp,
7516     + "Optimal transfer size %u bytes < " \
7517     + "PAGE_SIZE (%u bytes)\n",
7518     + opt_xfer_bytes, (unsigned int)PAGE_SIZE);
7519     + return false;
7520     + }
7521     +
7522     + if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
7523     + sd_first_printk(KERN_WARNING, sdkp,
7524     + "Optimal transfer size %u bytes not a " \
7525     + "multiple of physical block size (%u bytes)\n",
7526     + opt_xfer_bytes, sdkp->physical_block_size);
7527     + return false;
7528     + }
7529     +
7530     + sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
7531     + opt_xfer_bytes);
7532     + return true;
7533     +}
7534     +
7535     /**
7536     * sd_revalidate_disk - called the first time a new disk is seen,
7537     * performs disk spin up, read_capacity, etc.
7538     @@ -3125,15 +3174,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
7539     dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
7540     q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
7541    
7542     - /*
7543     - * Determine the device's preferred I/O size for reads and writes
7544     - * unless the reported value is unreasonably small, large, or
7545     - * garbage.
7546     - */
7547     - if (sdkp->opt_xfer_blocks &&
7548     - sdkp->opt_xfer_blocks <= dev_max &&
7549     - sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
7550     - logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
7551     + if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
7552     q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
7553     rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
7554     } else
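
sd_validate_opt_xfer_size() folds the old three-part condition plus a new alignment test into one helper that also logs why a value was rejected. The arithmetic is easy to check outside the kernel; note the bytes & (physical_block_size - 1) test only works because physical block sizes are powers of two. A standalone sketch, with the constants as illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE          4096u
#define SD_DEF_XFER_BLOCKS 65535u  /* stand-in for the sd driver limit */

/* Same checks as sd_validate_opt_xfer_size(), on plain integers. */
static bool opt_xfer_ok(unsigned blocks, unsigned dev_max,
                        unsigned logical_bs, unsigned physical_bs)
{
        unsigned long long bytes = (unsigned long long)blocks * logical_bs;

        if (blocks > dev_max)               return false;
        if (blocks > SD_DEF_XFER_BLOCKS)    return false;
        if (bytes < PAGE_SIZE)              return false;
        /* physical_bs is a power of two, so this tests divisibility */
        if (bytes & (physical_bs - 1))      return false;
        return true;
}

int main(void)
{
        printf("%d\n", opt_xfer_ok(256, 1024, 512, 4096)); /* 1: 128 KiB, aligned */
        printf("%d\n", opt_xfer_ok(1, 1024, 512, 4096));   /* 0: below PAGE_SIZE */
        return 0;
}
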
7555     diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
7556     index 772b976e4ee4..464cba521fb6 100644
7557     --- a/drivers/scsi/virtio_scsi.c
7558     +++ b/drivers/scsi/virtio_scsi.c
7559     @@ -594,7 +594,6 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
7560     return FAILED;
7561    
7562     memset(cmd, 0, sizeof(*cmd));
7563     - cmd->sc = sc;
7564     cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
7565     .type = VIRTIO_SCSI_T_TMF,
7566     .subtype = cpu_to_virtio32(vscsi->vdev,
7567     @@ -653,7 +652,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
7568     return FAILED;
7569    
7570     memset(cmd, 0, sizeof(*cmd));
7571     - cmd->sc = sc;
7572     cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
7573     .type = VIRTIO_SCSI_T_TMF,
7574     .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
7575     diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
7576     index c7beb6841289..ab8f731a3426 100644
7577     --- a/drivers/soc/qcom/rpmh.c
7578     +++ b/drivers/soc/qcom/rpmh.c
7579     @@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
7580     struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
7581     msg);
7582     struct completion *compl = rpm_msg->completion;
7583     + bool free = rpm_msg->needs_free;
7584    
7585     rpm_msg->err = r;
7586    
7587     @@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
7588     complete(compl);
7589    
7590     exit:
7591     - if (rpm_msg->needs_free)
7592     + if (free)
7593     kfree(rpm_msg);
7594     }
7595    
7596     @@ -348,11 +349,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
7597     {
7598     struct batch_cache_req *req;
7599     struct rpmh_request *rpm_msgs;
7600     - DECLARE_COMPLETION_ONSTACK(compl);
7601     + struct completion *compls;
7602     struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
7603     unsigned long time_left;
7604     int count = 0;
7605     - int ret, i, j;
7606     + int ret, i;
7607     + void *ptr;
7608    
7609     if (!cmd || !n)
7610     return -EINVAL;
7611     @@ -362,10 +364,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
7612     if (!count)
7613     return -EINVAL;
7614    
7615     - req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
7616     + ptr = kzalloc(sizeof(*req) +
7617     + count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
7618     GFP_ATOMIC);
7619     - if (!req)
7620     + if (!ptr)
7621     return -ENOMEM;
7622     +
7623     + req = ptr;
7624     + compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
7625     +
7626     req->count = count;
7627     rpm_msgs = req->rpm_msgs;
7628    
7629     @@ -380,25 +387,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
7630     }
7631    
7632     for (i = 0; i < count; i++) {
7633     - rpm_msgs[i].completion = &compl;
7634     + struct completion *compl = &compls[i];
7635     +
7636     + init_completion(compl);
7637     + rpm_msgs[i].completion = compl;
7638     ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
7639     if (ret) {
7640     pr_err("Error(%d) sending RPMH message addr=%#x\n",
7641     ret, rpm_msgs[i].msg.cmds[0].addr);
7642     - for (j = i; j < count; j++)
7643     - rpmh_tx_done(&rpm_msgs[j].msg, ret);
7644     break;
7645     }
7646     }
7647    
7648     time_left = RPMH_TIMEOUT_MS;
7649     - for (i = 0; i < count; i++) {
7650     - time_left = wait_for_completion_timeout(&compl, time_left);
7651     + while (i--) {
7652     + time_left = wait_for_completion_timeout(&compls[i], time_left);
7653     if (!time_left) {
7654     /*
7655     * Better hope they never finish because they'll signal
7656     - * the completion on our stack and that's bad once
7657     - * we've returned from the function.
7658     + * the completion that we're going to free once
7659     + * we've returned from this function.
7660     */
7661     WARN_ON(1);
7662     ret = -ETIMEDOUT;
7663     @@ -407,7 +415,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
7664     }
7665    
7666     exit:
7667     - kfree(req);
7668     + kfree(ptr);
7669    
7670     return ret;
7671     }
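
The rpmh fix replaces a single on-stack completion with one completion per message, and, since the call may run in atomic context, puts the request and the completion array in the same kzalloc(), carved apart with pointer arithmetic, so there is still exactly one allocation and one kfree(). A userspace sketch of the carving, with mock struct layouts (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Mock stand-ins for struct batch_cache_req / struct rpmh_request /
 * struct completion; layout idea only. */
struct msg  { int payload; void *completion; };
struct comp { int done; };
struct req  { int count; struct msg msgs[]; };

int main(void)
{
        int count = 3;
        /* one allocation: header + count msgs + count completions */
        size_t sz = sizeof(struct req) +
                    count * (sizeof(struct msg) + sizeof(struct comp));
        void *ptr = calloc(1, sz);
        if (!ptr)
                return 1;

        struct req  *req   = ptr;
        struct comp *comps = (void *)((char *)ptr + sizeof(*req) +
                                      count * sizeof(struct msg));
        req->count = count;
        for (int i = 0; i < count; i++)
                req->msgs[i].completion = &comps[i];

        printf("req=%p comps=%p\n", (void *)req, (void *)comps);
        free(ptr);   /* like kfree(ptr): everything goes at once */
        return 0;
}
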
7672     diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
7673     index a4aee26028cd..53b35c56a557 100644
7674     --- a/drivers/spi/spi-gpio.c
7675     +++ b/drivers/spi/spi-gpio.c
7676     @@ -428,7 +428,8 @@ static int spi_gpio_probe(struct platform_device *pdev)
7677     return status;
7678    
7679     master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
7680     - master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL;
7681     + master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
7682     + SPI_CS_HIGH;
7683     master->flags = master_flags;
7684     master->bus_num = pdev->id;
7685     /* The master needs to think there is a chipselect even if not connected */
7686     @@ -455,7 +456,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
7687     spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
7688     }
7689     spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
7690     - spi_gpio->bitbang.flags = SPI_CS_HIGH;
7691    
7692     status = spi_bitbang_start(&spi_gpio->bitbang);
7693     if (status)
7694     diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
7695     index 2fd8881fcd65..8be304379628 100644
7696     --- a/drivers/spi/spi-omap2-mcspi.c
7697     +++ b/drivers/spi/spi-omap2-mcspi.c
7698     @@ -623,8 +623,8 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
7699     cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
7700     cfg.src_addr_width = width;
7701     cfg.dst_addr_width = width;
7702     - cfg.src_maxburst = es;
7703     - cfg.dst_maxburst = es;
7704     + cfg.src_maxburst = 1;
7705     + cfg.dst_maxburst = 1;
7706    
7707     rx = xfer->rx_buf;
7708     tx = xfer->tx_buf;
7709     diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
7710     index d84b893a64d7..3e82eaad0f2d 100644
7711     --- a/drivers/spi/spi-pxa2xx.c
7712     +++ b/drivers/spi/spi-pxa2xx.c
7713     @@ -1696,6 +1696,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
7714     platform_info->enable_dma = false;
7715     } else {
7716     master->can_dma = pxa2xx_spi_can_dma;
7717     + master->max_dma_len = MAX_DMA_LEN;
7718     }
7719     }
7720    
7721     diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
7722     index 5f19016bbf10..b9fb6493cd6b 100644
7723     --- a/drivers/spi/spi-ti-qspi.c
7724     +++ b/drivers/spi/spi-ti-qspi.c
7725     @@ -490,8 +490,8 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi)
7726     ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
7727     if (qspi->ctrl_base) {
7728     regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
7729     - MEM_CS_EN(spi->chip_select),
7730     - MEM_CS_MASK);
7731     + MEM_CS_MASK,
7732     + MEM_CS_EN(spi->chip_select));
7733     }
7734     qspi->mmap_enabled = true;
7735     }
7736     @@ -503,7 +503,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi)
7737     ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
7738     if (qspi->ctrl_base)
7739     regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
7740     - 0, MEM_CS_MASK);
7741     + MEM_CS_MASK, 0);
7742     qspi->mmap_enabled = false;
7743     }
7744    
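
The ti-qspi hunks fix swapped arguments: regmap_update_bits(map, reg, mask, val) is a read-modify-write in which only bits set in mask may change, so passing the value in the mask slot both clobbers unrelated bits and fails to set the intended ones. A mock of the contract, with the MEM_CS_MASK/MEM_CS_EN values restated here as assumptions:

#include <stdio.h>

static unsigned reg;  /* mock hardware register */

/* Same contract as regmap_update_bits(): only bits in mask change. */
static void update_bits(unsigned mask, unsigned val)
{
        reg = (reg & ~mask) | (val & mask);
}

#define MEM_CS_MASK   0x700u          /* illustrative values */
#define MEM_CS_EN(cs) (((cs) + 1) << 8)

int main(void)
{
        reg = 0x00ff;
        update_bits(MEM_CS_MASK, MEM_CS_EN(0));   /* enable: reg -> 0x01ff */
        printf("enabled:  0x%04x\n", reg);
        update_bits(MEM_CS_MASK, 0);              /* disable: reg -> 0x00ff */
        printf("disabled: 0x%04x\n", reg);
        return 0;
}
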
7745     diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
7746     index 28f41caba05d..fb442499f806 100644
7747     --- a/drivers/staging/media/imx/imx-ic-prpencvf.c
7748     +++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
7749     @@ -680,12 +680,23 @@ static int prp_start(struct prp_priv *priv)
7750     goto out_free_nfb4eof_irq;
7751     }
7752    
7753     + /* start upstream */
7754     + ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
7755     + ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
7756     + if (ret) {
7757     + v4l2_err(&ic_priv->sd,
7758     + "upstream stream on failed: %d\n", ret);
7759     + goto out_free_eof_irq;
7760     + }
7761     +
7762     /* start the EOF timeout timer */
7763     mod_timer(&priv->eof_timeout_timer,
7764     jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
7765    
7766     return 0;
7767    
7768     +out_free_eof_irq:
7769     + devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
7770     out_free_nfb4eof_irq:
7771     devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
7772     out_unsetup:
7773     @@ -717,6 +728,12 @@ static void prp_stop(struct prp_priv *priv)
7774     if (ret == 0)
7775     v4l2_warn(&ic_priv->sd, "wait last EOF timeout\n");
7776    
7777     + /* stop upstream */
7778     + ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
7779     + if (ret && ret != -ENOIOCTLCMD)
7780     + v4l2_warn(&ic_priv->sd,
7781     + "upstream stream off failed: %d\n", ret);
7782     +
7783     devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
7784     devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
7785    
7786     @@ -1148,15 +1165,6 @@ static int prp_s_stream(struct v4l2_subdev *sd, int enable)
7787     if (ret)
7788     goto out;
7789    
7790     - /* start/stop upstream */
7791     - ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable);
7792     - ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
7793     - if (ret) {
7794     - if (enable)
7795     - prp_stop(priv);
7796     - goto out;
7797     - }
7798     -
7799     update_count:
7800     priv->stream_count += enable ? 1 : -1;
7801     if (priv->stream_count < 0)
7802     diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
7803     index 4223f8d418ae..be1e9e52b2a0 100644
7804     --- a/drivers/staging/media/imx/imx-media-csi.c
7805     +++ b/drivers/staging/media/imx/imx-media-csi.c
7806     @@ -629,7 +629,7 @@ out_put_ipu:
7807     return ret;
7808     }
7809    
7810     -static void csi_idmac_stop(struct csi_priv *priv)
7811     +static void csi_idmac_wait_last_eof(struct csi_priv *priv)
7812     {
7813     unsigned long flags;
7814     int ret;
7815     @@ -646,7 +646,10 @@ static void csi_idmac_stop(struct csi_priv *priv)
7816     &priv->last_eof_comp, msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
7817     if (ret == 0)
7818     v4l2_warn(&priv->sd, "wait last EOF timeout\n");
7819     +}
7820    
7821     +static void csi_idmac_stop(struct csi_priv *priv)
7822     +{
7823     devm_free_irq(priv->dev, priv->eof_irq, priv);
7824     devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
7825    
7826     @@ -722,10 +725,16 @@ static int csi_start(struct csi_priv *priv)
7827    
7828     output_fi = &priv->frame_interval[priv->active_output_pad];
7829    
7830     + /* start upstream */
7831     + ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
7832     + ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
7833     + if (ret)
7834     + return ret;
7835     +
7836     if (priv->dest == IPU_CSI_DEST_IDMAC) {
7837     ret = csi_idmac_start(priv);
7838     if (ret)
7839     - return ret;
7840     + goto stop_upstream;
7841     }
7842    
7843     ret = csi_setup(priv);
7844     @@ -753,11 +762,26 @@ fim_off:
7845     idmac_stop:
7846     if (priv->dest == IPU_CSI_DEST_IDMAC)
7847     csi_idmac_stop(priv);
7848     +stop_upstream:
7849     + v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
7850     return ret;
7851     }
7852    
7853     static void csi_stop(struct csi_priv *priv)
7854     {
7855     + if (priv->dest == IPU_CSI_DEST_IDMAC)
7856     + csi_idmac_wait_last_eof(priv);
7857     +
7858     + /*
7859     + * Disable the CSI asap, after syncing with the last EOF.
7860     + * Doing so after the IDMA channel is disabled has been shown
7861     + * to create hard system-wide hangs.
7862     + */
7863     + ipu_csi_disable(priv->csi);
7864     +
7865     + /* stop upstream */
7866     + v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
7867     +
7868     if (priv->dest == IPU_CSI_DEST_IDMAC) {
7869     csi_idmac_stop(priv);
7870    
7871     @@ -765,8 +789,6 @@ static void csi_stop(struct csi_priv *priv)
7872     if (priv->fim)
7873     imx_media_fim_set_stream(priv->fim, NULL, false);
7874     }
7875     -
7876     - ipu_csi_disable(priv->csi);
7877     }
7878    
7879     static const struct csi_skip_desc csi_skip[12] = {
7880     @@ -927,23 +949,13 @@ static int csi_s_stream(struct v4l2_subdev *sd, int enable)
7881     goto update_count;
7882    
7883     if (enable) {
7884     - /* upstream must be started first, before starting CSI */
7885     - ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
7886     - ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
7887     - if (ret)
7888     - goto out;
7889     -
7890     dev_dbg(priv->dev, "stream ON\n");
7891     ret = csi_start(priv);
7892     - if (ret) {
7893     - v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
7894     + if (ret)
7895     goto out;
7896     - }
7897     } else {
7898     dev_dbg(priv->dev, "stream OFF\n");
7899     - /* CSI must be stopped first, then stop upstream */
7900     csi_stop(priv);
7901     - v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
7902     }
7903    
7904     update_count:
7905     @@ -1787,7 +1799,7 @@ static int imx_csi_parse_endpoint(struct device *dev,
7906     struct v4l2_fwnode_endpoint *vep,
7907     struct v4l2_async_subdev *asd)
7908     {
7909     - return fwnode_device_is_available(asd->match.fwnode) ? 0 : -EINVAL;
7910     + return fwnode_device_is_available(asd->match.fwnode) ? 0 : -ENOTCONN;
7911     }
7912    
7913     static int imx_csi_async_register(struct csi_priv *priv)
7914     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
7915     index bd15a564fe24..3ad2659630e8 100644
7916     --- a/drivers/target/iscsi/iscsi_target.c
7917     +++ b/drivers/target/iscsi/iscsi_target.c
7918     @@ -4040,9 +4040,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
7919     struct se_cmd *se_cmd = &cmd->se_cmd;
7920    
7921     if (se_cmd->se_tfo != NULL) {
7922     - spin_lock(&se_cmd->t_state_lock);
7923     + spin_lock_irq(&se_cmd->t_state_lock);
7924     se_cmd->transport_state |= CMD_T_FABRIC_STOP;
7925     - spin_unlock(&se_cmd->t_state_lock);
7926     + spin_unlock_irq(&se_cmd->t_state_lock);
7927     }
7928     }
7929     spin_unlock_bh(&conn->cmd_lock);
7930     diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
7931     index a1a85805d010..2488de1c4bc4 100644
7932     --- a/drivers/tty/serial/8250/8250_of.c
7933     +++ b/drivers/tty/serial/8250/8250_of.c
7934     @@ -130,6 +130,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
7935     port->flags |= UPF_IOREMAP;
7936     }
7937    
7938     + /* Compatibility with the deprecated pxa and 8250_pxa drivers. */
7939     + if (of_device_is_compatible(np, "mrvl,mmp-uart"))
7940     + port->regshift = 2;
7941     +
7942     /* Check for registers offset within the devices address range */
7943     if (of_property_read_u32(np, "reg-shift", &prop) == 0)
7944     port->regshift = prop;
7945     diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
7946     index 48bd694a5fa1..bbe5cba21522 100644
7947     --- a/drivers/tty/serial/8250/8250_pci.c
7948     +++ b/drivers/tty/serial/8250/8250_pci.c
7949     @@ -2027,6 +2027,111 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
7950     .setup = pci_default_setup,
7951     .exit = pci_plx9050_exit,
7952     },
7953     + {
7954     + .vendor = PCI_VENDOR_ID_ACCESIO,
7955     + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
7956     + .subvendor = PCI_ANY_ID,
7957     + .subdevice = PCI_ANY_ID,
7958     + .setup = pci_pericom_setup,
7959     + },
7960     + {
7961     + .vendor = PCI_VENDOR_ID_ACCESIO,
7962     + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
7963     + .subvendor = PCI_ANY_ID,
7964     + .subdevice = PCI_ANY_ID,
7965     + .setup = pci_pericom_setup,
7966     + },
7967     + {
7968     + .vendor = PCI_VENDOR_ID_ACCESIO,
7969     + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
7970     + .subvendor = PCI_ANY_ID,
7971     + .subdevice = PCI_ANY_ID,
7972     + .setup = pci_pericom_setup,
7973     + },
7974     + {
7975     + .vendor = PCI_VENDOR_ID_ACCESIO,
7976     + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
7977     + .subvendor = PCI_ANY_ID,
7978     + .subdevice = PCI_ANY_ID,
7979     + .setup = pci_pericom_setup,
7980     + },
7981     + {
7982     + .vendor = PCI_VENDOR_ID_ACCESIO,
7983     + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
7984     + .subvendor = PCI_ANY_ID,
7985     + .subdevice = PCI_ANY_ID,
7986     + .setup = pci_pericom_setup,
7987     + },
7988     + {
7989     + .vendor = PCI_VENDOR_ID_ACCESIO,
7990     + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
7991     + .subvendor = PCI_ANY_ID,
7992     + .subdevice = PCI_ANY_ID,
7993     + .setup = pci_pericom_setup,
7994     + },
7995     + {
7996     + .vendor = PCI_VENDOR_ID_ACCESIO,
7997     + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
7998     + .subvendor = PCI_ANY_ID,
7999     + .subdevice = PCI_ANY_ID,
8000     + .setup = pci_pericom_setup,
8001     + },
8002     + {
8003     + .vendor = PCI_VENDOR_ID_ACCESIO,
8004     + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
8005     + .subvendor = PCI_ANY_ID,
8006     + .subdevice = PCI_ANY_ID,
8007     + .setup = pci_pericom_setup,
8008     + },
8009     + {
8010     + .vendor = PCI_VENDOR_ID_ACCESIO,
8011     + .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
8012     + .subvendor = PCI_ANY_ID,
8013     + .subdevice = PCI_ANY_ID,
8014     + .setup = pci_pericom_setup,
8015     + },
8016     + {
8017     + .vendor = PCI_VENDOR_ID_ACCESIO,
8018     + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
8019     + .subvendor = PCI_ANY_ID,
8020     + .subdevice = PCI_ANY_ID,
8021     + .setup = pci_pericom_setup,
8022     + },
8023     + {
8024     + .vendor = PCI_VENDOR_ID_ACCESIO,
8025     + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
8026     + .subvendor = PCI_ANY_ID,
8027     + .subdevice = PCI_ANY_ID,
8028     + .setup = pci_pericom_setup,
8029     + },
8030     + {
8031     + .vendor = PCI_VENDOR_ID_ACCESIO,
8032     + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
8033     + .subvendor = PCI_ANY_ID,
8034     + .subdevice = PCI_ANY_ID,
8035     + .setup = pci_pericom_setup,
8036     + },
8037     + {
8038     + .vendor = PCI_VENDOR_ID_ACCESIO,
8039     + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
8040     + .subvendor = PCI_ANY_ID,
8041     + .subdevice = PCI_ANY_ID,
8042     + .setup = pci_pericom_setup,
8043     + },
8044     + {
8045     + .vendor = PCI_VENDOR_ID_ACCESIO,
8046     + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
8047     + .subvendor = PCI_ANY_ID,
8048     + .subdevice = PCI_ANY_ID,
8049     + .setup = pci_pericom_setup,
8050     + },
8051     + {
8052     + .vendor = PCI_VENDOR_ID_ACCESIO,
8053     + .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
8054     + .subvendor = PCI_ANY_ID,
8055     + .subdevice = PCI_ANY_ID,
8056     + .setup = pci_pericom_setup,
8057     + },
8058     /*
8059     * SBS Technologies, Inc., PMC-OCTALPRO 232
8060     */
8061     @@ -4575,10 +4680,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
8062     */
8063     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
8064     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8065     - pbn_pericom_PI7C9X7954 },
8066     + pbn_pericom_PI7C9X7952 },
8067     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
8068     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8069     - pbn_pericom_PI7C9X7954 },
8070     + pbn_pericom_PI7C9X7952 },
8071     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
8072     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8073     pbn_pericom_PI7C9X7954 },
8074     @@ -4587,10 +4692,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
8075     pbn_pericom_PI7C9X7954 },
8076     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
8077     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8078     - pbn_pericom_PI7C9X7954 },
8079     + pbn_pericom_PI7C9X7952 },
8080     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
8081     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8082     - pbn_pericom_PI7C9X7954 },
8083     + pbn_pericom_PI7C9X7952 },
8084     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
8085     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8086     pbn_pericom_PI7C9X7954 },
8087     @@ -4599,10 +4704,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
8088     pbn_pericom_PI7C9X7954 },
8089     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
8090     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8091     - pbn_pericom_PI7C9X7954 },
8092     + pbn_pericom_PI7C9X7952 },
8093     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
8094     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8095     - pbn_pericom_PI7C9X7954 },
8096     + pbn_pericom_PI7C9X7952 },
8097     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
8098     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8099     pbn_pericom_PI7C9X7954 },
8100     @@ -4611,13 +4716,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
8101     pbn_pericom_PI7C9X7954 },
8102     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
8103     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8104     - pbn_pericom_PI7C9X7954 },
8105     + pbn_pericom_PI7C9X7951 },
8106     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
8107     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8108     - pbn_pericom_PI7C9X7954 },
8109     + pbn_pericom_PI7C9X7952 },
8110     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
8111     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8112     - pbn_pericom_PI7C9X7954 },
8113     + pbn_pericom_PI7C9X7952 },
8114     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
8115     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8116     pbn_pericom_PI7C9X7954 },
8117     @@ -4626,16 +4731,16 @@ static const struct pci_device_id serial_pci_tbl[] = {
8118     pbn_pericom_PI7C9X7954 },
8119     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
8120     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8121     - pbn_pericom_PI7C9X7954 },
8122     + pbn_pericom_PI7C9X7952 },
8123     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
8124     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8125     pbn_pericom_PI7C9X7954 },
8126     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
8127     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8128     - pbn_pericom_PI7C9X7954 },
8129     + pbn_pericom_PI7C9X7952 },
8130     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
8131     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8132     - pbn_pericom_PI7C9X7954 },
8133     + pbn_pericom_PI7C9X7952 },
8134     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
8135     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8136     pbn_pericom_PI7C9X7954 },
8137     @@ -4644,13 +4749,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
8138     pbn_pericom_PI7C9X7954 },
8139     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
8140     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8141     - pbn_pericom_PI7C9X7954 },
8142     + pbn_pericom_PI7C9X7952 },
8143     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
8144     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8145     - pbn_pericom_PI7C9X7958 },
8146     + pbn_pericom_PI7C9X7954 },
8147     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
8148     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8149     - pbn_pericom_PI7C9X7958 },
8150     + pbn_pericom_PI7C9X7954 },
8151     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
8152     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8153     pbn_pericom_PI7C9X7958 },
8154     @@ -4659,19 +4764,19 @@ static const struct pci_device_id serial_pci_tbl[] = {
8155     pbn_pericom_PI7C9X7958 },
8156     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
8157     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8158     - pbn_pericom_PI7C9X7958 },
8159     + pbn_pericom_PI7C9X7954 },
8160     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
8161     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8162     pbn_pericom_PI7C9X7958 },
8163     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
8164     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8165     - pbn_pericom_PI7C9X7958 },
8166     + pbn_pericom_PI7C9X7954 },
8167     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
8168     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8169     pbn_pericom_PI7C9X7958 },
8170     { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
8171     PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8172     - pbn_pericom_PI7C9X7958 },
8173     + pbn_pericom_PI7C9X7954 },
8174     /*
8175     * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
8176     */
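
The pbn_pericom_PI7C9X795x corrections above line each ACCES card up with the UART count of the Pericom bridge actually fitted; on my reading of the driver, the four configs correspond to 1, 2, 4 and 8 ports. As a quick reference, with the mapping stated as an assumption drawn from the part numbers:

#include <stdio.h>

/* Assumed Pericom part -> UART count; the pbn_ entries in 8250_pci.c
 * select the matching configuration. */
struct part { const char *name; int nports; };

static const struct part parts[] = {
        { "PI7C9X7951", 1 },
        { "PI7C9X7952", 2 },
        { "PI7C9X7954", 4 },
        { "PI7C9X7958", 8 },
};

int main(void)
{
        for (unsigned i = 0; i < sizeof(parts) / sizeof(parts[0]); i++)
                printf("%s -> %d port(s)\n", parts[i].name, parts[i].nports);
        return 0;
}
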
8177     diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
8178     index 094f2958cb2b..ee9f18c52d29 100644
8179     --- a/drivers/tty/serial/xilinx_uartps.c
8180     +++ b/drivers/tty/serial/xilinx_uartps.c
8181     @@ -364,7 +364,13 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
8182     cdns_uart_handle_tx(dev_id);
8183     isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
8184     }
8185     - if (isrstatus & CDNS_UART_IXR_RXMASK)
8186     +
8187     + /*
8188     + * Skip RX processing if RX is disabled: RXEMPTY will never be set,
8189     + * since read bytes are not removed from the FIFO.
8190     + */
8191     + if (isrstatus & CDNS_UART_IXR_RXMASK &&
8192     + !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
8193     cdns_uart_handle_rx(dev_id, isrstatus);
8194    
8195     spin_unlock(&port->lock);
8196     diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
8197     index bba75560d11e..9646ff63e77a 100644
8198     --- a/drivers/tty/vt/vt.c
8199     +++ b/drivers/tty/vt/vt.c
8200     @@ -935,8 +935,11 @@ static void flush_scrollback(struct vc_data *vc)
8201     {
8202     WARN_CONSOLE_UNLOCKED();
8203    
8204     + set_origin(vc);
8205     if (vc->vc_sw->con_flush_scrollback)
8206     vc->vc_sw->con_flush_scrollback(vc);
8207     + else
8208     + vc->vc_sw->con_switch(vc);
8209     }
8210    
8211     /*
8212     @@ -1503,8 +1506,10 @@ static void csi_J(struct vc_data *vc, int vpar)
8213     count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
8214     start = (unsigned short *)vc->vc_origin;
8215     break;
8216     + case 3: /* include scrollback */
8217     + flush_scrollback(vc);
8218     + /* fallthrough */
8219     case 2: /* erase whole display */
8220     - case 3: /* (and scrollback buffer later) */
8221     vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
8222     count = vc->vc_cols * vc->vc_rows;
8223     start = (unsigned short *)vc->vc_origin;
8224     @@ -1513,13 +1518,7 @@ static void csi_J(struct vc_data *vc, int vpar)
8225     return;
8226     }
8227     scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
8228     - if (vpar == 3) {
8229     - set_origin(vc);
8230     - flush_scrollback(vc);
8231     - if (con_is_visible(vc))
8232     - update_screen(vc);
8233     - } else if (con_should_update(vc))
8234     - do_update_region(vc, (unsigned long) start, count);
8235     + update_region(vc, (unsigned long) start, count);
8236     vc->vc_need_wrap = 0;
8237     }
8238    
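
csi_J() implements ECMA-48 "Erase in Display". Parameter 3 is the xterm extension that also clears the scrollback buffer, and the hunk makes it flush scrollback first and then fall through into the ordinary whole-display erase, rather than special-casing it afterwards. The sequences themselves, for reference (running this clears the terminal):

#include <stdio.h>

/* Emit ECMA-48 / xterm "Erase in Display" sequences; Ps = 3 is the
 * xterm extension handled by the vt.c hunk above. */
int main(void)
{
        printf("\033[2J");   /* erase the whole visible display */
        printf("\033[3J");   /* erase display + scrollback (xterm ED 3) */
        printf("\033[H");    /* cursor home so the prompt lands at top-left */
        return 0;
}
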
8239     diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
8240     index 772851bee99b..12025358bb3c 100644
8241     --- a/drivers/usb/chipidea/ci_hdrc_tegra.c
8242     +++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
8243     @@ -130,6 +130,7 @@ static int tegra_udc_remove(struct platform_device *pdev)
8244     {
8245     struct tegra_udc *udc = platform_get_drvdata(pdev);
8246    
8247     + ci_hdrc_remove_device(udc->dev);
8248     usb_phy_set_suspend(udc->phy, 1);
8249     clk_disable_unprepare(udc->clk);
8250    
8251     diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
8252     index 1c0033ad8738..e1109b15636d 100644
8253     --- a/drivers/usb/typec/tps6598x.c
8254     +++ b/drivers/usb/typec/tps6598x.c
8255     @@ -110,6 +110,20 @@ tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
8256     return 0;
8257     }
8258    
8259     +static int tps6598x_block_write(struct tps6598x *tps, u8 reg,
8260     + void *val, size_t len)
8261     +{
8262     + u8 data[TPS_MAX_LEN + 1];
8263     +
8264     + if (!tps->i2c_protocol)
8265     + return regmap_raw_write(tps->regmap, reg, val, len);
8266     +
8267     + data[0] = len;
8268     + memcpy(&data[1], val, len);
8269     +
8270     + return regmap_raw_write(tps->regmap, reg, data, sizeof(data));
8271     +}
8272     +
8273     static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val)
8274     {
8275     return tps6598x_block_read(tps, reg, val, sizeof(u16));
8276     @@ -127,23 +141,23 @@ static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val)
8277    
8278     static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val)
8279     {
8280     - return regmap_raw_write(tps->regmap, reg, &val, sizeof(u16));
8281     + return tps6598x_block_write(tps, reg, &val, sizeof(u16));
8282     }
8283    
8284     static inline int tps6598x_write32(struct tps6598x *tps, u8 reg, u32 val)
8285     {
8286     - return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32));
8287     + return tps6598x_block_write(tps, reg, &val, sizeof(u32));
8288     }
8289    
8290     static inline int tps6598x_write64(struct tps6598x *tps, u8 reg, u64 val)
8291     {
8292     - return regmap_raw_write(tps->regmap, reg, &val, sizeof(u64));
8293     + return tps6598x_block_write(tps, reg, &val, sizeof(u64));
8294     }
8295    
8296     static inline int
8297     tps6598x_write_4cc(struct tps6598x *tps, u8 reg, const char *val)
8298     {
8299     - return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32));
8300     + return tps6598x_block_write(tps, reg, &val, sizeof(u32));
8301     }
8302    
8303     static int tps6598x_read_partner_identity(struct tps6598x *tps)
8304     @@ -229,8 +243,8 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
8305     return -EBUSY;
8306    
8307     if (in_len) {
8308     - ret = regmap_raw_write(tps->regmap, TPS_REG_DATA1,
8309     - in_data, in_len);
8310     + ret = tps6598x_block_write(tps, TPS_REG_DATA1,
8311     + in_data, in_len);
8312     if (ret)
8313     return ret;
8314     }
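
In the TPS6598x's plain-I2C mode every multi-byte register write has to be framed with a leading length byte, which regmap_raw_write() alone does not provide; the new tps6598x_block_write() builds that frame. A standalone sketch of the framing, where TPS_MAX_LEN's value and the i2c_send() helper are stand-ins:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define TPS_MAX_LEN 16  /* assumed write-size cap, mirroring the driver */

/* Mock transport standing in for regmap_raw_write(). */
static void i2c_send(uint8_t reg, const uint8_t *buf, size_t n)
{
        printf("reg 0x%02x:", reg);
        for (size_t i = 0; i < n; i++)
                printf(" %02x", buf[i]);
        printf("\n");
}

static int block_write(uint8_t reg, const void *val, size_t len)
{
        uint8_t data[TPS_MAX_LEN + 1];

        if (len > TPS_MAX_LEN)
                return -1;
        data[0] = (uint8_t)len;        /* length byte comes first */
        memcpy(&data[1], val, len);
        i2c_send(reg, data, len + 1);
        return 0;
}

int main(void)
{
        uint32_t cmd = 0x32303043;     /* arbitrary 4-byte payload */
        return block_write(0x08, &cmd, sizeof(cmd));
}
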
8315     diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
8316     index 5a0db6dec8d1..aaee1e6584e6 100644
8317     --- a/fs/9p/v9fs_vfs.h
8318     +++ b/fs/9p/v9fs_vfs.h
8319     @@ -40,6 +40,9 @@
8320     */
8321     #define P9_LOCK_TIMEOUT (30*HZ)
8322    
8323     +/* flags for v9fs_stat2inode() & v9fs_stat2inode_dotl() */
8324     +#define V9FS_STAT2INODE_KEEP_ISIZE 1
8325     +
8326     extern struct file_system_type v9fs_fs_type;
8327     extern const struct address_space_operations v9fs_addr_operations;
8328     extern const struct file_operations v9fs_file_operations;
8329     @@ -61,8 +64,10 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
8330     struct inode *inode, umode_t mode, dev_t);
8331     void v9fs_evict_inode(struct inode *inode);
8332     ino_t v9fs_qid2ino(struct p9_qid *qid);
8333     -void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
8334     -void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *);
8335     +void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
8336     + struct super_block *sb, unsigned int flags);
8337     +void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
8338     + unsigned int flags);
8339     int v9fs_dir_release(struct inode *inode, struct file *filp);
8340     int v9fs_file_open(struct inode *inode, struct file *file);
8341     void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
8342     @@ -83,4 +88,18 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
8343     }
8344    
8345     int v9fs_open_to_dotl_flags(int flags);
8346     +
8347     +static inline void v9fs_i_size_write(struct inode *inode, loff_t i_size)
8348     +{
8349     + /*
8350     + * 32-bit builds need the lock: concurrent updates could break the
8351     + * sequence count and make i_size_read() loop forever.
8352     + * 64-bit updates are atomic and can skip the locking.
8353     + */
8354     + if (sizeof(i_size) > sizeof(long))
8355     + spin_lock(&inode->i_lock);
8356     + i_size_write(inode, i_size);
8357     + if (sizeof(i_size) > sizeof(long))
8358     + spin_unlock(&inode->i_lock);
8359     +}
8360     #endif
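
v9fs_i_size_write() exists because on 32-bit kernels a 64-bit i_size store is not a single machine word: i_size_write() is paired with a sequence count there, and unserialized writers can leave i_size_read() spinning forever. The sizeof comparison is a compile-time constant, so 64-bit builds pay nothing. A sketch of the constant-folded condition:

#include <stdio.h>

typedef long long loff_t_demo;  /* stand-in for the kernel's loff_t */

int main(void)
{
        /* Mirrors the helper's condition: lock only where a 64-bit
         * store is not one machine word. The compiler folds this, so
         * the untaken branch costs nothing at run time. */
        if (sizeof(loff_t_demo) > sizeof(long))
                puts("32-bit build: take inode->i_lock around i_size_write()");
        else
                puts("64-bit build: store is atomic, no lock needed");
        return 0;
}
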
8361     diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
8362     index a25efa782fcc..9a1125305d84 100644
8363     --- a/fs/9p/vfs_file.c
8364     +++ b/fs/9p/vfs_file.c
8365     @@ -446,7 +446,11 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
8366     i_size = i_size_read(inode);
8367     if (iocb->ki_pos > i_size) {
8368     inode_add_bytes(inode, iocb->ki_pos - i_size);
8369     - i_size_write(inode, iocb->ki_pos);
8370     + /*
8371     + * Need to serialize against i_size_write() in
8372     + * v9fs_stat2inode()
8373     + */
8374     + v9fs_i_size_write(inode, iocb->ki_pos);
8375     }
8376     return retval;
8377     }
8378     diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
8379     index 85ff859d3af5..72b779bc0942 100644
8380     --- a/fs/9p/vfs_inode.c
8381     +++ b/fs/9p/vfs_inode.c
8382     @@ -538,7 +538,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
8383     if (retval)
8384     goto error;
8385    
8386     - v9fs_stat2inode(st, inode, sb);
8387     + v9fs_stat2inode(st, inode, sb, 0);
8388     v9fs_cache_inode_get_cookie(inode);
8389     unlock_new_inode(inode);
8390     return inode;
8391     @@ -1092,7 +1092,7 @@ v9fs_vfs_getattr(const struct path *path, struct kstat *stat,
8392     if (IS_ERR(st))
8393     return PTR_ERR(st);
8394    
8395     - v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb);
8396     + v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
8397     generic_fillattr(d_inode(dentry), stat);
8398    
8399     p9stat_free(st);
8400     @@ -1170,12 +1170,13 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
8401     * @stat: Plan 9 metadata (mistat) structure
8402     * @inode: inode to populate
8403     * @sb: superblock of filesystem
8404     + * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
8405     *
8406     */
8407    
8408     void
8409     v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
8410     - struct super_block *sb)
8411     + struct super_block *sb, unsigned int flags)
8412     {
8413     umode_t mode;
8414     char ext[32];
8415     @@ -1216,10 +1217,11 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
8416     mode = p9mode2perm(v9ses, stat);
8417     mode |= inode->i_mode & ~S_IALLUGO;
8418     inode->i_mode = mode;
8419     - i_size_write(inode, stat->length);
8420    
8421     + if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
8422     + v9fs_i_size_write(inode, stat->length);
8423     /* not real number of blocks, but 512 byte ones ... */
8424     - inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
8425     + inode->i_blocks = (stat->length + 512 - 1) >> 9;
8426     v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
8427     }
8428    
8429     @@ -1416,9 +1418,9 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
8430     {
8431     int umode;
8432     dev_t rdev;
8433     - loff_t i_size;
8434     struct p9_wstat *st;
8435     struct v9fs_session_info *v9ses;
8436     + unsigned int flags;
8437    
8438     v9ses = v9fs_inode2v9ses(inode);
8439     st = p9_client_stat(fid);
8440     @@ -1431,16 +1433,13 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
8441     if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
8442     goto out;
8443    
8444     - spin_lock(&inode->i_lock);
8445     /*
8446     * We don't want to refresh inode->i_size,
8447     * because we may have cached data
8448     */
8449     - i_size = inode->i_size;
8450     - v9fs_stat2inode(st, inode, inode->i_sb);
8451     - if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
8452     - inode->i_size = i_size;
8453     - spin_unlock(&inode->i_lock);
8454     + flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
8455     + V9FS_STAT2INODE_KEEP_ISIZE : 0;
8456     + v9fs_stat2inode(st, inode, inode->i_sb, flags);
8457     out:
8458     p9stat_free(st);
8459     kfree(st);
8460     diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
8461     index 4823e1c46999..a950a927a626 100644
8462     --- a/fs/9p/vfs_inode_dotl.c
8463     +++ b/fs/9p/vfs_inode_dotl.c
8464     @@ -143,7 +143,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
8465     if (retval)
8466     goto error;
8467    
8468     - v9fs_stat2inode_dotl(st, inode);
8469     + v9fs_stat2inode_dotl(st, inode, 0);
8470     v9fs_cache_inode_get_cookie(inode);
8471     retval = v9fs_get_acl(inode, fid);
8472     if (retval)
8473     @@ -496,7 +496,7 @@ v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat,
8474     if (IS_ERR(st))
8475     return PTR_ERR(st);
8476    
8477     - v9fs_stat2inode_dotl(st, d_inode(dentry));
8478     + v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
8479     generic_fillattr(d_inode(dentry), stat);
8480     /* Change block size to what the server returned */
8481     stat->blksize = st->st_blksize;
8482     @@ -607,11 +607,13 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
8483     * v9fs_stat2inode_dotl - populate an inode structure with stat info
8484     * @stat: stat structure
8485     * @inode: inode to populate
8486     + * @flags: ctrl flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
8487     *
8488     */
8489    
8490     void
8491     -v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
8492     +v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
8493     + unsigned int flags)
8494     {
8495     umode_t mode;
8496     struct v9fs_inode *v9inode = V9FS_I(inode);
8497     @@ -631,7 +633,8 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
8498     mode |= inode->i_mode & ~S_IALLUGO;
8499     inode->i_mode = mode;
8500    
8501     - i_size_write(inode, stat->st_size);
8502     + if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
8503     + v9fs_i_size_write(inode, stat->st_size);
8504     inode->i_blocks = stat->st_blocks;
8505     } else {
8506     if (stat->st_result_mask & P9_STATS_ATIME) {
8507     @@ -661,8 +664,9 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
8508     }
8509     if (stat->st_result_mask & P9_STATS_RDEV)
8510     inode->i_rdev = new_decode_dev(stat->st_rdev);
8511     - if (stat->st_result_mask & P9_STATS_SIZE)
8512     - i_size_write(inode, stat->st_size);
8513     + if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
8514     + stat->st_result_mask & P9_STATS_SIZE)
8515     + v9fs_i_size_write(inode, stat->st_size);
8516     if (stat->st_result_mask & P9_STATS_BLOCKS)
8517     inode->i_blocks = stat->st_blocks;
8518     }
8519     @@ -928,9 +932,9 @@ v9fs_vfs_get_link_dotl(struct dentry *dentry,
8520    
8521     int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
8522     {
8523     - loff_t i_size;
8524     struct p9_stat_dotl *st;
8525     struct v9fs_session_info *v9ses;
8526     + unsigned int flags;
8527    
8528     v9ses = v9fs_inode2v9ses(inode);
8529     st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
8530     @@ -942,16 +946,13 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
8531     if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
8532     goto out;
8533    
8534     - spin_lock(&inode->i_lock);
8535     /*
8536     * We don't want to refresh inode->i_size,
8537     * because we may have cached data
8538     */
8539     - i_size = inode->i_size;
8540     - v9fs_stat2inode_dotl(st, inode);
8541     - if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
8542     - inode->i_size = i_size;
8543     - spin_unlock(&inode->i_lock);
8544     + flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
8545     + V9FS_STAT2INODE_KEEP_ISIZE : 0;
8546     + v9fs_stat2inode_dotl(st, inode, flags);
8547     out:
8548     kfree(st);
8549     return 0;
8550     diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
8551     index 48ce50484e80..eeab9953af89 100644
8552     --- a/fs/9p/vfs_super.c
8553     +++ b/fs/9p/vfs_super.c
8554     @@ -172,7 +172,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
8555     goto release_sb;
8556     }
8557     d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
8558     - v9fs_stat2inode_dotl(st, d_inode(root));
8559     + v9fs_stat2inode_dotl(st, d_inode(root), 0);
8560     kfree(st);
8561     } else {
8562     struct p9_wstat *st = NULL;
8563     @@ -183,7 +183,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
8564     }
8565    
8566     d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
8567     - v9fs_stat2inode(st, d_inode(root), sb);
8568     + v9fs_stat2inode(st, d_inode(root), sb, 0);
8569    
8570     p9stat_free(st);
8571     kfree(st);
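[Note: the 9p hunks above route every inode size update through v9fs_i_size_write() and gate size refreshes on V9FS_STAT2INODE_KEEP_ISIZE, instead of open-coding a save/restore of i_size under inode->i_lock at each call site. A minimal sketch of that helper, assuming it matches the static inline this series adds to fs/9p/v9fs_vfs.h:

    static inline void v9fs_i_size_write(struct inode *inode, loff_t i_size)
    {
        /*
         * On 32-bit SMP, i_size_write() must be serialized or a concurrent
         * i_size_read() can spin forever on the seqcount; on 64-bit the
         * store is atomic and the lock can be skipped.
         */
        if (sizeof(i_size) > sizeof(long))
            spin_lock(&inode->i_lock);
        i_size_write(inode, i_size);
        if (sizeof(i_size) > sizeof(long))
            spin_unlock(&inode->i_lock);
    }
]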
8572     diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
8573     index 3b66c957ea6f..5810463dc6d2 100644
8574     --- a/fs/btrfs/acl.c
8575     +++ b/fs/btrfs/acl.c
8576     @@ -9,6 +9,7 @@
8577     #include <linux/posix_acl_xattr.h>
8578     #include <linux/posix_acl.h>
8579     #include <linux/sched.h>
8580     +#include <linux/sched/mm.h>
8581     #include <linux/slab.h>
8582    
8583     #include "ctree.h"
8584     @@ -72,8 +73,16 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
8585     }
8586    
8587     if (acl) {
8588     + unsigned int nofs_flag;
8589     +
8590     size = posix_acl_xattr_size(acl->a_count);
8591     + /*
8592     + * We're holding a transaction handle, so use a NOFS memory
8593     + * allocation context to avoid deadlock if reclaim happens.
8594     + */
8595     + nofs_flag = memalloc_nofs_save();
8596     value = kmalloc(size, GFP_KERNEL);
8597     + memalloc_nofs_restore(nofs_flag);
8598     if (!value) {
8599     ret = -ENOMEM;
8600     goto out;
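[Note: this hunk and the fs/btrfs/disk-io.c hunk below use the same scoped-NOFS pattern from <linux/sched/mm.h>: while the window is open, GFP_KERNEL allocations are implicitly treated as GFP_NOFS, so direct reclaim cannot re-enter btrfs and deadlock on the transaction handle already held. The general shape:

    unsigned int nofs_flag;

    nofs_flag = memalloc_nofs_save();
    value = kmalloc(size, GFP_KERNEL);  /* behaves as GFP_NOFS in this scope */
    memalloc_nofs_restore(nofs_flag);
]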
8601     diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
8602     index 8750c835f535..c4dea3b7349e 100644
8603     --- a/fs/btrfs/dev-replace.c
8604     +++ b/fs/btrfs/dev-replace.c
8605     @@ -862,6 +862,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
8606     btrfs_destroy_dev_replace_tgtdev(tgt_device);
8607     break;
8608     default:
8609     + up_write(&dev_replace->rwsem);
8610     result = -EINVAL;
8611     }
8612    
8613     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
8614     index 6a2a2a951705..888d72dda794 100644
8615     --- a/fs/btrfs/disk-io.c
8616     +++ b/fs/btrfs/disk-io.c
8617     @@ -17,6 +17,7 @@
8618     #include <linux/semaphore.h>
8619     #include <linux/error-injection.h>
8620     #include <linux/crc32c.h>
8621     +#include <linux/sched/mm.h>
8622     #include <asm/unaligned.h>
8623     #include "ctree.h"
8624     #include "disk-io.h"
8625     @@ -1258,10 +1259,17 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
8626     struct btrfs_root *tree_root = fs_info->tree_root;
8627     struct btrfs_root *root;
8628     struct btrfs_key key;
8629     + unsigned int nofs_flag;
8630     int ret = 0;
8631     uuid_le uuid = NULL_UUID_LE;
8632    
8633     + /*
8634     + * We're holding a transaction handle, so use a NOFS memory allocation
8635     + * context to avoid deadlock if reclaim happens.
8636     + */
8637     + nofs_flag = memalloc_nofs_save();
8638     root = btrfs_alloc_root(fs_info, GFP_KERNEL);
8639     + memalloc_nofs_restore(nofs_flag);
8640     if (!root)
8641     return ERR_PTR(-ENOMEM);
8642    
8643     diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
8644     index 52abe4082680..1bfb7207bbf0 100644
8645     --- a/fs/btrfs/extent_io.c
8646     +++ b/fs/btrfs/extent_io.c
8647     @@ -2985,11 +2985,11 @@ static int __do_readpage(struct extent_io_tree *tree,
8648     */
8649     if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
8650     prev_em_start && *prev_em_start != (u64)-1 &&
8651     - *prev_em_start != em->orig_start)
8652     + *prev_em_start != em->start)
8653     force_bio_submit = true;
8654    
8655     if (prev_em_start)
8656     - *prev_em_start = em->orig_start;
8657     + *prev_em_start = em->start;
8658    
8659     free_extent_map(em);
8660     em = NULL;
8661     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
8662     index 9c8e1734429c..6e1119496721 100644
8663     --- a/fs/btrfs/ioctl.c
8664     +++ b/fs/btrfs/ioctl.c
8665     @@ -3206,21 +3206,6 @@ out:
8666     return ret;
8667     }
8668    
8669     -static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
8670     -{
8671     - inode_unlock(inode1);
8672     - inode_unlock(inode2);
8673     -}
8674     -
8675     -static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
8676     -{
8677     - if (inode1 < inode2)
8678     - swap(inode1, inode2);
8679     -
8680     - inode_lock_nested(inode1, I_MUTEX_PARENT);
8681     - inode_lock_nested(inode2, I_MUTEX_CHILD);
8682     -}
8683     -
8684     static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
8685     struct inode *inode2, u64 loff2, u64 len)
8686     {
8687     @@ -3989,7 +3974,7 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
8688     if (same_inode)
8689     inode_lock(inode_in);
8690     else
8691     - btrfs_double_inode_lock(inode_in, inode_out);
8692     + lock_two_nondirectories(inode_in, inode_out);
8693    
8694     /*
8695     * Now that the inodes are locked, we need to start writeback ourselves
8696     @@ -4039,7 +4024,7 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
8697     if (same_inode)
8698     inode_unlock(inode_in);
8699     else
8700     - btrfs_double_inode_unlock(inode_in, inode_out);
8701     + unlock_two_nondirectories(inode_in, inode_out);
8702    
8703     return ret;
8704     }
8705     @@ -4069,7 +4054,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
8706     if (same_inode)
8707     inode_unlock(src_inode);
8708     else
8709     - btrfs_double_inode_unlock(src_inode, dst_inode);
8710     + unlock_two_nondirectories(src_inode, dst_inode);
8711    
8712     return ret < 0 ? ret : len;
8713     }
8714     diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
8715     index 6dcd36d7b849..1aeac70d0531 100644
8716     --- a/fs/btrfs/scrub.c
8717     +++ b/fs/btrfs/scrub.c
8718     @@ -584,6 +584,7 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
8719     sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
8720     sctx->curr = -1;
8721     sctx->fs_info = fs_info;
8722     + INIT_LIST_HEAD(&sctx->csum_list);
8723     for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
8724     struct scrub_bio *sbio;
8725    
8726     @@ -608,7 +609,6 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
8727     atomic_set(&sctx->workers_pending, 0);
8728     atomic_set(&sctx->cancel_req, 0);
8729     sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
8730     - INIT_LIST_HEAD(&sctx->csum_list);
8731    
8732     spin_lock_init(&sctx->list_lock);
8733     spin_lock_init(&sctx->stat_lock);
8734     @@ -3770,16 +3770,6 @@ fail_scrub_workers:
8735     return -ENOMEM;
8736     }
8737    
8738     -static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
8739     -{
8740     - if (--fs_info->scrub_workers_refcnt == 0) {
8741     - btrfs_destroy_workqueue(fs_info->scrub_workers);
8742     - btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
8743     - btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
8744     - }
8745     - WARN_ON(fs_info->scrub_workers_refcnt < 0);
8746     -}
8747     -
8748     int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
8749     u64 end, struct btrfs_scrub_progress *progress,
8750     int readonly, int is_dev_replace)
8751     @@ -3788,6 +3778,9 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
8752     int ret;
8753     struct btrfs_device *dev;
8754     unsigned int nofs_flag;
8755     + struct btrfs_workqueue *scrub_workers = NULL;
8756     + struct btrfs_workqueue *scrub_wr_comp = NULL;
8757     + struct btrfs_workqueue *scrub_parity = NULL;
8758    
8759     if (btrfs_fs_closing(fs_info))
8760     return -EINVAL;
8761     @@ -3927,9 +3920,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
8762    
8763     mutex_lock(&fs_info->scrub_lock);
8764     dev->scrub_ctx = NULL;
8765     - scrub_workers_put(fs_info);
8766     + if (--fs_info->scrub_workers_refcnt == 0) {
8767     + scrub_workers = fs_info->scrub_workers;
8768     + scrub_wr_comp = fs_info->scrub_wr_completion_workers;
8769     + scrub_parity = fs_info->scrub_parity_workers;
8770     + }
8771     mutex_unlock(&fs_info->scrub_lock);
8772    
8773     + btrfs_destroy_workqueue(scrub_workers);
8774     + btrfs_destroy_workqueue(scrub_wr_comp);
8775     + btrfs_destroy_workqueue(scrub_parity);
8776     scrub_put_ctx(sctx);
8777    
8778     return ret;
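[Note: the scrub changes above snapshot the workqueue pointers while scrub_lock is held and destroy them only after unlocking. btrfs_destroy_workqueue() waits for in-flight work items, and scrub work can itself take scrub_lock, so destroying under the lock risks a circular locking dependency; the call is also NULL-safe, which is why the pointers may legitimately stay NULL when the refcount has not dropped to zero. The pattern, reduced to its essentials:

    struct btrfs_workqueue *wq = NULL;

    mutex_lock(&fs_info->scrub_lock);
    if (--fs_info->scrub_workers_refcnt == 0)
        wq = fs_info->scrub_workers;    /* detach under the lock */
    mutex_unlock(&fs_info->scrub_lock);

    btrfs_destroy_workqueue(wq);        /* may sleep; NULL-safe */
]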
8779     diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
8780     index 15561926ab32..48523bcabae9 100644
8781     --- a/fs/btrfs/volumes.c
8782     +++ b/fs/btrfs/volumes.c
8783     @@ -6782,10 +6782,10 @@ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
8784     }
8785    
8786     if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
8787     - (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
8788     + (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
8789     (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
8790     (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
8791     - (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
8792     + (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
8793     ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
8794     num_stripes != 1)) {
8795     btrfs_err(fs_info,
8796     diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
8797     index 42f0d67f1054..ed49222abecb 100644
8798     --- a/fs/cifs/cifs_fs_sb.h
8799     +++ b/fs/cifs/cifs_fs_sb.h
8800     @@ -58,6 +58,7 @@ struct cifs_sb_info {
8801     spinlock_t tlink_tree_lock;
8802     struct tcon_link *master_tlink;
8803     struct nls_table *local_nls;
8804     + unsigned int bsize;
8805     unsigned int rsize;
8806     unsigned int wsize;
8807     unsigned long actimeo; /* attribute cache timeout (jiffies) */
8808     diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
8809     index 62d48d486d8f..f2c0d863fb52 100644
8810     --- a/fs/cifs/cifsfs.c
8811     +++ b/fs/cifs/cifsfs.c
8812     @@ -554,6 +554,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
8813    
8814     seq_printf(s, ",rsize=%u", cifs_sb->rsize);
8815     seq_printf(s, ",wsize=%u", cifs_sb->wsize);
8816     + seq_printf(s, ",bsize=%u", cifs_sb->bsize);
8817     seq_printf(s, ",echo_interval=%lu",
8818     tcon->ses->server->echo_interval / HZ);
8819     if (tcon->snapshot_time)
8820     diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
8821     index 94dbdbe5be34..1b25e6e95d45 100644
8822     --- a/fs/cifs/cifsglob.h
8823     +++ b/fs/cifs/cifsglob.h
8824     @@ -236,6 +236,8 @@ struct smb_version_operations {
8825     int * (*get_credits_field)(struct TCP_Server_Info *, const int);
8826     unsigned int (*get_credits)(struct mid_q_entry *);
8827     __u64 (*get_next_mid)(struct TCP_Server_Info *);
8828     + void (*revert_current_mid)(struct TCP_Server_Info *server,
8829     + const unsigned int val);
8830     /* data offset from read response message */
8831     unsigned int (*read_data_offset)(char *);
8832     /*
8833     @@ -557,6 +559,7 @@ struct smb_vol {
8834     bool resilient:1; /* noresilient not required since not fored for CA */
8835     bool domainauto:1;
8836     bool rdma:1;
8837     + unsigned int bsize;
8838     unsigned int rsize;
8839     unsigned int wsize;
8840     bool sockopt_tcp_nodelay:1;
8841     @@ -770,6 +773,22 @@ get_next_mid(struct TCP_Server_Info *server)
8842     return cpu_to_le16(mid);
8843     }
8844    
8845     +static inline void
8846     +revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
8847     +{
8848     + if (server->ops->revert_current_mid)
8849     + server->ops->revert_current_mid(server, val);
8850     +}
8851     +
8852     +static inline void
8853     +revert_current_mid_from_hdr(struct TCP_Server_Info *server,
8854     + const struct smb2_sync_hdr *shdr)
8855     +{
8856     + unsigned int num = le16_to_cpu(shdr->CreditCharge);
8857     +
8858     + return revert_current_mid(server, num > 0 ? num : 1);
8859     +}
8860     +
8861     static inline __u16
8862     get_mid(const struct smb_hdr *smb)
8863     {
8864     @@ -1422,6 +1441,7 @@ struct mid_q_entry {
8865     struct kref refcount;
8866     struct TCP_Server_Info *server; /* server corresponding to this mid */
8867     __u64 mid; /* multiplex id */
8868     + __u16 credits; /* number of credits consumed by this mid */
8869     __u32 pid; /* process id */
8870     __u32 sequence_number; /* for CIFS signing */
8871     unsigned long when_alloc; /* when mid was created */
8872     diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
8873     index bb54ccf8481c..551924beb86f 100644
8874     --- a/fs/cifs/cifssmb.c
8875     +++ b/fs/cifs/cifssmb.c
8876     @@ -2125,12 +2125,13 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
8877    
8878     wdata2->cfile = find_writable_file(CIFS_I(inode), false);
8879     if (!wdata2->cfile) {
8880     - cifs_dbg(VFS, "No writable handles for inode\n");
8881     + cifs_dbg(VFS, "No writable handle to retry writepages\n");
8882     rc = -EBADF;
8883     - break;
8884     + } else {
8885     + wdata2->pid = wdata2->cfile->pid;
8886     + rc = server->ops->async_writev(wdata2,
8887     + cifs_writedata_release);
8888     }
8889     - wdata2->pid = wdata2->cfile->pid;
8890     - rc = server->ops->async_writev(wdata2, cifs_writedata_release);
8891    
8892     for (j = 0; j < nr_pages; j++) {
8893     unlock_page(wdata2->pages[j]);
8894     @@ -2145,6 +2146,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
8895     kref_put(&wdata2->refcount, cifs_writedata_release);
8896     if (is_retryable_error(rc))
8897     continue;
8898     + i += nr_pages;
8899     break;
8900     }
8901    
8902     @@ -2152,6 +2154,13 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
8903     i += nr_pages;
8904     } while (i < wdata->nr_pages);
8905    
8906     + /* cleanup remaining pages from the original wdata */
8907     + for (; i < wdata->nr_pages; i++) {
8908     + SetPageError(wdata->pages[i]);
8909     + end_page_writeback(wdata->pages[i]);
8910     + put_page(wdata->pages[i]);
8911     + }
8912     +
8913     if (rc != 0 && !is_retryable_error(rc))
8914     mapping_set_error(inode->i_mapping, rc);
8915     kref_put(&wdata->refcount, cifs_writedata_release);
8916     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
8917     index 8463c940e0e5..e61cd2938c9e 100644
8918     --- a/fs/cifs/connect.c
8919     +++ b/fs/cifs/connect.c
8920     @@ -102,7 +102,7 @@ enum {
8921     Opt_backupuid, Opt_backupgid, Opt_uid,
8922     Opt_cruid, Opt_gid, Opt_file_mode,
8923     Opt_dirmode, Opt_port,
8924     - Opt_rsize, Opt_wsize, Opt_actimeo,
8925     + Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
8926     Opt_echo_interval, Opt_max_credits,
8927     Opt_snapshot,
8928    
8929     @@ -204,6 +204,7 @@ static const match_table_t cifs_mount_option_tokens = {
8930     { Opt_dirmode, "dirmode=%s" },
8931     { Opt_dirmode, "dir_mode=%s" },
8932     { Opt_port, "port=%s" },
8933     + { Opt_blocksize, "bsize=%s" },
8934     { Opt_rsize, "rsize=%s" },
8935     { Opt_wsize, "wsize=%s" },
8936     { Opt_actimeo, "actimeo=%s" },
8937     @@ -1571,7 +1572,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
8938     vol->cred_uid = current_uid();
8939     vol->linux_uid = current_uid();
8940     vol->linux_gid = current_gid();
8941     -
8942     + vol->bsize = 1024 * 1024; /* can improve cp performance significantly */
8943     /*
8944     * default to SFM style remapping of seven reserved characters
8945     * unless user overrides it or we negotiate CIFS POSIX where
8946     @@ -1944,6 +1945,26 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
8947     }
8948     port = (unsigned short)option;
8949     break;
8950     + case Opt_blocksize:
8951     + if (get_option_ul(args, &option)) {
8952     + cifs_dbg(VFS, "%s: Invalid blocksize value\n",
8953     + __func__);
8954     + goto cifs_parse_mount_err;
8955     + }
8956     + /*
8957     + * inode blocksize realistically should never need to be
8958     + * less than 16K or greater than 16M and default is 1MB.
8959     + * Note that small inode block sizes (e.g. 64K) can lead
8960     + * to very poor performance of common tools like cp and scp
8961     + */
8962     + if ((option < CIFS_MAX_MSGSIZE) ||
8963     + (option > (4 * SMB3_DEFAULT_IOSIZE))) {
8964     + cifs_dbg(VFS, "%s: Invalid blocksize\n",
8965     + __func__);
8966     + goto cifs_parse_mount_err;
8967     + }
8968     + vol->bsize = option;
8969     + break;
8970     case Opt_rsize:
8971     if (get_option_ul(args, &option)) {
8972     cifs_dbg(VFS, "%s: Invalid rsize value\n",
8973     @@ -3839,6 +3860,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
8974     spin_lock_init(&cifs_sb->tlink_tree_lock);
8975     cifs_sb->tlink_tree = RB_ROOT;
8976    
8977     + cifs_sb->bsize = pvolume_info->bsize;
8978     /*
8979     * Temporarily set r/wsize for matching superblock. If we end up using
8980     * new sb then client will later negotiate it downward if needed.
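[Note: with the bsize= option parsed above (and echoed back by cifs_show_options() in the cifsfs.c hunk), the inode block size reported through stat(2) becomes a mount-time tunable; per the hunk's own comment, the accepted bounds CIFS_MAX_MSGSIZE and 4 * SMB3_DEFAULT_IOSIZE correspond to 16K and 16M, with a 1M default. A hypothetical mount line (server, share and user are placeholders) selecting the default explicitly:

    mount -t cifs //server/share /mnt -o user=alice,bsize=1048576

The fs/cifs/inode.c hunk below is the consumer: cifs_getattr() reports cifs_sb->bsize instead of the fixed CIFS_MAX_MSGSIZE, which is what lets tools like cp pick sensibly large I/O sizes.]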
8981     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
8982     index 659ce1b92c44..95461db80011 100644
8983     --- a/fs/cifs/file.c
8984     +++ b/fs/cifs/file.c
8985     @@ -3028,14 +3028,16 @@ cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
8986     * these pages but not on the region from pos to ppos+len-1.
8987     */
8988     written = cifs_user_writev(iocb, from);
8989     - if (written > 0 && CIFS_CACHE_READ(cinode)) {
8990     + if (CIFS_CACHE_READ(cinode)) {
8991     /*
8992     - * Windows 7 server can delay breaking level2 oplock if a write
8993     - * request comes - break it on the client to prevent reading
8994     - * an old data.
8995     + * We have read level caching and we have just sent a write
8996     + * request to the server thus making data in the cache stale.
8997     + * Zap the cache and set oplock/lease level to NONE to avoid
8998     + * reading stale data from the cache. All subsequent read
8999     + * operations will read new data from the server.
9000     */
9001     cifs_zap_mapping(inode);
9002     - cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
9003     + cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
9004     inode);
9005     cinode->oplock = 0;
9006     }
9007     diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
9008     index 478003644916..53fdb5df0d2e 100644
9009     --- a/fs/cifs/inode.c
9010     +++ b/fs/cifs/inode.c
9011     @@ -2080,7 +2080,7 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
9012     return rc;
9013    
9014     generic_fillattr(inode, stat);
9015     - stat->blksize = CIFS_MAX_MSGSIZE;
9016     + stat->blksize = cifs_sb->bsize;
9017     stat->ino = CIFS_I(inode)->uniqueid;
9018    
9019     /* old CIFS Unix Extensions doesn't return create time */
9020     diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
9021     index 7b8b58fb4d3f..58700d2ba8cd 100644
9022     --- a/fs/cifs/smb2misc.c
9023     +++ b/fs/cifs/smb2misc.c
9024     @@ -517,7 +517,6 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
9025     __u8 lease_state;
9026     struct list_head *tmp;
9027     struct cifsFileInfo *cfile;
9028     - struct TCP_Server_Info *server = tcon->ses->server;
9029     struct cifs_pending_open *open;
9030     struct cifsInodeInfo *cinode;
9031     int ack_req = le32_to_cpu(rsp->Flags &
9032     @@ -537,13 +536,25 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
9033     cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
9034     le32_to_cpu(rsp->NewLeaseState));
9035    
9036     - server->ops->set_oplock_level(cinode, lease_state, 0, NULL);
9037     -
9038     if (ack_req)
9039     cfile->oplock_break_cancelled = false;
9040     else
9041     cfile->oplock_break_cancelled = true;
9042    
9043     + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
9044     +
9045     + /*
9046     + * Set or clear flags depending on the lease state being READ.
9047     + * HANDLE caching flag should be added when the client starts
9048     + * to defer closing remote file handles with HANDLE leases.
9049     + */
9050     + if (lease_state & SMB2_LEASE_READ_CACHING_HE)
9051     + set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
9052     + &cinode->flags);
9053     + else
9054     + clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
9055     + &cinode->flags);
9056     +
9057     queue_work(cifsoplockd_wq, &cfile->oplock_break);
9058     kfree(lw);
9059     return true;
9060     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
9061     index 6f96e2292856..b29f711ab965 100644
9062     --- a/fs/cifs/smb2ops.c
9063     +++ b/fs/cifs/smb2ops.c
9064     @@ -219,6 +219,15 @@ smb2_get_next_mid(struct TCP_Server_Info *server)
9065     return mid;
9066     }
9067    
9068     +static void
9069     +smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
9070     +{
9071     + spin_lock(&GlobalMid_Lock);
9072     + if (server->CurrentMid >= val)
9073     + server->CurrentMid -= val;
9074     + spin_unlock(&GlobalMid_Lock);
9075     +}
9076     +
9077     static struct mid_q_entry *
9078     smb2_find_mid(struct TCP_Server_Info *server, char *buf)
9079     {
9080     @@ -2594,6 +2603,15 @@ smb2_downgrade_oplock(struct TCP_Server_Info *server,
9081     server->ops->set_oplock_level(cinode, 0, 0, NULL);
9082     }
9083    
9084     +static void
9085     +smb21_downgrade_oplock(struct TCP_Server_Info *server,
9086     + struct cifsInodeInfo *cinode, bool set_level2)
9087     +{
9088     + server->ops->set_oplock_level(cinode,
9089     + set_level2 ? SMB2_LEASE_READ_CACHING_HE :
9090     + 0, 0, NULL);
9091     +}
9092     +
9093     static void
9094     smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
9095     unsigned int epoch, bool *purge_cache)
9096     @@ -3541,6 +3559,7 @@ struct smb_version_operations smb20_operations = {
9097     .get_credits = smb2_get_credits,
9098     .wait_mtu_credits = cifs_wait_mtu_credits,
9099     .get_next_mid = smb2_get_next_mid,
9100     + .revert_current_mid = smb2_revert_current_mid,
9101     .read_data_offset = smb2_read_data_offset,
9102     .read_data_length = smb2_read_data_length,
9103     .map_error = map_smb2_to_linux_error,
9104     @@ -3636,6 +3655,7 @@ struct smb_version_operations smb21_operations = {
9105     .get_credits = smb2_get_credits,
9106     .wait_mtu_credits = smb2_wait_mtu_credits,
9107     .get_next_mid = smb2_get_next_mid,
9108     + .revert_current_mid = smb2_revert_current_mid,
9109     .read_data_offset = smb2_read_data_offset,
9110     .read_data_length = smb2_read_data_length,
9111     .map_error = map_smb2_to_linux_error,
9112     @@ -3646,7 +3666,7 @@ struct smb_version_operations smb21_operations = {
9113     .print_stats = smb2_print_stats,
9114     .is_oplock_break = smb2_is_valid_oplock_break,
9115     .handle_cancelled_mid = smb2_handle_cancelled_mid,
9116     - .downgrade_oplock = smb2_downgrade_oplock,
9117     + .downgrade_oplock = smb21_downgrade_oplock,
9118     .need_neg = smb2_need_neg,
9119     .negotiate = smb2_negotiate,
9120     .negotiate_wsize = smb2_negotiate_wsize,
9121     @@ -3732,6 +3752,7 @@ struct smb_version_operations smb30_operations = {
9122     .get_credits = smb2_get_credits,
9123     .wait_mtu_credits = smb2_wait_mtu_credits,
9124     .get_next_mid = smb2_get_next_mid,
9125     + .revert_current_mid = smb2_revert_current_mid,
9126     .read_data_offset = smb2_read_data_offset,
9127     .read_data_length = smb2_read_data_length,
9128     .map_error = map_smb2_to_linux_error,
9129     @@ -3743,7 +3764,7 @@ struct smb_version_operations smb30_operations = {
9130     .dump_share_caps = smb2_dump_share_caps,
9131     .is_oplock_break = smb2_is_valid_oplock_break,
9132     .handle_cancelled_mid = smb2_handle_cancelled_mid,
9133     - .downgrade_oplock = smb2_downgrade_oplock,
9134     + .downgrade_oplock = smb21_downgrade_oplock,
9135     .need_neg = smb2_need_neg,
9136     .negotiate = smb2_negotiate,
9137     .negotiate_wsize = smb3_negotiate_wsize,
9138     @@ -3837,6 +3858,7 @@ struct smb_version_operations smb311_operations = {
9139     .get_credits = smb2_get_credits,
9140     .wait_mtu_credits = smb2_wait_mtu_credits,
9141     .get_next_mid = smb2_get_next_mid,
9142     + .revert_current_mid = smb2_revert_current_mid,
9143     .read_data_offset = smb2_read_data_offset,
9144     .read_data_length = smb2_read_data_length,
9145     .map_error = map_smb2_to_linux_error,
9146     @@ -3848,7 +3870,7 @@ struct smb_version_operations smb311_operations = {
9147     .dump_share_caps = smb2_dump_share_caps,
9148     .is_oplock_break = smb2_is_valid_oplock_break,
9149     .handle_cancelled_mid = smb2_handle_cancelled_mid,
9150     - .downgrade_oplock = smb2_downgrade_oplock,
9151     + .downgrade_oplock = smb21_downgrade_oplock,
9152     .need_neg = smb2_need_neg,
9153     .negotiate = smb2_negotiate,
9154     .negotiate_wsize = smb3_negotiate_wsize,
9155     diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
9156     index 7b351c65ee46..63264db78b89 100644
9157     --- a/fs/cifs/smb2transport.c
9158     +++ b/fs/cifs/smb2transport.c
9159     @@ -576,6 +576,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
9160     struct TCP_Server_Info *server)
9161     {
9162     struct mid_q_entry *temp;
9163     + unsigned int credits = le16_to_cpu(shdr->CreditCharge);
9164    
9165     if (server == NULL) {
9166     cifs_dbg(VFS, "Null TCP session in smb2_mid_entry_alloc\n");
9167     @@ -586,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
9168     memset(temp, 0, sizeof(struct mid_q_entry));
9169     kref_init(&temp->refcount);
9170     temp->mid = le64_to_cpu(shdr->MessageId);
9171     + temp->credits = credits > 0 ? credits : 1;
9172     temp->pid = current->pid;
9173     temp->command = shdr->Command; /* Always LE */
9174     temp->when_alloc = jiffies;
9175     @@ -674,13 +676,18 @@ smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
9176     smb2_seq_num_into_buf(ses->server, shdr);
9177    
9178     rc = smb2_get_mid_entry(ses, shdr, &mid);
9179     - if (rc)
9180     + if (rc) {
9181     + revert_current_mid_from_hdr(ses->server, shdr);
9182     return ERR_PTR(rc);
9183     + }
9184     +
9185     rc = smb2_sign_rqst(rqst, ses->server);
9186     if (rc) {
9187     + revert_current_mid_from_hdr(ses->server, shdr);
9188     cifs_delete_mid(mid);
9189     return ERR_PTR(rc);
9190     }
9191     +
9192     return mid;
9193     }
9194    
9195     @@ -695,11 +702,14 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
9196     smb2_seq_num_into_buf(server, shdr);
9197    
9198     mid = smb2_mid_entry_alloc(shdr, server);
9199     - if (mid == NULL)
9200     + if (mid == NULL) {
9201     + revert_current_mid_from_hdr(server, shdr);
9202     return ERR_PTR(-ENOMEM);
9203     + }
9204    
9205     rc = smb2_sign_rqst(rqst, server);
9206     if (rc) {
9207     + revert_current_mid_from_hdr(server, shdr);
9208     DeleteMidQEntry(mid);
9209     return ERR_PTR(rc);
9210     }
9211     diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
9212     index 53532bd3f50d..9544eb99b5a2 100644
9213     --- a/fs/cifs/transport.c
9214     +++ b/fs/cifs/transport.c
9215     @@ -647,6 +647,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
9216     cifs_in_send_dec(server);
9217    
9218     if (rc < 0) {
9219     + revert_current_mid(server, mid->credits);
9220     server->sequence_number -= 2;
9221     cifs_delete_mid(mid);
9222     }
9223     @@ -868,6 +869,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
9224     for (i = 0; i < num_rqst; i++) {
9225     midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
9226     if (IS_ERR(midQ[i])) {
9227     + revert_current_mid(ses->server, i);
9228     for (j = 0; j < i; j++)
9229     cifs_delete_mid(midQ[j]);
9230     mutex_unlock(&ses->server->srv_mutex);
9231     @@ -897,8 +899,10 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
9232     for (i = 0; i < num_rqst; i++)
9233     cifs_save_when_sent(midQ[i]);
9234    
9235     - if (rc < 0)
9236     + if (rc < 0) {
9237     + revert_current_mid(ses->server, num_rqst);
9238     ses->server->sequence_number -= 2;
9239     + }
9240    
9241     mutex_unlock(&ses->server->srv_mutex);
9242    
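[Note: a worked example of the revert bookkeeping added above, assuming the usual SMB3 rule that a request consumes CreditCharge message IDs: with CurrentMid at 100 and CreditCharge 2, smb2_mid_entry_alloc() records credits = 2 and the mid space advances to 102; if signing or the send then fails, revert_current_mid_from_hdr() (or revert_current_mid(server, mid->credits) in cifs_call_async()) walks CurrentMid back to 100. Without the revert, the failed request would leave a hole in the message-id sequence, which servers that validate the sequence window may reject on later requests.]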
9243     diff --git a/fs/dax.c b/fs/dax.c
9244     index 6959837cc465..05cca2214ae3 100644
9245     --- a/fs/dax.c
9246     +++ b/fs/dax.c
9247     @@ -843,9 +843,8 @@ unlock_pte:
9248     static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
9249     struct address_space *mapping, void *entry)
9250     {
9251     - unsigned long pfn;
9252     + unsigned long pfn, index, count;
9253     long ret = 0;
9254     - size_t size;
9255    
9256     /*
9257     * A page got tagged dirty in DAX mapping? Something is seriously
9258     @@ -894,17 +893,18 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
9259     xas_unlock_irq(xas);
9260    
9261     /*
9262     - * Even if dax_writeback_mapping_range() was given a wbc->range_start
9263     - * in the middle of a PMD, the 'index' we are given will be aligned to
9264     - * the start index of the PMD, as will the pfn we pull from 'entry'.
9265     + * If dax_writeback_mapping_range() was given a wbc->range_start
9266     + * in the middle of a PMD, the 'index' we use needs to be
9267     + * aligned to the start of the PMD.
9268     * This allows us to flush for PMD_SIZE and not have to worry about
9269     * partial PMD writebacks.
9270     */
9271     pfn = dax_to_pfn(entry);
9272     - size = PAGE_SIZE << dax_entry_order(entry);
9273     + count = 1UL << dax_entry_order(entry);
9274     + index = xas->xa_index & ~(count - 1);
9275    
9276     - dax_entry_mkclean(mapping, xas->xa_index, pfn);
9277     - dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
9278     + dax_entry_mkclean(mapping, index, pfn);
9279     + dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
9280     /*
9281     * After we have flushed the cache, we can clear the dirty tag. There
9282     * cannot be new dirty data in the pfn after the flush has completed as
9283     @@ -917,8 +917,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
9284     xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
9285     dax_wake_entry(xas, entry, false);
9286    
9287     - trace_dax_writeback_one(mapping->host, xas->xa_index,
9288     - size >> PAGE_SHIFT);
9289     + trace_dax_writeback_one(mapping->host, index, count);
9290     return ret;
9291    
9292     put_unlocked:
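[Note: a small sketch of the alignment math introduced above, for a hypothetical PMD-sized entry (order 9, i.e. 512 pages):

    unsigned long count = 1UL << 9;                 /* dax_entry_order(entry) == 9 */
    unsigned long index = 0x12345UL & ~(count - 1); /* xas->xa_index == 0x12345 */
    /*
     * index == 0x12200: the flush starts at the PMD boundary rather than
     * at the possibly mid-PMD index writeback handed us, and covers
     * count * PAGE_SIZE == 2M on x86-64.
     */
]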
9293     diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
9294     index c53814539070..553a3f3300ae 100644
9295     --- a/fs/devpts/inode.c
9296     +++ b/fs/devpts/inode.c
9297     @@ -455,6 +455,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
9298     s->s_blocksize_bits = 10;
9299     s->s_magic = DEVPTS_SUPER_MAGIC;
9300     s->s_op = &devpts_sops;
9301     + s->s_d_op = &simple_dentry_operations;
9302     s->s_time_gran = 1;
9303    
9304     error = -ENOMEM;
9305     diff --git a/fs/ext2/super.c b/fs/ext2/super.c
9306     index 73b2d528237f..a9ea38182578 100644
9307     --- a/fs/ext2/super.c
9308     +++ b/fs/ext2/super.c
9309     @@ -757,7 +757,8 @@ static loff_t ext2_max_size(int bits)
9310     {
9311     loff_t res = EXT2_NDIR_BLOCKS;
9312     int meta_blocks;
9313     - loff_t upper_limit;
9314     + unsigned int upper_limit;
9315     + unsigned int ppb = 1 << (bits-2);
9316    
9317     /* This is calculated to be the largest file size for a
9318     * dense, file such that the total number of
9319     @@ -771,24 +772,34 @@ static loff_t ext2_max_size(int bits)
9320     /* total blocks in file system block size */
9321     upper_limit >>= (bits - 9);
9322    
9323     + /* Compute how many blocks we can address by block tree */
9324     + res += 1LL << (bits-2);
9325     + res += 1LL << (2*(bits-2));
9326     + res += 1LL << (3*(bits-2));
9327     + /* Does block tree limit file size? */
9328     + if (res < upper_limit)
9329     + goto check_lfs;
9330    
9331     + res = upper_limit;
9332     + /* How many metadata blocks are needed for addressing upper_limit? */
9333     + upper_limit -= EXT2_NDIR_BLOCKS;
9334     /* indirect blocks */
9335     meta_blocks = 1;
9336     + upper_limit -= ppb;
9337     /* double indirect blocks */
9338     - meta_blocks += 1 + (1LL << (bits-2));
9339     - /* tripple indirect blocks */
9340     - meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
9341     -
9342     - upper_limit -= meta_blocks;
9343     - upper_limit <<= bits;
9344     -
9345     - res += 1LL << (bits-2);
9346     - res += 1LL << (2*(bits-2));
9347     - res += 1LL << (3*(bits-2));
9348     + if (upper_limit < ppb * ppb) {
9349     + meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb);
9350     + res -= meta_blocks;
9351     + goto check_lfs;
9352     + }
9353     + meta_blocks += 1 + ppb;
9354     + upper_limit -= ppb * ppb;
9355     + /* triple indirect blocks for the rest */
9356     + meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb) +
9357     + DIV_ROUND_UP(upper_limit, ppb*ppb);
9358     + res -= meta_blocks;
9359     +check_lfs:
9360     res <<= bits;
9361     - if (res > upper_limit)
9362     - res = upper_limit;
9363     -
9364     if (res > MAX_LFS_FILESIZE)
9365     res = MAX_LFS_FILESIZE;
9366    
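[Note: worked through for 1K blocks (bits = 10, so ppb = 1 << 8 = 256 block pointers per block), ignoring the final MAX_LFS_FILESIZE cap:

    tree capacity:  res = 12 + 256 + 256^2 + 256^3 = 16,843,020 blocks
    fs capacity:    upper_limit = (2^32 - 1) >> 1  ~= 2.1e9 blocks
    res < upper_limit, so the block tree is the limit:
    max file size  ~= 16,843,020 << 10 ~= 16 GiB

When the filesystem itself is smaller than the tree capacity, the new code counts only the metadata blocks actually needed to address upper_limit (via DIV_ROUND_UP), where the old code subtracted worst-case metadata and could underflow for large block sizes.]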
9367     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
9368     index 185a05d3257e..508a37ec9271 100644
9369     --- a/fs/ext4/ext4.h
9370     +++ b/fs/ext4/ext4.h
9371     @@ -426,6 +426,9 @@ struct flex_groups {
9372     /* Flags that are appropriate for non-directories/regular files. */
9373     #define EXT4_OTHER_FLMASK (EXT4_NODUMP_FL | EXT4_NOATIME_FL)
9374    
9375     +/* The only flags that should be swapped */
9376     +#define EXT4_FL_SHOULD_SWAP (EXT4_HUGE_FILE_FL | EXT4_EXTENTS_FL)
9377     +
9378     /* Mask out flags that are inappropriate for the given type of inode. */
9379     static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
9380     {
9381     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
9382     index d37dafa1d133..2e76fb55d94a 100644
9383     --- a/fs/ext4/ioctl.c
9384     +++ b/fs/ext4/ioctl.c
9385     @@ -63,18 +63,20 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
9386     loff_t isize;
9387     struct ext4_inode_info *ei1;
9388     struct ext4_inode_info *ei2;
9389     + unsigned long tmp;
9390    
9391     ei1 = EXT4_I(inode1);
9392     ei2 = EXT4_I(inode2);
9393    
9394     swap(inode1->i_version, inode2->i_version);
9395     - swap(inode1->i_blocks, inode2->i_blocks);
9396     - swap(inode1->i_bytes, inode2->i_bytes);
9397     swap(inode1->i_atime, inode2->i_atime);
9398     swap(inode1->i_mtime, inode2->i_mtime);
9399    
9400     memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
9401     - swap(ei1->i_flags, ei2->i_flags);
9402     + tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
9403     + ei1->i_flags = (ei2->i_flags & EXT4_FL_SHOULD_SWAP) |
9404     + (ei1->i_flags & ~EXT4_FL_SHOULD_SWAP);
9405     + ei2->i_flags = tmp | (ei2->i_flags & ~EXT4_FL_SHOULD_SWAP);
9406     swap(ei1->i_disksize, ei2->i_disksize);
9407     ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
9408     ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
9409     @@ -115,28 +117,41 @@ static long swap_inode_boot_loader(struct super_block *sb,
9410     int err;
9411     struct inode *inode_bl;
9412     struct ext4_inode_info *ei_bl;
9413     -
9414     - if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
9415     - IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
9416     - ext4_has_inline_data(inode))
9417     - return -EINVAL;
9418     -
9419     - if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
9420     - !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
9421     - return -EPERM;
9422     + qsize_t size, size_bl, diff;
9423     + blkcnt_t blocks;
9424     + unsigned short bytes;
9425    
9426     inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
9427     if (IS_ERR(inode_bl))
9428     return PTR_ERR(inode_bl);
9429     ei_bl = EXT4_I(inode_bl);
9430    
9431     - filemap_flush(inode->i_mapping);
9432     - filemap_flush(inode_bl->i_mapping);
9433     -
9434     /* Protect orig inodes against a truncate and make sure,
9435     * that only 1 swap_inode_boot_loader is running. */
9436     lock_two_nondirectories(inode, inode_bl);
9437    
9438     + if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
9439     + IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
9440     + ext4_has_inline_data(inode)) {
9441     + err = -EINVAL;
9442     + goto journal_err_out;
9443     + }
9444     +
9445     + if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
9446     + !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN)) {
9447     + err = -EPERM;
9448     + goto journal_err_out;
9449     + }
9450     +
9451     + down_write(&EXT4_I(inode)->i_mmap_sem);
9452     + err = filemap_write_and_wait(inode->i_mapping);
9453     + if (err)
9454     + goto err_out;
9455     +
9456     + err = filemap_write_and_wait(inode_bl->i_mapping);
9457     + if (err)
9458     + goto err_out;
9459     +
9460     /* Wait for all existing dio workers */
9461     inode_dio_wait(inode);
9462     inode_dio_wait(inode_bl);
9463     @@ -147,7 +162,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
9464     handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
9465     if (IS_ERR(handle)) {
9466     err = -EINVAL;
9467     - goto journal_err_out;
9468     + goto err_out;
9469     }
9470    
9471     /* Protect extent tree against block allocations via delalloc */
9472     @@ -170,6 +185,13 @@ static long swap_inode_boot_loader(struct super_block *sb,
9473     memset(ei_bl->i_data, 0, sizeof(ei_bl->i_data));
9474     }
9475    
9476     + err = dquot_initialize(inode);
9477     + if (err)
9478     + goto err_out1;
9479     +
9480     + size = (qsize_t)(inode->i_blocks) * (1 << 9) + inode->i_bytes;
9481     + size_bl = (qsize_t)(inode_bl->i_blocks) * (1 << 9) + inode_bl->i_bytes;
9482     + diff = size - size_bl;
9483     swap_inode_data(inode, inode_bl);
9484    
9485     inode->i_ctime = inode_bl->i_ctime = current_time(inode);
9486     @@ -183,27 +205,51 @@ static long swap_inode_boot_loader(struct super_block *sb,
9487    
9488     err = ext4_mark_inode_dirty(handle, inode);
9489     if (err < 0) {
9490     + /* No need to update quota information. */
9491     ext4_warning(inode->i_sb,
9492     "couldn't mark inode #%lu dirty (err %d)",
9493     inode->i_ino, err);
9494     /* Revert all changes: */
9495     swap_inode_data(inode, inode_bl);
9496     ext4_mark_inode_dirty(handle, inode);
9497     - } else {
9498     - err = ext4_mark_inode_dirty(handle, inode_bl);
9499     - if (err < 0) {
9500     - ext4_warning(inode_bl->i_sb,
9501     - "couldn't mark inode #%lu dirty (err %d)",
9502     - inode_bl->i_ino, err);
9503     - /* Revert all changes: */
9504     - swap_inode_data(inode, inode_bl);
9505     - ext4_mark_inode_dirty(handle, inode);
9506     - ext4_mark_inode_dirty(handle, inode_bl);
9507     - }
9508     + goto err_out1;
9509     + }
9510     +
9511     + blocks = inode_bl->i_blocks;
9512     + bytes = inode_bl->i_bytes;
9513     + inode_bl->i_blocks = inode->i_blocks;
9514     + inode_bl->i_bytes = inode->i_bytes;
9515     + err = ext4_mark_inode_dirty(handle, inode_bl);
9516     + if (err < 0) {
9517     + /* No need to update quota information. */
9518     + ext4_warning(inode_bl->i_sb,
9519     + "couldn't mark inode #%lu dirty (err %d)",
9520     + inode_bl->i_ino, err);
9521     + goto revert;
9522     + }
9523     +
9524     + /* Bootloader inode should not be counted into quota information. */
9525     + if (diff > 0)
9526     + dquot_free_space(inode, diff);
9527     + else
9528     + err = dquot_alloc_space(inode, -1 * diff);
9529     +
9530     + if (err < 0) {
9531     +revert:
9532     + /* Revert all changes: */
9533     + inode_bl->i_blocks = blocks;
9534     + inode_bl->i_bytes = bytes;
9535     + swap_inode_data(inode, inode_bl);
9536     + ext4_mark_inode_dirty(handle, inode);
9537     + ext4_mark_inode_dirty(handle, inode_bl);
9538     }
9539     +
9540     +err_out1:
9541     ext4_journal_stop(handle);
9542     ext4_double_up_write_data_sem(inode, inode_bl);
9543    
9544     +err_out:
9545     + up_write(&EXT4_I(inode)->i_mmap_sem);
9546     journal_err_out:
9547     unlock_two_nondirectories(inode, inode_bl);
9548     iput(inode_bl);
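[Note: the quota handling above works on byte usage derived from the inode counters; i_blocks counts 512-byte sectors, with the sub-sector remainder in i_bytes, so the bookkeeping reduces to:

    size    = (qsize_t)inode->i_blocks * (1 << 9) + inode->i_bytes;
    size_bl = (qsize_t)inode_bl->i_blocks * (1 << 9) + inode_bl->i_bytes;
    diff    = size - size_bl;
    /*
     * diff > 0: the swap shrinks the user-visible inode, so quota is freed;
     * diff < 0: it grows, so -diff bytes are charged, and the whole swap
     * (blocks, bytes and data) is reverted if the charge fails.
     */
]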
9549     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
9550     index 48421de803b7..3d9b18505c0c 100644
9551     --- a/fs/ext4/resize.c
9552     +++ b/fs/ext4/resize.c
9553     @@ -1960,7 +1960,8 @@ retry:
9554     le16_to_cpu(es->s_reserved_gdt_blocks);
9555     n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
9556     n_blocks_count = (ext4_fsblk_t)n_group *
9557     - EXT4_BLOCKS_PER_GROUP(sb);
9558     + EXT4_BLOCKS_PER_GROUP(sb) +
9559     + le32_to_cpu(es->s_first_data_block);
9560     n_group--; /* set to last group number */
9561     }
9562    
9563     diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
9564     index cc35537232f2..f0d8dabe1ff5 100644
9565     --- a/fs/jbd2/transaction.c
9566     +++ b/fs/jbd2/transaction.c
9567     @@ -1252,11 +1252,12 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
9568     struct journal_head *jh;
9569     char *committed_data = NULL;
9570    
9571     - JBUFFER_TRACE(jh, "entry");
9572     if (jbd2_write_access_granted(handle, bh, true))
9573     return 0;
9574    
9575     jh = jbd2_journal_add_journal_head(bh);
9576     + JBUFFER_TRACE(jh, "entry");
9577     +
9578     /*
9579     * Do this first --- it can drop the journal lock, so we want to
9580     * make sure that obtaining the committed_data is done
9581     @@ -1367,15 +1368,17 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
9582    
9583     if (is_handle_aborted(handle))
9584     return -EROFS;
9585     - if (!buffer_jbd(bh)) {
9586     - ret = -EUCLEAN;
9587     - goto out;
9588     - }
9589     + if (!buffer_jbd(bh))
9590     + return -EUCLEAN;
9591     +
9592     /*
9593     * We don't grab jh reference here since the buffer must be part
9594     * of the running transaction.
9595     */
9596     jh = bh2jh(bh);
9597     + jbd_debug(5, "journal_head %p\n", jh);
9598     + JBUFFER_TRACE(jh, "entry");
9599     +
9600     /*
9601     * This and the following assertions are unreliable since we may see jh
9602     * in inconsistent state unless we grab bh_state lock. But this is
9603     @@ -1409,9 +1412,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
9604     }
9605    
9606     journal = transaction->t_journal;
9607     - jbd_debug(5, "journal_head %p\n", jh);
9608     - JBUFFER_TRACE(jh, "entry");
9609     -
9610     jbd_lock_bh_state(bh);
9611    
9612     if (jh->b_modified == 0) {
9613     @@ -1609,14 +1609,21 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
9614     /* However, if the buffer is still owned by a prior
9615     * (committing) transaction, we can't drop it yet... */
9616     JBUFFER_TRACE(jh, "belongs to older transaction");
9617     - /* ... but we CAN drop it from the new transaction if we
9618     - * have also modified it since the original commit. */
9619     + /* ... but we CAN drop it from the new transaction through
9620     + * marking the buffer as freed and set j_next_transaction to
9621     + * the new transaction, so that not only the commit code
9622     + * knows it should clear dirty bits when it is done with the
9623     + * buffer, but also the buffer can be checkpointed only
9624     + * after the new transaction commits. */
9625    
9626     - if (jh->b_next_transaction) {
9627     - J_ASSERT(jh->b_next_transaction == transaction);
9628     + set_buffer_freed(bh);
9629     +
9630     + if (!jh->b_next_transaction) {
9631     spin_lock(&journal->j_list_lock);
9632     - jh->b_next_transaction = NULL;
9633     + jh->b_next_transaction = transaction;
9634     spin_unlock(&journal->j_list_lock);
9635     + } else {
9636     + J_ASSERT(jh->b_next_transaction == transaction);
9637    
9638     /*
9639     * only drop a reference if this transaction modified
9640     diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
9641     index fdf527b6d79c..d71c9405874a 100644
9642     --- a/fs/kernfs/mount.c
9643     +++ b/fs/kernfs/mount.c
9644     @@ -196,8 +196,10 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
9645     return dentry;
9646    
9647     knparent = find_next_ancestor(kn, NULL);
9648     - if (WARN_ON(!knparent))
9649     + if (WARN_ON(!knparent)) {
9650     + dput(dentry);
9651     return ERR_PTR(-EINVAL);
9652     + }
9653    
9654     do {
9655     struct dentry *dtmp;
9656     @@ -206,8 +208,10 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
9657     if (kn == knparent)
9658     return dentry;
9659     kntmp = find_next_ancestor(kn, knparent);
9660     - if (WARN_ON(!kntmp))
9661     + if (WARN_ON(!kntmp)) {
9662     + dput(dentry);
9663     return ERR_PTR(-EINVAL);
9664     + }
9665     dtmp = lookup_one_len_unlocked(kntmp->name, dentry,
9666     strlen(kntmp->name));
9667     dput(dentry);
9668     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
9669     index 557a5d636183..64ac80ec6b7b 100644
9670     --- a/fs/nfs/nfs4proc.c
9671     +++ b/fs/nfs/nfs4proc.c
9672     @@ -947,6 +947,13 @@ nfs4_sequence_process_interrupted(struct nfs_client *client,
9673    
9674     #endif /* !CONFIG_NFS_V4_1 */
9675    
9676     +static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
9677     +{
9678     + res->sr_timestamp = jiffies;
9679     + res->sr_status_flags = 0;
9680     + res->sr_status = 1;
9681     +}
9682     +
9683     static
9684     void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
9685     struct nfs4_sequence_res *res,
9686     @@ -958,10 +965,6 @@ void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
9687     args->sa_slot = slot;
9688    
9689     res->sr_slot = slot;
9690     - res->sr_timestamp = jiffies;
9691     - res->sr_status_flags = 0;
9692     - res->sr_status = 1;
9693     -
9694     }
9695    
9696     int nfs4_setup_sequence(struct nfs_client *client,
9697     @@ -1007,6 +1010,7 @@ int nfs4_setup_sequence(struct nfs_client *client,
9698    
9699     trace_nfs4_setup_sequence(session, args);
9700     out_start:
9701     + nfs41_sequence_res_init(res);
9702     rpc_call_start(task);
9703     return 0;
9704    
9705     diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
9706     index e54d899c1848..a8951f1f7b4e 100644
9707     --- a/fs/nfs/pagelist.c
9708     +++ b/fs/nfs/pagelist.c
9709     @@ -988,6 +988,17 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
9710     }
9711     }
9712    
9713     +static void
9714     +nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
9715     + struct nfs_page *req)
9716     +{
9717     + LIST_HEAD(head);
9718     +
9719     + nfs_list_remove_request(req);
9720     + nfs_list_add_request(req, &head);
9721     + desc->pg_completion_ops->error_cleanup(&head);
9722     +}
9723     +
9724     /**
9725     * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
9726     * @desc: destination io descriptor
9727     @@ -1025,10 +1036,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
9728     nfs_page_group_unlock(req);
9729     desc->pg_moreio = 1;
9730     nfs_pageio_doio(desc);
9731     - if (desc->pg_error < 0)
9732     - return 0;
9733     - if (mirror->pg_recoalesce)
9734     - return 0;
9735     + if (desc->pg_error < 0 || mirror->pg_recoalesce)
9736     + goto out_cleanup_subreq;
9737     /* retry add_request for this subreq */
9738     nfs_page_group_lock(req);
9739     continue;
9740     @@ -1061,6 +1070,10 @@ err_ptr:
9741     desc->pg_error = PTR_ERR(subreq);
9742     nfs_page_group_unlock(req);
9743     return 0;
9744     +out_cleanup_subreq:
9745     + if (req != subreq)
9746     + nfs_pageio_cleanup_request(desc, subreq);
9747     + return 0;
9748     }
9749    
9750     static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
9751     @@ -1079,7 +1092,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
9752     struct nfs_page *req;
9753    
9754     req = list_first_entry(&head, struct nfs_page, wb_list);
9755     - nfs_list_remove_request(req);
9756     if (__nfs_pageio_add_request(desc, req))
9757     continue;
9758     if (desc->pg_error < 0) {
9759     @@ -1168,11 +1180,14 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
9760     if (nfs_pgio_has_mirroring(desc))
9761     desc->pg_mirror_idx = midx;
9762     if (!nfs_pageio_add_request_mirror(desc, dupreq))
9763     - goto out_failed;
9764     + goto out_cleanup_subreq;
9765     }
9766    
9767     return 1;
9768    
9769     +out_cleanup_subreq:
9770     + if (req != dupreq)
9771     + nfs_pageio_cleanup_request(desc, dupreq);
9772     out_failed:
9773     nfs_pageio_error_cleanup(desc);
9774     return 0;
9775     @@ -1194,7 +1209,7 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
9776     desc->pg_mirror_idx = mirror_idx;
9777     for (;;) {
9778     nfs_pageio_doio(desc);
9779     - if (!mirror->pg_recoalesce)
9780     + if (desc->pg_error < 0 || !mirror->pg_recoalesce)
9781     break;
9782     if (!nfs_do_recoalesce(desc))
9783     break;
9784     diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
9785     index 9eb8086ea841..c9cf46e0c040 100644
9786     --- a/fs/nfsd/nfs3proc.c
9787     +++ b/fs/nfsd/nfs3proc.c
9788     @@ -463,8 +463,19 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
9789     &resp->common, nfs3svc_encode_entry);
9790     memcpy(resp->verf, argp->verf, 8);
9791     resp->count = resp->buffer - argp->buffer;
9792     - if (resp->offset)
9793     - xdr_encode_hyper(resp->offset, argp->cookie);
9794     + if (resp->offset) {
9795     + loff_t offset = argp->cookie;
9796     +
9797     + if (unlikely(resp->offset1)) {
9798     + /* we ended up with offset on a page boundary */
9799     + *resp->offset = htonl(offset >> 32);
9800     + *resp->offset1 = htonl(offset & 0xffffffff);
9801     + resp->offset1 = NULL;
9802     + } else {
9803     + xdr_encode_hyper(resp->offset, offset);
9804     + }
9805     + resp->offset = NULL;
9806     + }
9807    
9808     RETURN_STATUS(nfserr);
9809     }
9810     @@ -533,6 +544,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
9811     } else {
9812     xdr_encode_hyper(resp->offset, offset);
9813     }
9814     + resp->offset = NULL;
9815     }
9816    
9817     RETURN_STATUS(nfserr);
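[Note: the offset1 case above handles an entry whose 8-byte cookie straddles a page boundary: resp->offset then points at the last 32-bit word of one page and resp->offset1 at the first word of the next, so the halves are stored separately. Illustrated with a hypothetical cookie value:

    loff_t offset = 0x0000000200000005LL;           /* hypothetical cookie */
    *resp->offset  = htonl(offset >> 32);           /* 0x00000002, end of page N */
    *resp->offset1 = htonl(offset & 0xffffffff);    /* 0x00000005, start of page N+1 */

Clearing resp->offset (and the matching cd->offset in the nfs3xdr.c hunk below) afterwards keeps the final fixup from writing through a stale pointer into memory the reply no longer owns.]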
9818     diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
9819     index 9b973f4f7d01..83919116d5cb 100644
9820     --- a/fs/nfsd/nfs3xdr.c
9821     +++ b/fs/nfsd/nfs3xdr.c
9822     @@ -921,6 +921,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
9823     } else {
9824     xdr_encode_hyper(cd->offset, offset64);
9825     }
9826     + cd->offset = NULL;
9827     }
9828    
9829     /*
9830     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
9831     index fb3c9844c82a..6a45fb00c5fc 100644
9832     --- a/fs/nfsd/nfs4state.c
9833     +++ b/fs/nfsd/nfs4state.c
9834     @@ -1544,16 +1544,16 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
9835     {
9836     u32 slotsize = slot_bytes(ca);
9837     u32 num = ca->maxreqs;
9838     - int avail;
9839     + unsigned long avail, total_avail;
9840    
9841     spin_lock(&nfsd_drc_lock);
9842     - avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
9843     - nfsd_drc_max_mem - nfsd_drc_mem_used);
9844     + total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
9845     + avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
9846     /*
9847     * Never use more than a third of the remaining memory,
9848     * unless it's the only way to give this client a slot:
9849     */
9850     - avail = clamp_t(int, avail, slotsize, avail/3);
9851     + avail = clamp_t(int, avail, slotsize, total_avail/3);
9852     num = min_t(int, num, avail / slotsize);
9853     nfsd_drc_mem_used += num * slotsize;
9854     spin_unlock(&nfsd_drc_lock);
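[Note: concretely, with hypothetical numbers: say 96 MB of DRC memory remains and the per-session cap is 32 MB. The old code clamped against avail/3, where avail had already been capped, allowing at most ~10.6 MB per session; clamping against total_avail/3 = 32 MB restores the intended "never more than a third of the remaining memory" rule. Widening avail to unsigned long also keeps the intermediate arithmetic from truncating where the remaining DRC memory exceeds INT_MAX.]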
9855     diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
9856     index 72a7681f4046..f2feb2d11bae 100644
9857     --- a/fs/nfsd/nfsctl.c
9858     +++ b/fs/nfsd/nfsctl.c
9859     @@ -1126,7 +1126,7 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
9860     case 'Y':
9861     case 'y':
9862     case '1':
9863     - if (nn->nfsd_serv)
9864     + if (!nn->nfsd_serv)
9865     return -EBUSY;
9866     nfsd4_end_grace(nn);
9867     break;
9868     diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
9869     index 9e62dcf06fc4..68b3303e4b46 100644
9870     --- a/fs/overlayfs/copy_up.c
9871     +++ b/fs/overlayfs/copy_up.c
9872     @@ -443,6 +443,24 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
9873     {
9874     int err;
9875    
9876     + /*
9877     + * Copy up data first and then xattrs. Writing data after
9878     + * xattrs will remove security.capability xattr automatically.
9879     + */
9880     + if (S_ISREG(c->stat.mode) && !c->metacopy) {
9881     + struct path upperpath, datapath;
9882     +
9883     + ovl_path_upper(c->dentry, &upperpath);
9884     + if (WARN_ON(upperpath.dentry != NULL))
9885     + return -EIO;
9886     + upperpath.dentry = temp;
9887     +
9888     + ovl_path_lowerdata(c->dentry, &datapath);
9889     + err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
9890     + if (err)
9891     + return err;
9892     + }
9893     +
9894     err = ovl_copy_xattr(c->lowerpath.dentry, temp);
9895     if (err)
9896     return err;
9897     @@ -460,19 +478,6 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
9898     return err;
9899     }
9900    
9901     - if (S_ISREG(c->stat.mode) && !c->metacopy) {
9902     - struct path upperpath, datapath;
9903     -
9904     - ovl_path_upper(c->dentry, &upperpath);
9905     - BUG_ON(upperpath.dentry != NULL);
9906     - upperpath.dentry = temp;
9907     -
9908     - ovl_path_lowerdata(c->dentry, &datapath);
9909     - err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
9910     - if (err)
9911     - return err;
9912     - }
9913     -
9914     if (c->metacopy) {
9915     err = ovl_check_setxattr(c->dentry, temp, OVL_XATTR_METACOPY,
9916     NULL, 0, -EOPNOTSUPP);
9917     @@ -737,6 +742,8 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
9918     {
9919     struct path upperpath, datapath;
9920     int err;
9921     + char *capability = NULL;
9922     + ssize_t uninitialized_var(cap_size);
9923    
9924     ovl_path_upper(c->dentry, &upperpath);
9925     if (WARN_ON(upperpath.dentry == NULL))
9926     @@ -746,15 +753,37 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
9927     if (WARN_ON(datapath.dentry == NULL))
9928     return -EIO;
9929    
9930     + if (c->stat.size) {
9931     + err = cap_size = ovl_getxattr(upperpath.dentry, XATTR_NAME_CAPS,
9932     + &capability, 0);
9933     + if (err < 0 && err != -ENODATA)
9934     + goto out;
9935     + }
9936     +
9937     err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
9938     if (err)
9939     - return err;
9940     + goto out_free;
9941     +
9942     + /*
9943     + * Writing to upper file will clear security.capability xattr. We
9944     + * don't want that to happen for normal copy-up operation.
9945     + */
9946     + if (capability) {
9947     + err = ovl_do_setxattr(upperpath.dentry, XATTR_NAME_CAPS,
9948     + capability, cap_size, 0);
9949     + if (err)
9950     + goto out_free;
9951     + }
9952     +
9953    
9954     err = vfs_removexattr(upperpath.dentry, OVL_XATTR_METACOPY);
9955     if (err)
9956     - return err;
9957     + goto out_free;
9958    
9959     ovl_set_upperdata(d_inode(c->dentry));
9960     +out_free:
9961     + kfree(capability);
9962     +out:
9963     return err;
9964     }
9965    
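A note on the reordering above: the comment in the hunk is the whole story, since the VFS drops security.capability when file data is written, copying data after the xattrs silently stripped file capabilities from the copied-up file. The same effect is visible from user space; a sketch of the save-write-restore idea using the xattr syscalls (the restore needs CAP_SETFCAP, error handling trimmed):

    #include <unistd.h>
    #include <sys/xattr.h>

    static int write_preserving_caps(int fd, const void *buf, size_t len)
    {
        char cap[256];
        ssize_t cap_size = fgetxattr(fd, "security.capability", cap, sizeof(cap));

        if (write(fd, buf, len) < 0)        /* this write clears the xattr */
            return -1;
        if (cap_size > 0 &&                 /* restore what the write dropped */
            fsetxattr(fd, "security.capability", cap, cap_size, 0) < 0)
            return -1;
        return 0;
    }
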
9966     diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
9967     index 5e45cb3630a0..9c6018287d57 100644
9968     --- a/fs/overlayfs/overlayfs.h
9969     +++ b/fs/overlayfs/overlayfs.h
9970     @@ -277,6 +277,8 @@ int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
9971     int ovl_check_metacopy_xattr(struct dentry *dentry);
9972     bool ovl_is_metacopy_dentry(struct dentry *dentry);
9973     char *ovl_get_redirect_xattr(struct dentry *dentry, int padding);
9974     +ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
9975     + size_t padding);
9976    
9977     static inline bool ovl_is_impuredir(struct dentry *dentry)
9978     {
9979     diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
9980     index 7c01327b1852..4035e640f402 100644
9981     --- a/fs/overlayfs/util.c
9982     +++ b/fs/overlayfs/util.c
9983     @@ -863,28 +863,49 @@ bool ovl_is_metacopy_dentry(struct dentry *dentry)
9984     return (oe->numlower > 1);
9985     }
9986    
9987     -char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
9988     +ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
9989     + size_t padding)
9990     {
9991     - int res;
9992     - char *s, *next, *buf = NULL;
9993     + ssize_t res;
9994     + char *buf = NULL;
9995    
9996     - res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, NULL, 0);
9997     + res = vfs_getxattr(dentry, name, NULL, 0);
9998     if (res < 0) {
9999     if (res == -ENODATA || res == -EOPNOTSUPP)
10000     - return NULL;
10001     + return -ENODATA;
10002     goto fail;
10003     }
10004    
10005     - buf = kzalloc(res + padding + 1, GFP_KERNEL);
10006     - if (!buf)
10007     - return ERR_PTR(-ENOMEM);
10008     + if (res != 0) {
10009     + buf = kzalloc(res + padding, GFP_KERNEL);
10010     + if (!buf)
10011     + return -ENOMEM;
10012    
10013     - if (res == 0)
10014     - goto invalid;
10015     + res = vfs_getxattr(dentry, name, buf, res);
10016     + if (res < 0)
10017     + goto fail;
10018     + }
10019     + *value = buf;
10020     +
10021     + return res;
10022     +
10023     +fail:
10024     + pr_warn_ratelimited("overlayfs: failed to get xattr %s: err=%zi)\n",
10025     + name, res);
10026     + kfree(buf);
10027     + return res;
10028     +}
10029    
10030     - res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, buf, res);
10031     +char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
10032     +{
10033     + int res;
10034     + char *s, *next, *buf = NULL;
10035     +
10036     + res = ovl_getxattr(dentry, OVL_XATTR_REDIRECT, &buf, padding + 1);
10037     + if (res == -ENODATA)
10038     + return NULL;
10039     if (res < 0)
10040     - goto fail;
10041     + return ERR_PTR(res);
10042     if (res == 0)
10043     goto invalid;
10044    
10045     @@ -900,15 +921,9 @@ char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
10046     }
10047    
10048     return buf;
10049     -
10050     -err_free:
10051     - kfree(buf);
10052     - return ERR_PTR(res);
10053     -fail:
10054     - pr_warn_ratelimited("overlayfs: failed to get redirect (%i)\n", res);
10055     - goto err_free;
10056     invalid:
10057     pr_warn_ratelimited("overlayfs: invalid redirect (%s)\n", buf);
10058     res = -EINVAL;
10059     - goto err_free;
10060     + kfree(buf);
10061     + return ERR_PTR(res);
10062     }
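
The new ovl_getxattr() helper factors out the usual two-pass xattr read: one call with a NULL buffer to learn the size, an allocation with caller-chosen padding (the redirect caller wants room for a terminating NUL), then a second call for the data. A user-space sketch of the same pattern with getxattr(2); the second call can still fail if the attribute grew in between, which is why the result is rechecked:

    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/xattr.h>

    static ssize_t xattr_read_alloc(const char *path, const char *name,
                                    char **value, size_t padding)
    {
        ssize_t res = getxattr(path, name, NULL, 0);  /* 1st call: size only */
        char *buf;

        if (res <= 0)
            return res;                     /* absent, empty, or error */
        buf = calloc(1, res + padding);     /* extra room, e.g. for a NUL */
        if (!buf)
            return -1;
        res = getxattr(path, name, buf, res);         /* 2nd call: the data */
        if (res < 0) {                      /* e.g. ERANGE if it grew */
            free(buf);
            return res;
        }
        *value = buf;
        return res;
    }
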
10063     diff --git a/fs/pipe.c b/fs/pipe.c
10064     index bdc5d3c0977d..c51750ed4011 100644
10065     --- a/fs/pipe.c
10066     +++ b/fs/pipe.c
10067     @@ -234,6 +234,14 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
10068     .get = generic_pipe_buf_get,
10069     };
10070    
10071     +static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
10072     + .can_merge = 0,
10073     + .confirm = generic_pipe_buf_confirm,
10074     + .release = anon_pipe_buf_release,
10075     + .steal = anon_pipe_buf_steal,
10076     + .get = generic_pipe_buf_get,
10077     +};
10078     +
10079     static const struct pipe_buf_operations packet_pipe_buf_ops = {
10080     .can_merge = 0,
10081     .confirm = generic_pipe_buf_confirm,
10082     @@ -242,6 +250,12 @@ static const struct pipe_buf_operations packet_pipe_buf_ops = {
10083     .get = generic_pipe_buf_get,
10084     };
10085    
10086     +void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
10087     +{
10088     + if (buf->ops == &anon_pipe_buf_ops)
10089     + buf->ops = &anon_pipe_buf_nomerge_ops;
10090     +}
10091     +
10092     static ssize_t
10093     pipe_read(struct kiocb *iocb, struct iov_iter *to)
10094     {
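
Rather than spending a flag bit, the fix above retires a buffer from merging by pointing it at an identical ops table whose .can_merge is zero; the ops pointer doubles as state, and the pointer comparison in pipe_buf_mark_unmergeable() leaves packet and other special buffers alone. A toy version of the technique (illustrative types, not kernel code):

    #include <stdio.h>

    struct buf_ops { int can_merge; };

    static const struct buf_ops default_ops = { .can_merge = 1 };
    static const struct buf_ops nomerge_ops = { .can_merge = 0 };

    struct buf { const struct buf_ops *ops; };

    static void mark_unmergeable(struct buf *b)
    {
        if (b->ops == &default_ops)   /* only downgrade the mergeable kind */
            b->ops = &nomerge_ops;
    }

    int main(void)
    {
        struct buf b = { .ops = &default_ops };

        mark_unmergeable(&b);
        printf("can_merge=%d\n", b.ops->can_merge);   /* 0 */
        return 0;
    }
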
10095     diff --git a/fs/splice.c b/fs/splice.c
10096     index de2ede048473..90c29675d573 100644
10097     --- a/fs/splice.c
10098     +++ b/fs/splice.c
10099     @@ -1597,6 +1597,8 @@ retry:
10100     */
10101     obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
10102    
10103     + pipe_buf_mark_unmergeable(obuf);
10104     +
10105     obuf->len = len;
10106     opipe->nrbufs++;
10107     ibuf->offset += obuf->len;
10108     @@ -1671,6 +1673,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
10109     */
10110     obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
10111    
10112     + pipe_buf_mark_unmergeable(obuf);
10113     +
10114     if (obuf->len > len)
10115     obuf->len = len;
10116    
10117     diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
10118     index 3d7a6a9c2370..f8f6f04c4453 100644
10119     --- a/include/asm-generic/vmlinux.lds.h
10120     +++ b/include/asm-generic/vmlinux.lds.h
10121     @@ -733,7 +733,7 @@
10122     KEEP(*(.orc_unwind_ip)) \
10123     __stop_orc_unwind_ip = .; \
10124     } \
10125     - . = ALIGN(6); \
10126     + . = ALIGN(2); \
10127     .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
10128     __start_orc_unwind = .; \
10129     KEEP(*(.orc_unwind)) \
10130     diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
10131     index e528baebad69..bee4bb9f81bc 100644
10132     --- a/include/linux/device-mapper.h
10133     +++ b/include/linux/device-mapper.h
10134     @@ -609,7 +609,7 @@ do { \
10135     */
10136     #define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
10137    
10138     -static inline sector_t to_sector(unsigned long n)
10139     +static inline sector_t to_sector(unsigned long long n)
10140     {
10141     return (n >> SECTOR_SHIFT);
10142     }
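
On 64-bit builds the change above is a no-op, but on 32-bit unsigned long is 32 bits, so a byte count for a device larger than 4 GiB was truncated before the shift. A sketch with fixed-width types standing in for the two prototypes (SECTOR_SHIFT is 9):

    #include <stdio.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9

    static uint64_t to_sector_old(uint32_t n)   /* unsigned long on 32-bit */
    {
        return n >> SECTOR_SHIFT;
    }

    static uint64_t to_sector_new(uint64_t n)   /* unsigned long long */
    {
        return n >> SECTOR_SHIFT;
    }

    int main(void)
    {
        uint64_t bytes = 5ULL << 30;            /* a 5 GiB region */

        printf("old: %llu\n", (unsigned long long)to_sector_old(bytes)); /* 2097152  */
        printf("new: %llu\n", (unsigned long long)to_sector_new(bytes)); /* 10485760 */
        return 0;
    }
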
10143     diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
10144     index f6ded992c183..5b21f14802e1 100644
10145     --- a/include/linux/dma-mapping.h
10146     +++ b/include/linux/dma-mapping.h
10147     @@ -130,6 +130,7 @@ struct dma_map_ops {
10148     enum dma_data_direction direction);
10149     int (*dma_supported)(struct device *dev, u64 mask);
10150     u64 (*get_required_mask)(struct device *dev);
10151     + size_t (*max_mapping_size)(struct device *dev);
10152     };
10153    
10154     #define DMA_MAPPING_ERROR (~(dma_addr_t)0)
10155     @@ -257,6 +258,8 @@ static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
10156     }
10157     #endif
10158    
10159     +size_t dma_direct_max_mapping_size(struct device *dev);
10160     +
10161     #ifdef CONFIG_HAS_DMA
10162     #include <asm/dma-mapping.h>
10163    
10164     @@ -460,6 +463,7 @@ int dma_supported(struct device *dev, u64 mask);
10165     int dma_set_mask(struct device *dev, u64 mask);
10166     int dma_set_coherent_mask(struct device *dev, u64 mask);
10167     u64 dma_get_required_mask(struct device *dev);
10168     +size_t dma_max_mapping_size(struct device *dev);
10169     #else /* CONFIG_HAS_DMA */
10170     static inline dma_addr_t dma_map_page_attrs(struct device *dev,
10171     struct page *page, size_t offset, size_t size,
10172     @@ -561,6 +565,10 @@ static inline u64 dma_get_required_mask(struct device *dev)
10173     {
10174     return 0;
10175     }
10176     +static inline size_t dma_max_mapping_size(struct device *dev)
10177     +{
10178     + return 0;
10179     +}
10180     #endif /* CONFIG_HAS_DMA */
10181    
10182     static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
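
dma_max_mapping_size() lets a driver learn up front that the platform (for instance a SWIOTLB bounce buffer, see further below) cannot map an arbitrarily large region in one go. A hypothetical driver-side sketch, not taken from this patch; xmit_all() and the hardware hand-off are assumed:

    #include <linux/kernel.h>
    #include <linux/dma-mapping.h>

    static int xmit_all(struct device *dev, void *data, size_t len)
    {
        size_t max = dma_max_mapping_size(dev);
        char *p = data;

        while (len) {
            size_t chunk = min_t(size_t, len, max);
            dma_addr_t addr = dma_map_single(dev, p, chunk, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, addr))
                return -ENOMEM;
            /* ... give (addr, chunk) to the device, wait, then: */
            dma_unmap_single(dev, addr, chunk, DMA_TO_DEVICE);
            p += chunk;
            len -= chunk;
        }
        return 0;
    }
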
10183     diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
10184     index 0fbbcdf0c178..da0af631ded5 100644
10185     --- a/include/linux/hardirq.h
10186     +++ b/include/linux/hardirq.h
10187     @@ -60,8 +60,14 @@ extern void irq_enter(void);
10188     */
10189     extern void irq_exit(void);
10190    
10191     +#ifndef arch_nmi_enter
10192     +#define arch_nmi_enter() do { } while (0)
10193     +#define arch_nmi_exit() do { } while (0)
10194     +#endif
10195     +
10196     #define nmi_enter() \
10197     do { \
10198     + arch_nmi_enter(); \
10199     printk_nmi_enter(); \
10200     lockdep_off(); \
10201     ftrace_nmi_enter(); \
10202     @@ -80,6 +86,7 @@ extern void irq_exit(void);
10203     ftrace_nmi_exit(); \
10204     lockdep_on(); \
10205     printk_nmi_exit(); \
10206     + arch_nmi_exit(); \
10207     } while (0)
10208    
10209     #endif /* LINUX_HARDIRQ_H */
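
The #ifndef fallback above is the standard pattern for optional per-architecture hooks: an architecture that needs the hook defines the macro in its asm header before the generic header is included, everyone else gets a no-op that still parses as a single statement (hence do { } while (0)). Schematically, with an invented architecture:

    /* asm/hardirq.h on an architecture that needs the hook (illustrative): */
    #define arch_nmi_enter()    foo_nmi_enter_fixup()
    #define arch_nmi_exit()     foo_nmi_exit_fixup()

    /* generic header: pick up the arch version if present, else a no-op */
    #ifndef arch_nmi_enter
    #define arch_nmi_enter()    do { } while (0)
    #define arch_nmi_exit()     do { } while (0)
    #endif
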
10210     diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
10211     index c38cc5eb7e73..cf761ff58224 100644
10212     --- a/include/linux/kvm_host.h
10213     +++ b/include/linux/kvm_host.h
10214     @@ -634,7 +634,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
10215     struct kvm_memory_slot *dont);
10216     int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
10217     unsigned long npages);
10218     -void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
10219     +void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
10220     int kvm_arch_prepare_memory_region(struct kvm *kvm,
10221     struct kvm_memory_slot *memslot,
10222     const struct kvm_userspace_memory_region *mem,
10223     diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
10224     index 5a3bb3b7c9ad..3ecd7ea212ae 100644
10225     --- a/include/linux/pipe_fs_i.h
10226     +++ b/include/linux/pipe_fs_i.h
10227     @@ -182,6 +182,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
10228     int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
10229     int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
10230     void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
10231     +void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
10232    
10233     extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
10234    
10235     diff --git a/include/linux/property.h b/include/linux/property.h
10236     index 3789ec755fb6..65d3420dd5d1 100644
10237     --- a/include/linux/property.h
10238     +++ b/include/linux/property.h
10239     @@ -258,7 +258,7 @@ struct property_entry {
10240     #define PROPERTY_ENTRY_STRING(_name_, _val_) \
10241     (struct property_entry) { \
10242     .name = _name_, \
10243     - .length = sizeof(_val_), \
10244     + .length = sizeof(const char *), \
10245     .type = DEV_PROP_STRING, \
10246     { .value = { .str = _val_ } }, \
10247     }
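
The subtlety fixed above: when _val_ is a string literal, sizeof(_val_) is the size of the char array, not of the pointer that the union member .str actually stores, so .length was wrong for every string property built from a literal. Quick demonstration:

    #include <stdio.h>

    int main(void)
    {
        printf("%zu\n", sizeof("uart"));        /* 5: char[5], the literal */
        printf("%zu\n", sizeof(const char *));  /* 8 on 64-bit: the pointer */
        return 0;
    }
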
10248     diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
10249     index 7c007ed7505f..29bc3a203283 100644
10250     --- a/include/linux/swiotlb.h
10251     +++ b/include/linux/swiotlb.h
10252     @@ -76,6 +76,8 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
10253     size_t size, enum dma_data_direction dir, unsigned long attrs);
10254     void __init swiotlb_exit(void);
10255     unsigned int swiotlb_max_segment(void);
10256     +size_t swiotlb_max_mapping_size(struct device *dev);
10257     +bool is_swiotlb_active(void);
10258     #else
10259     #define swiotlb_force SWIOTLB_NO_FORCE
10260     static inline bool is_swiotlb_buffer(phys_addr_t paddr)
10261     @@ -95,6 +97,15 @@ static inline unsigned int swiotlb_max_segment(void)
10262     {
10263     return 0;
10264     }
10265     +static inline size_t swiotlb_max_mapping_size(struct device *dev)
10266     +{
10267     + return SIZE_MAX;
10268     +}
10269     +
10270     +static inline bool is_swiotlb_active(void)
10271     +{
10272     + return false;
10273     +}
10274     #endif /* CONFIG_SWIOTLB */
10275    
10276     extern void swiotlb_print_info(void);
10277     diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
10278     index f31bd61c9466..503bba3c4bae 100644
10279     --- a/kernel/cgroup/cgroup.c
10280     +++ b/kernel/cgroup/cgroup.c
10281     @@ -2033,7 +2033,7 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
10282     struct cgroup_namespace *ns)
10283     {
10284     struct dentry *dentry;
10285     - bool new_sb;
10286     + bool new_sb = false;
10287    
10288     dentry = kernfs_mount(fs_type, flags, root->kf_root, magic, &new_sb);
10289    
10290     @@ -2043,6 +2043,7 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
10291     */
10292     if (!IS_ERR(dentry) && ns != &init_cgroup_ns) {
10293     struct dentry *nsdentry;
10294     + struct super_block *sb = dentry->d_sb;
10295     struct cgroup *cgrp;
10296    
10297     mutex_lock(&cgroup_mutex);
10298     @@ -2053,12 +2054,14 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
10299     spin_unlock_irq(&css_set_lock);
10300     mutex_unlock(&cgroup_mutex);
10301    
10302     - nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
10303     + nsdentry = kernfs_node_dentry(cgrp->kn, sb);
10304     dput(dentry);
10305     + if (IS_ERR(nsdentry))
10306     + deactivate_locked_super(sb);
10307     dentry = nsdentry;
10308     }
10309    
10310     - if (IS_ERR(dentry) || !new_sb)
10311     + if (!new_sb)
10312     cgroup_put(&root->cgrp);
10313    
10314     return dentry;
10315     diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
10316     index 355d16acee6d..6310ad01f915 100644
10317     --- a/kernel/dma/direct.c
10318     +++ b/kernel/dma/direct.c
10319     @@ -380,3 +380,14 @@ int dma_direct_supported(struct device *dev, u64 mask)
10320     */
10321     return mask >= __phys_to_dma(dev, min_mask);
10322     }
10323     +
10324     +size_t dma_direct_max_mapping_size(struct device *dev)
10325     +{
10326     + size_t size = SIZE_MAX;
10327     +
10328     + /* If SWIOTLB is active, use its maximum mapping size */
10329     + if (is_swiotlb_active())
10330     + size = swiotlb_max_mapping_size(dev);
10331     +
10332     + return size;
10333     +}
10334     diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
10335     index a11006b6d8e8..5753008ab286 100644
10336     --- a/kernel/dma/mapping.c
10337     +++ b/kernel/dma/mapping.c
10338     @@ -357,3 +357,17 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
10339     ops->cache_sync(dev, vaddr, size, dir);
10340     }
10341     EXPORT_SYMBOL(dma_cache_sync);
10342     +
10343     +size_t dma_max_mapping_size(struct device *dev)
10344     +{
10345     + const struct dma_map_ops *ops = get_dma_ops(dev);
10346     + size_t size = SIZE_MAX;
10347     +
10348     + if (dma_is_direct(ops))
10349     + size = dma_direct_max_mapping_size(dev);
10350     + else if (ops && ops->max_mapping_size)
10351     + size = ops->max_mapping_size(dev);
10352     +
10353     + return size;
10354     +}
10355     +EXPORT_SYMBOL_GPL(dma_max_mapping_size);
10356     diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
10357     index 1fb6fd68b9c7..c873f9cc2146 100644
10358     --- a/kernel/dma/swiotlb.c
10359     +++ b/kernel/dma/swiotlb.c
10360     @@ -662,3 +662,17 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
10361     {
10362     return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
10363     }
10364     +
10365     +size_t swiotlb_max_mapping_size(struct device *dev)
10366     +{
10367     + return ((size_t)1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
10368     +}
10369     +
10370     +bool is_swiotlb_active(void)
10371     +{
10372     + /*
10373     + * When SWIOTLB is initialized, even if io_tlb_start points to physical
10374     + * address zero, io_tlb_end surely doesn't.
10375     + */
10376     + return io_tlb_end != 0;
10377     +}
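
For scale, if the usual constants apply (IO_TLB_SHIFT of 11 giving 2 KiB slots and IO_TLB_SEGSIZE of 128 slots per contiguous segment; treat these as assumptions and check <linux/swiotlb.h>), the ceiling reported above works out to:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        size_t slot = (size_t)1 << 11;      /* IO_TLB_SHIFT = 11: 2 KiB slots */
        size_t max = slot * 128;            /* IO_TLB_SEGSIZE = 128 slots */

        printf("%zu KiB\n", max / 1024);    /* 256 KiB */
        return 0;
    }
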
10378     diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
10379     index 9180158756d2..38d44d27e37a 100644
10380     --- a/kernel/rcu/tree.c
10381     +++ b/kernel/rcu/tree.c
10382     @@ -1557,14 +1557,23 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
10383     }
10384    
10385     /*
10386     - * Awaken the grace-period kthread. Don't do a self-awaken, and don't
10387     - * bother awakening when there is nothing for the grace-period kthread
10388     - * to do (as in several CPUs raced to awaken, and we lost), and finally
10389     - * don't try to awaken a kthread that has not yet been created.
10390     + * Awaken the grace-period kthread. Don't do a self-awaken (unless in
10391     + * an interrupt or softirq handler), and don't bother awakening when there
10392     + * is nothing for the grace-period kthread to do (as in several CPUs raced
10393     + * to awaken, and we lost), and finally don't try to awaken a kthread that
10394     + * has not yet been created. If all those checks are passed, track some
10395     + * debug information and awaken.
10396     + *
10397     + * So why do the self-wakeup when in an interrupt or softirq handler
10398     + * in the grace-period kthread's context? Because the kthread might have
10399     + * been interrupted just as it was going to sleep, and just after the final
10400     + * pre-sleep check of the awaken condition. In this case, a wakeup really
10401     + * is required, and is therefore supplied.
10402     */
10403     static void rcu_gp_kthread_wake(void)
10404     {
10405     - if (current == rcu_state.gp_kthread ||
10406     + if ((current == rcu_state.gp_kthread &&
10407     + !in_interrupt() && !in_serving_softirq()) ||
10408     !READ_ONCE(rcu_state.gp_flags) ||
10409     !rcu_state.gp_kthread)
10410     return;
10411     diff --git a/kernel/sysctl.c b/kernel/sysctl.c
10412     index ba4d9e85feb8..d80bee8ff12e 100644
10413     --- a/kernel/sysctl.c
10414     +++ b/kernel/sysctl.c
10415     @@ -2579,7 +2579,16 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
10416     {
10417     struct do_proc_dointvec_minmax_conv_param *param = data;
10418     if (write) {
10419     - int val = *negp ? -*lvalp : *lvalp;
10420     + int val;
10421     + if (*negp) {
10422     + if (*lvalp > (unsigned long) INT_MAX + 1)
10423     + return -EINVAL;
10424     + val = -*lvalp;
10425     + } else {
10426     + if (*lvalp > (unsigned long) INT_MAX)
10427     + return -EINVAL;
10428     + val = *lvalp;
10429     + }
10430     if ((param->min && *param->min > val) ||
10431     (param->max && *param->max < val))
10432     return -EINVAL;
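
The old expression negated *lvalp and stored it into an int in one step, so out-of-range sysctl writes wrapped silently instead of failing. The asymmetry in the new bounds comes from two's complement: INT_MIN's magnitude is INT_MAX + 1. A standalone version of the conversion:

    #include <limits.h>
    #include <stdio.h>

    static int ulong_to_int(int neg, unsigned long mag, int *out)
    {
        if (neg) {
            if (mag > (unsigned long)INT_MAX + 1)
                return -1;                  /* below INT_MIN */
            *out = (int)-(long long)mag;    /* -(INT_MAX+1) == INT_MIN */
        } else {
            if (mag > (unsigned long)INT_MAX)
                return -1;                  /* above INT_MAX */
            *out = (int)mag;
        }
        return 0;
    }

    int main(void)
    {
        int v;

        if (!ulong_to_int(1, (unsigned long)INT_MAX + 1, &v))
            printf("ok: %d\n", v);          /* ok: -2147483648 */
        printf("%d\n", ulong_to_int(0, (unsigned long)INT_MAX + 1, &v)); /* -1 */
        return 0;
    }
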
10433     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
10434     index c4238b441624..5f40db27aaf2 100644
10435     --- a/kernel/trace/trace.c
10436     +++ b/kernel/trace/trace.c
10437     @@ -5626,7 +5626,6 @@ out:
10438     return ret;
10439    
10440     fail:
10441     - kfree(iter->trace);
10442     kfree(iter);
10443     __trace_array_put(tr);
10444     mutex_unlock(&trace_types_lock);
10445     diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
10446     index 76217bbef815..4629a6104474 100644
10447     --- a/kernel/trace/trace_event_perf.c
10448     +++ b/kernel/trace/trace_event_perf.c
10449     @@ -299,15 +299,13 @@ int perf_uprobe_init(struct perf_event *p_event,
10450    
10451     if (!p_event->attr.uprobe_path)
10452     return -EINVAL;
10453     - path = kzalloc(PATH_MAX, GFP_KERNEL);
10454     - if (!path)
10455     - return -ENOMEM;
10456     - ret = strncpy_from_user(
10457     - path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
10458     - if (ret == PATH_MAX)
10459     - return -E2BIG;
10460     - if (ret < 0)
10461     - goto out;
10462     +
10463     + path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
10464     + PATH_MAX);
10465     + if (IS_ERR(path)) {
10466     + ret = PTR_ERR(path);
10467     + return (ret == -EINVAL) ? -E2BIG : ret;
10468     + }
10469     if (path[0] == '\0') {
10470     ret = -EINVAL;
10471     goto out;
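
strndup_user() collapses the allocate-and-copy dance into one call, and the old open-coded version also leaked the PATH_MAX buffer on its -E2BIG return. The remap relies on strndup_user() returning ERR_PTR(-EINVAL) when the string does not fit in n. A sketch of the resulting shape (dup_probe_path() is an illustrative name, not from the patch):

    #include <linux/err.h>
    #include <linux/limits.h>
    #include <linux/string.h>

    static char *dup_probe_path(const char __user *uptr, int *err)
    {
        char *path = strndup_user(uptr, PATH_MAX);

        if (IS_ERR(path)) {
            *err = PTR_ERR(path);
            if (*err == -EINVAL)    /* the "longer than n" case */
                *err = -E2BIG;
            return NULL;
        }
        return path;
    }
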
10472     diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
10473     index 449d90cfa151..55b72b1c63a0 100644
10474     --- a/kernel/trace/trace_events_hist.c
10475     +++ b/kernel/trace/trace_events_hist.c
10476     @@ -4695,9 +4695,10 @@ static inline void add_to_key(char *compound_key, void *key,
10477     /* ensure NULL-termination */
10478     if (size > key_field->size - 1)
10479     size = key_field->size - 1;
10480     - }
10481    
10482     - memcpy(compound_key + key_field->offset, key, size);
10483     + strncpy(compound_key + key_field->offset, (char *)key, size);
10484     + } else
10485     + memcpy(compound_key + key_field->offset, key, size);
10486     }
10487    
10488     static void
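
Hash keys must be byte-for-byte stable: with memcpy, whatever followed the string's NUL in the source buffer leaked into the compound key, so identical string keys could land in different hash buckets. strncpy() pads the rest of the destination with NULs. Demonstration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char a[8], b[8];

        memset(a, 0xAA, sizeof(a));        /* stale bytes from an earlier key */
        memset(b, 0xAA, sizeof(b));
        memcpy(a, "cat", 4);               /* old way: a[4..7] keep 0xAA */
        strncpy(b, "cat", sizeof(b));      /* new way: b[3..7] NUL-padded */
        printf("equal: %d\n", !memcmp(a, b, sizeof(a)));   /* equal: 0 */
        return 0;
    }
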
10489     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
10490     index 831be5ff5f4d..fc8b51744579 100644
10491     --- a/mm/memory-failure.c
10492     +++ b/mm/memory-failure.c
10493     @@ -1825,19 +1825,17 @@ static int soft_offline_in_use_page(struct page *page, int flags)
10494     struct page *hpage = compound_head(page);
10495    
10496     if (!PageHuge(page) && PageTransHuge(hpage)) {
10497     - lock_page(hpage);
10498     - if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
10499     - unlock_page(hpage);
10500     - if (!PageAnon(hpage))
10501     + lock_page(page);
10502     + if (!PageAnon(page) || unlikely(split_huge_page(page))) {
10503     + unlock_page(page);
10504     + if (!PageAnon(page))
10505     pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
10506     else
10507     pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
10508     - put_hwpoison_page(hpage);
10509     + put_hwpoison_page(page);
10510     return -EBUSY;
10511     }
10512     - unlock_page(hpage);
10513     - get_hwpoison_page(page);
10514     - put_hwpoison_page(hpage);
10515     + unlock_page(page);
10516     }
10517    
10518     /*
10519     diff --git a/mm/memory.c b/mm/memory.c
10520     index e11ca9dd823f..e8d69ade5acc 100644
10521     --- a/mm/memory.c
10522     +++ b/mm/memory.c
10523     @@ -3517,10 +3517,13 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
10524     * but allow concurrent faults).
10525     * The mmap_sem may have been released depending on flags and our
10526     * return value. See filemap_fault() and __lock_page_or_retry().
10527     + * If mmap_sem is released, vma may become invalid (for example
10528     + * by other thread calling munmap()).
10529     */
10530     static vm_fault_t do_fault(struct vm_fault *vmf)
10531     {
10532     struct vm_area_struct *vma = vmf->vma;
10533     + struct mm_struct *vm_mm = vma->vm_mm;
10534     vm_fault_t ret;
10535    
10536     /*
10537     @@ -3561,7 +3564,7 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
10538    
10539     /* preallocated pagetable is unused: free it */
10540     if (vmf->prealloc_pte) {
10541     - pte_free(vma->vm_mm, vmf->prealloc_pte);
10542     + pte_free(vm_mm, vmf->prealloc_pte);
10543     vmf->prealloc_pte = NULL;
10544     }
10545     return ret;
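
Once the fault handler may drop mmap_sem, the vma can be unmapped and freed by another thread, so anything still needed afterwards (here the mm for pte_free()) has to be captured before the call. The shape of the bug, reduced to a runnable toy with illustrative types:

    #include <stdio.h>
    #include <stdlib.h>

    struct mm { int id; };
    struct vma { struct mm *mm; };

    static int fault_may_free(struct vma *v)
    {
        free(v);                    /* models munmap() from another thread */
        return 0;
    }

    static int do_fault(struct vma *v)
    {
        struct mm *mm = v->mm;      /* capture before v can go away */
        int ret = fault_may_free(v);

        printf("cleanup on mm %d\n", mm->id);   /* v->mm here would be a UAF */
        return ret;
    }

    int main(void)
    {
        static struct mm m = { .id = 1 };
        struct vma *v = malloc(sizeof(*v));

        if (!v)
            return 1;
        v->mm = &m;
        return do_fault(v);
    }
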
10546     diff --git a/mm/vmalloc.c b/mm/vmalloc.c
10547     index 871e41c55e23..2cd24186ba84 100644
10548     --- a/mm/vmalloc.c
10549     +++ b/mm/vmalloc.c
10550     @@ -2248,7 +2248,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
10551     if (!(area->flags & VM_USERMAP))
10552     return -EINVAL;
10553    
10554     - if (kaddr + size > area->addr + area->size)
10555     + if (kaddr + size > area->addr + get_vm_area_size(area))
10556     return -EINVAL;
10557    
10558     do {
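
get_vm_area_size() is the recorded area size minus the trailing guard page (for areas without VM_NO_GUARD), so the old check let a remap extend one page into the guard page. A sketch of the arithmetic under that assumption:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct area { unsigned long addr, size; };  /* size includes guard page */

    static unsigned long usable_size(const struct area *a)
    {
        return a->size - PAGE_SIZE;     /* what get_vm_area_size() reports */
    }

    int main(void)
    {
        struct area a = { .addr = 0x100000, .size = 5 * PAGE_SIZE };
        unsigned long kaddr = a.addr, size = 5 * PAGE_SIZE;  /* whole region */

        printf("old check passes: %d\n", kaddr + size <= a.addr + a.size);        /* 1 */
        printf("new check passes: %d\n", kaddr + size <= a.addr + usable_size(&a)); /* 0 */
        return 0;
    }
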
10559     diff --git a/net/9p/client.c b/net/9p/client.c
10560     index 357214a51f13..b85d51f4b8eb 100644
10561     --- a/net/9p/client.c
10562     +++ b/net/9p/client.c
10563     @@ -1061,7 +1061,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
10564     p9_debug(P9_DEBUG_ERROR,
10565     "Please specify a msize of at least 4k\n");
10566     err = -EINVAL;
10567     - goto free_client;
10568     + goto close_trans;
10569     }
10570    
10571     err = p9_client_version(clnt);
10572     diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
10573     index d7ec6132c046..d455537c8fc6 100644
10574     --- a/net/sunrpc/clnt.c
10575     +++ b/net/sunrpc/clnt.c
10576     @@ -66,9 +66,6 @@ static void call_decode(struct rpc_task *task);
10577     static void call_bind(struct rpc_task *task);
10578     static void call_bind_status(struct rpc_task *task);
10579     static void call_transmit(struct rpc_task *task);
10580     -#if defined(CONFIG_SUNRPC_BACKCHANNEL)
10581     -static void call_bc_transmit(struct rpc_task *task);
10582     -#endif /* CONFIG_SUNRPC_BACKCHANNEL */
10583     static void call_status(struct rpc_task *task);
10584     static void call_transmit_status(struct rpc_task *task);
10585     static void call_refresh(struct rpc_task *task);
10586     @@ -80,6 +77,7 @@ static void call_connect_status(struct rpc_task *task);
10587     static __be32 *rpc_encode_header(struct rpc_task *task);
10588     static __be32 *rpc_verify_header(struct rpc_task *task);
10589     static int rpc_ping(struct rpc_clnt *clnt);
10590     +static void rpc_check_timeout(struct rpc_task *task);
10591    
10592     static void rpc_register_client(struct rpc_clnt *clnt)
10593     {
10594     @@ -1131,6 +1129,8 @@ rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
10595     EXPORT_SYMBOL_GPL(rpc_call_async);
10596    
10597     #if defined(CONFIG_SUNRPC_BACKCHANNEL)
10598     +static void call_bc_encode(struct rpc_task *task);
10599     +
10600     /**
10601     * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
10602     * rpc_execute against it
10603     @@ -1152,7 +1152,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
10604     task = rpc_new_task(&task_setup_data);
10605     xprt_init_bc_request(req, task);
10606    
10607     - task->tk_action = call_bc_transmit;
10608     + task->tk_action = call_bc_encode;
10609     atomic_inc(&task->tk_count);
10610     WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
10611     rpc_execute(task);
10612     @@ -1786,7 +1786,12 @@ call_encode(struct rpc_task *task)
10613     xprt_request_enqueue_receive(task);
10614     xprt_request_enqueue_transmit(task);
10615     out:
10616     - task->tk_action = call_bind;
10617     + task->tk_action = call_transmit;
10618     + /* Check that the connection is OK */
10619     + if (!xprt_bound(task->tk_xprt))
10620     + task->tk_action = call_bind;
10621     + else if (!xprt_connected(task->tk_xprt))
10622     + task->tk_action = call_connect;
10623     }
10624    
10625     /*
10626     @@ -1937,8 +1942,7 @@ call_connect_status(struct rpc_task *task)
10627     break;
10628     if (clnt->cl_autobind) {
10629     rpc_force_rebind(clnt);
10630     - task->tk_action = call_bind;
10631     - return;
10632     + goto out_retry;
10633     }
10634     /* fall through */
10635     case -ECONNRESET:
10636     @@ -1958,16 +1962,19 @@ call_connect_status(struct rpc_task *task)
10637     /* fall through */
10638     case -ENOTCONN:
10639     case -EAGAIN:
10640     - /* Check for timeouts before looping back to call_bind */
10641     case -ETIMEDOUT:
10642     - task->tk_action = call_timeout;
10643     - return;
10644     + goto out_retry;
10645     case 0:
10646     clnt->cl_stats->netreconn++;
10647     task->tk_action = call_transmit;
10648     return;
10649     }
10650     rpc_exit(task, status);
10651     + return;
10652     +out_retry:
10653     + /* Check for timeouts before looping back to call_bind */
10654     + task->tk_action = call_bind;
10655     + rpc_check_timeout(task);
10656     }
10657    
10658     /*
10659     @@ -1978,13 +1985,19 @@ call_transmit(struct rpc_task *task)
10660     {
10661     dprint_status(task);
10662    
10663     - task->tk_status = 0;
10664     + task->tk_action = call_transmit_status;
10665     if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
10666     if (!xprt_prepare_transmit(task))
10667     return;
10668     - xprt_transmit(task);
10669     + task->tk_status = 0;
10670     + if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
10671     + if (!xprt_connected(task->tk_xprt)) {
10672     + task->tk_status = -ENOTCONN;
10673     + return;
10674     + }
10675     + xprt_transmit(task);
10676     + }
10677     }
10678     - task->tk_action = call_transmit_status;
10679     xprt_end_transmit(task);
10680     }
10681    
10682     @@ -2038,7 +2051,7 @@ call_transmit_status(struct rpc_task *task)
10683     trace_xprt_ping(task->tk_xprt,
10684     task->tk_status);
10685     rpc_exit(task, task->tk_status);
10686     - break;
10687     + return;
10688     }
10689     /* fall through */
10690     case -ECONNRESET:
10691     @@ -2046,11 +2059,24 @@ call_transmit_status(struct rpc_task *task)
10692     case -EADDRINUSE:
10693     case -ENOTCONN:
10694     case -EPIPE:
10695     + task->tk_action = call_bind;
10696     + task->tk_status = 0;
10697     break;
10698     }
10699     + rpc_check_timeout(task);
10700     }
10701    
10702     #if defined(CONFIG_SUNRPC_BACKCHANNEL)
10703     +static void call_bc_transmit(struct rpc_task *task);
10704     +static void call_bc_transmit_status(struct rpc_task *task);
10705     +
10706     +static void
10707     +call_bc_encode(struct rpc_task *task)
10708     +{
10709     + xprt_request_enqueue_transmit(task);
10710     + task->tk_action = call_bc_transmit;
10711     +}
10712     +
10713     /*
10714     * 5b. Send the backchannel RPC reply. On error, drop the reply. In
10715     * addition, disconnect on connectivity errors.
10716     @@ -2058,26 +2084,23 @@ call_transmit_status(struct rpc_task *task)
10717     static void
10718     call_bc_transmit(struct rpc_task *task)
10719     {
10720     - struct rpc_rqst *req = task->tk_rqstp;
10721     -
10722     - if (rpc_task_need_encode(task))
10723     - xprt_request_enqueue_transmit(task);
10724     - if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
10725     - goto out_wakeup;
10726     -
10727     - if (!xprt_prepare_transmit(task))
10728     - goto out_retry;
10729     -
10730     - if (task->tk_status < 0) {
10731     - printk(KERN_NOTICE "RPC: Could not send backchannel reply "
10732     - "error: %d\n", task->tk_status);
10733     - goto out_done;
10734     + task->tk_action = call_bc_transmit_status;
10735     + if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
10736     + if (!xprt_prepare_transmit(task))
10737     + return;
10738     + task->tk_status = 0;
10739     + xprt_transmit(task);
10740     }
10741     + xprt_end_transmit(task);
10742     +}
10743    
10744     - xprt_transmit(task);
10745     +static void
10746     +call_bc_transmit_status(struct rpc_task *task)
10747     +{
10748     + struct rpc_rqst *req = task->tk_rqstp;
10749    
10750     - xprt_end_transmit(task);
10751     dprint_status(task);
10752     +
10753     switch (task->tk_status) {
10754     case 0:
10755     /* Success */
10756     @@ -2091,8 +2114,14 @@ call_bc_transmit(struct rpc_task *task)
10757     case -ENOTCONN:
10758     case -EPIPE:
10759     break;
10760     + case -ENOBUFS:
10761     + rpc_delay(task, HZ>>2);
10762     + /* fall through */
10763     + case -EBADSLT:
10764     case -EAGAIN:
10765     - goto out_retry;
10766     + task->tk_status = 0;
10767     + task->tk_action = call_bc_transmit;
10768     + return;
10769     case -ETIMEDOUT:
10770     /*
10771     * Problem reaching the server. Disconnect and let the
10772     @@ -2111,18 +2140,11 @@ call_bc_transmit(struct rpc_task *task)
10773     * We were unable to reply and will have to drop the
10774     * request. The server should reconnect and retransmit.
10775     */
10776     - WARN_ON_ONCE(task->tk_status == -EAGAIN);
10777     printk(KERN_NOTICE "RPC: Could not send backchannel reply "
10778     "error: %d\n", task->tk_status);
10779     break;
10780     }
10781     -out_wakeup:
10782     - rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
10783     -out_done:
10784     task->tk_action = rpc_exit_task;
10785     - return;
10786     -out_retry:
10787     - task->tk_status = 0;
10788     }
10789     #endif /* CONFIG_SUNRPC_BACKCHANNEL */
10790    
10791     @@ -2178,7 +2200,7 @@ call_status(struct rpc_task *task)
10792     case -EPIPE:
10793     case -ENOTCONN:
10794     case -EAGAIN:
10795     - task->tk_action = call_encode;
10796     + task->tk_action = call_timeout;
10797     break;
10798     case -EIO:
10799     /* shutdown or soft timeout */
10800     @@ -2192,20 +2214,13 @@ call_status(struct rpc_task *task)
10801     }
10802     }
10803    
10804     -/*
10805     - * 6a. Handle RPC timeout
10806     - * We do not release the request slot, so we keep using the
10807     - * same XID for all retransmits.
10808     - */
10809     static void
10810     -call_timeout(struct rpc_task *task)
10811     +rpc_check_timeout(struct rpc_task *task)
10812     {
10813     struct rpc_clnt *clnt = task->tk_client;
10814    
10815     - if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
10816     - dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
10817     - goto retry;
10818     - }
10819     + if (xprt_adjust_timeout(task->tk_rqstp) == 0)
10820     + return;
10821    
10822     dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
10823     task->tk_timeouts++;
10824     @@ -2241,10 +2256,19 @@ call_timeout(struct rpc_task *task)
10825     * event? RFC2203 requires the server to drop all such requests.
10826     */
10827     rpcauth_invalcred(task);
10828     +}
10829    
10830     -retry:
10831     +/*
10832     + * 6a. Handle RPC timeout
10833     + * We do not release the request slot, so we keep using the
10834     + * same XID for all retransmits.
10835     + */
10836     +static void
10837     +call_timeout(struct rpc_task *task)
10838     +{
10839     task->tk_action = call_encode;
10840     task->tk_status = 0;
10841     + rpc_check_timeout(task);
10842     }
10843    
10844     /*
10845     diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
10846     index a6a060925e5d..43590a968b73 100644
10847     --- a/net/sunrpc/svcsock.c
10848     +++ b/net/sunrpc/svcsock.c
10849     @@ -349,12 +349,16 @@ static ssize_t svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov,
10850     /*
10851     * Set socket snd and rcv buffer lengths
10852     */
10853     -static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
10854     - unsigned int rcv)
10855     +static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
10856     {
10857     + unsigned int max_mesg = svsk->sk_xprt.xpt_server->sv_max_mesg;
10858     + struct socket *sock = svsk->sk_sock;
10859     +
10860     + nreqs = min(nreqs, INT_MAX / 2 / max_mesg);
10861     +
10862     lock_sock(sock->sk);
10863     - sock->sk->sk_sndbuf = snd * 2;
10864     - sock->sk->sk_rcvbuf = rcv * 2;
10865     + sock->sk->sk_sndbuf = nreqs * max_mesg * 2;
10866     + sock->sk->sk_rcvbuf = nreqs * max_mesg * 2;
10867     sock->sk->sk_write_space(sock->sk);
10868     release_sock(sock->sk);
10869     }
10870     @@ -516,9 +520,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
10871     * provides an upper bound on the number of threads
10872     * which will access the socket.
10873     */
10874     - svc_sock_setbufsize(svsk->sk_sock,
10875     - (serv->sv_nrthreads+3) * serv->sv_max_mesg,
10876     - (serv->sv_nrthreads+3) * serv->sv_max_mesg);
10877     + svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);
10878    
10879     clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
10880     skb = NULL;
10881     @@ -681,9 +683,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
10882     * receive and respond to one request.
10883     * svc_udp_recvfrom will re-adjust if necessary
10884     */
10885     - svc_sock_setbufsize(svsk->sk_sock,
10886     - 3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
10887     - 3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
10888     + svc_sock_setbufsize(svsk, 3);
10889    
10890     /* data might have come in before data_ready set up */
10891     set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
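
Folding the multiplication into the helper lets it clamp first: sk_sndbuf and sk_rcvbuf are ints, so nreqs * max_mesg * 2 has to be kept below INT_MAX instead of being computed, possibly overflowed, by each caller. With an assumed 1 MiB sv_max_mesg for illustration:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int max_mesg = 1024 * 1024;
        unsigned int nreqs = 4096;                  /* e.g. a huge thread count */
        unsigned int cap = INT_MAX / 2 / max_mesg;  /* 1023 */

        if (nreqs > cap)
            nreqs = cap;
        printf("bufsize = %u\n", nreqs * max_mesg * 2);  /* fits in an int */
        return 0;
    }
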
10892     diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
10893     index f0e36c3492ba..cf20dd36a30f 100644
10894     --- a/security/selinux/hooks.c
10895     +++ b/security/selinux/hooks.c
10896     @@ -959,8 +959,11 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
10897     BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
10898    
10899     /* if fs is reusing a sb, make sure that the contexts match */
10900     - if (newsbsec->flags & SE_SBINITIALIZED)
10901     + if (newsbsec->flags & SE_SBINITIALIZED) {
10902     + if ((kern_flags & SECURITY_LSM_NATIVE_LABELS) && !set_context)
10903     + *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
10904     return selinux_cmp_sb_context(oldsb, newsb);
10905     + }
10906    
10907     mutex_lock(&newsbsec->lock);
10908    
10909     @@ -5120,6 +5123,9 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
10910     return -EINVAL;
10911     }
10912    
10913     + if (walk_size + len > addrlen)
10914     + return -EINVAL;
10915     +
10916     err = -EINVAL;
10917     switch (optname) {
10918     /* Bind checks */
10919     diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
10920     index 809b7e9f03ca..c5fcc632f670 100644
10921     --- a/sound/soc/codecs/pcm186x.c
10922     +++ b/sound/soc/codecs/pcm186x.c
10923     @@ -42,7 +42,7 @@ struct pcm186x_priv {
10924     bool is_master_mode;
10925     };
10926    
10927     -static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 4000, 50);
10928     +static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 50, 0);
10929    
10930     static const struct snd_kcontrol_new pcm1863_snd_controls[] = {
10931     SOC_DOUBLE_R_S_TLV("ADC Capture Volume", PCM186X_PGA_VAL_CH1_L,
10932     @@ -158,7 +158,7 @@ static const struct snd_soc_dapm_widget pcm1863_dapm_widgets[] = {
10933     * Put the codec into SLEEP mode when not in use, allowing the
10934     * Energysense mechanism to operate.
10935     */
10936     - SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 0),
10937     + SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 1),
10938     };
10939    
10940     static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
10941     @@ -184,8 +184,8 @@ static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
10942     * Put the codec into SLEEP mode when not in use, allowing the
10943     * Energysense mechanism to operate.
10944     */
10945     - SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 0),
10946     - SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 0),
10947     + SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 1),
10948     + SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 1),
10949     };
10950    
10951     static const struct snd_soc_dapm_route pcm1863_dapm_routes[] = {
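
DECLARE_TLV_DB_SCALE(name, min, step, mute) takes centi-dB arguments; the old invocation had the 0.5 dB step and the overall range transposed, advertising 40 dB per control step from the -12 dB floor. Expanding the corrected scale by hand:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int min = -1200, step = 50;     /* centi-dB: -12 dB floor, 0.5 dB steps */

        for (int i = 0; i < 4; i++)
            printf("step %d: %d.%02d dB\n", i, (min + i * step) / 100,
                   abs((min + i * step) % 100));    /* -12.00, -11.50, ... */
        return 0;
    }
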
10952     diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
10953     index 57b484768a58..afe67c865330 100644
10954     --- a/sound/soc/fsl/fsl_esai.c
10955     +++ b/sound/soc/fsl/fsl_esai.c
10956     @@ -398,7 +398,8 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
10957     break;
10958     case SND_SOC_DAIFMT_RIGHT_J:
10959     /* Data on rising edge of bclk, frame high, right aligned */
10960     - xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCR_xWA;
10961     + xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP;
10962     + xcr |= ESAI_xCR_xWA;
10963     break;
10964     case SND_SOC_DAIFMT_DSP_A:
10965     /* Data on rising edge of bclk, frame high, 1clk before data */
10966     @@ -455,12 +456,12 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
10967     return -EINVAL;
10968     }
10969    
10970     - mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR;
10971     + mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR | ESAI_xCR_xWA;
10972     regmap_update_bits(esai_priv->regmap, REG_ESAI_TCR, mask, xcr);
10973     regmap_update_bits(esai_priv->regmap, REG_ESAI_RCR, mask, xcr);
10974    
10975     mask = ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCCR_xFSP |
10976     - ESAI_xCCR_xFSD | ESAI_xCCR_xCKD | ESAI_xCR_xWA;
10977     + ESAI_xCCR_xFSD | ESAI_xCCR_xCKD;
10978     regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, mask, xccr);
10979     regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, mask, xccr);
10980    
10981     diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
10982     index f69961c4a4f3..2921ce08b198 100644
10983     --- a/tools/perf/util/auxtrace.c
10984     +++ b/tools/perf/util/auxtrace.c
10985     @@ -1278,9 +1278,9 @@ static int __auxtrace_mmap__read(struct perf_mmap *map,
10986     }
10987    
10988     /* padding must be written by fn() e.g. record__process_auxtrace() */
10989     - padding = size & 7;
10990     + padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
10991     if (padding)
10992     - padding = 8 - padding;
10993     + padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
10994    
10995     memset(&ev, 0, sizeof(ev));
10996     ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
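
With PERF_AUXTRACE_RECORD_ALIGNMENT a power of two, size & (ALIGNMENT - 1) is size % ALIGNMENT, and the pad is the distance to the next boundary; the named constant replaces the bare 8 and 7. The arithmetic in isolation:

    #include <stdio.h>

    #define ALIGNMENT 8     /* stands in for PERF_AUXTRACE_RECORD_ALIGNMENT */

    int main(void)
    {
        for (unsigned int size = 5; size <= 8; size++) {
            unsigned int pad = size & (ALIGNMENT - 1);

            if (pad)
                pad = ALIGNMENT - pad;
            printf("size=%u pad=%u\n", size, pad);  /* 3, 2, 1, 0 */
        }
        return 0;
    }
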
10997     diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
10998     index 8e50f96d4b23..fac32482db61 100644
10999     --- a/tools/perf/util/auxtrace.h
11000     +++ b/tools/perf/util/auxtrace.h
11001     @@ -40,6 +40,9 @@ struct record_opts;
11002     struct auxtrace_info_event;
11003     struct events_stats;
11004    
11005     +/* Auxtrace records must have the same alignment as perf event records */
11006     +#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
11007     +
11008     enum auxtrace_type {
11009     PERF_AUXTRACE_UNKNOWN,
11010     PERF_AUXTRACE_INTEL_PT,
11011     diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
11012     index 4503f3ca45ab..a54d6c9a4601 100644
11013     --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
11014     +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
11015     @@ -26,6 +26,7 @@
11016    
11017     #include "../cache.h"
11018     #include "../util.h"
11019     +#include "../auxtrace.h"
11020    
11021     #include "intel-pt-insn-decoder.h"
11022     #include "intel-pt-pkt-decoder.h"
11023     @@ -1394,7 +1395,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
11024     {
11025     intel_pt_log("ERROR: Buffer overflow\n");
11026     intel_pt_clear_tx_flags(decoder);
11027     - decoder->cbr = 0;
11028     decoder->timestamp_insn_cnt = 0;
11029     decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
11030     decoder->overflow = true;
11031     @@ -2575,6 +2575,34 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
11032     }
11033     }
11034    
11035     +#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
11036     +
11037     +/**
11038     + * adj_for_padding - adjust overlap to account for padding.
11039     + * @buf_b: second buffer
11040     + * @buf_a: first buffer
11041     + * @len_a: size of first buffer
11042     + *
11043     + * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
11044     + * accordingly.
11045     + *
11046     + * Return: A pointer into @buf_b from where non-overlapped data starts
11047     + */
11048     +static unsigned char *adj_for_padding(unsigned char *buf_b,
11049     + unsigned char *buf_a, size_t len_a)
11050     +{
11051     + unsigned char *p = buf_b - MAX_PADDING;
11052     + unsigned char *q = buf_a + len_a - MAX_PADDING;
11053     + int i;
11054     +
11055     + for (i = MAX_PADDING; i; i--, p++, q++) {
11056     + if (*p != *q)
11057     + break;
11058     + }
11059     +
11060     + return p;
11061     +}
11062     +
11063     /**
11064     * intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
11065     * using TSC.
11066     @@ -2625,8 +2653,11 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
11067    
11068     /* Same TSC, so buffers are consecutive */
11069     if (!cmp && rem_b >= rem_a) {
11070     + unsigned char *start;
11071     +
11072     *consecutive = true;
11073     - return buf_b + len_b - (rem_b - rem_a);
11074     + start = buf_b + len_b - (rem_b - rem_a);
11075     + return adj_for_padding(start, buf_a, len_a);
11076     }
11077     if (cmp < 0)
11078     return buf_b; /* tsc_a < tsc_b => no overlap */
11079     @@ -2689,7 +2720,7 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
11080     found = memmem(buf_a, len_a, buf_b, len_a);
11081     if (found) {
11082     *consecutive = true;
11083     - return buf_b + len_a;
11084     + return adj_for_padding(buf_b + len_a, buf_a, len_a);
11085     }
11086    
11087     /* Try again at next PSB in buffer 'a' */
11088     diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
11089     index 2e72373ec6df..4493fc13a6fa 100644
11090     --- a/tools/perf/util/intel-pt.c
11091     +++ b/tools/perf/util/intel-pt.c
11092     @@ -2522,6 +2522,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
11093     }
11094    
11095     pt->timeless_decoding = intel_pt_timeless_decoding(pt);
11096     + if (pt->timeless_decoding && !pt->tc.time_mult)
11097     + pt->tc.time_mult = 1;
11098     pt->have_tsc = intel_pt_have_tsc(pt);
11099     pt->sampling_mode = false;
11100     pt->est_tsc = !pt->timeless_decoding;
11101     diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
11102     index 48efad6d0f90..ca5f2e4796ea 100644
11103     --- a/tools/perf/util/symbol.c
11104     +++ b/tools/perf/util/symbol.c
11105     @@ -710,6 +710,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
11106     }
11107    
11108     pos->start -= curr_map->start - curr_map->pgoff;
11109     + if (pos->end > curr_map->end)
11110     + pos->end = curr_map->end;
11111     if (pos->end)
11112     pos->end -= curr_map->start - curr_map->pgoff;
11113     symbols__insert(&curr_map->dso->symbols, pos);
11114     diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
11115     index 30251e288629..5cc22cdaa5ba 100644
11116     --- a/virt/kvm/arm/mmu.c
11117     +++ b/virt/kvm/arm/mmu.c
11118     @@ -2353,7 +2353,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
11119     return 0;
11120     }
11121    
11122     -void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
11123     +void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
11124     {
11125     }
11126    
11127     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
11128     index 076bc38963bf..4e1024dbb73f 100644
11129     --- a/virt/kvm/kvm_main.c
11130     +++ b/virt/kvm/kvm_main.c
11131     @@ -874,6 +874,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
11132     int as_id, struct kvm_memslots *slots)
11133     {
11134     struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
11135     + u64 gen;
11136    
11137     /*
11138     * Set the low bit in the generation, which disables SPTE caching
11139     @@ -896,9 +897,11 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
11140     * space 0 will use generations 0, 4, 8, ... while * address space 1 will
11141     * use generations 2, 6, 10, 14, ...
11142     */
11143     - slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
11144     + gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;
11145    
11146     - kvm_arch_memslots_updated(kvm, slots);
11147     + kvm_arch_memslots_updated(kvm, gen);
11148     +
11149     + slots->generation = gen;
11150    
11151     return old_memslots;
11152     }