Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.15/0103-4.15.4-all-fixes.patch

Parent Directory | Revision Log


Revision 3087 - (hide annotations) (download)
Wed Mar 21 14:52:25 2018 UTC (6 years, 2 months ago) by niro
File size: 353682 byte(s)
-linux-4.15.4
1 niro 3087 diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
2     index fc1c884fea10..c1d520de6dfe 100644
3     --- a/Documentation/arm64/silicon-errata.txt
4     +++ b/Documentation/arm64/silicon-errata.txt
5     @@ -72,7 +72,7 @@ stable kernels.
6     | Hisilicon | Hip0{6,7} | #161010701 | N/A |
7     | Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 |
8     | | | | |
9     -| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
10     +| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
11     | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
12     | Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 |
13     | Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 |
14     diff --git a/Documentation/devicetree/bindings/media/cec-gpio.txt b/Documentation/devicetree/bindings/media/cec-gpio.txt
15     index 46a0bac8b3b9..12fcd55ed153 100644
16     --- a/Documentation/devicetree/bindings/media/cec-gpio.txt
17     +++ b/Documentation/devicetree/bindings/media/cec-gpio.txt
18     @@ -4,6 +4,10 @@ The HDMI CEC GPIO module supports CEC implementations where the CEC line
19     is hooked up to a pull-up GPIO line and - optionally - the HPD line is
20     hooked up to another GPIO line.
21    
22     +Please note: the maximum voltage for the CEC line is 3.63V, for the HPD
23     +line it is 5.3V. So you may need some sort of level conversion circuitry
24     +when connecting them to a GPIO line.
25     +
26     Required properties:
27     - compatible: value must be "cec-gpio".
28     - cec-gpios: gpio that the CEC line is connected to. The line should be
29     @@ -21,7 +25,7 @@ the following property is optional:
30    
31     Example for the Raspberry Pi 3 where the CEC line is connected to
32     pin 26 aka BCM7 aka CE1 on the GPIO pin header and the HPD line is
33     -connected to pin 11 aka BCM17:
34     +connected to pin 11 aka BCM17 (some level shifter is needed for this!):
35    
36     #include <dt-bindings/gpio/gpio.h>
37    
38     diff --git a/Makefile b/Makefile
39     index 13566ad7863a..8495e1ca052e 100644
40     --- a/Makefile
41     +++ b/Makefile
42     @@ -1,7 +1,7 @@
43     # SPDX-License-Identifier: GPL-2.0
44     VERSION = 4
45     PATCHLEVEL = 15
46     -SUBLEVEL = 3
47     +SUBLEVEL = 4
48     EXTRAVERSION =
49     NAME = Fearless Coyote
50    
51     @@ -432,7 +432,8 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
52     export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
53    
54     export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
55     -export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
56     +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
57     +export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
58     export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
59     export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
60     export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
61     diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
62     index d2e4da93e68c..ca3322536f72 100644
63     --- a/arch/alpha/include/asm/futex.h
64     +++ b/arch/alpha/include/asm/futex.h
65     @@ -20,8 +20,8 @@
66     "3: .subsection 2\n" \
67     "4: br 1b\n" \
68     " .previous\n" \
69     - EXC(1b,3b,%1,$31) \
70     - EXC(2b,3b,%1,$31) \
71     + EXC(1b,3b,$31,%1) \
72     + EXC(2b,3b,$31,%1) \
73     : "=&r" (oldval), "=&r"(ret) \
74     : "r" (uaddr), "r"(oparg) \
75     : "memory")
76     @@ -82,8 +82,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
77     "3: .subsection 2\n"
78     "4: br 1b\n"
79     " .previous\n"
80     - EXC(1b,3b,%0,$31)
81     - EXC(2b,3b,%0,$31)
82     + EXC(1b,3b,$31,%0)
83     + EXC(2b,3b,$31,%0)
84     : "+r"(ret), "=&r"(prev), "=&r"(cmp)
85     : "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
86     : "memory");
87     diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
88     index ce3a675c0c4b..75a5c35a2067 100644
89     --- a/arch/alpha/kernel/osf_sys.c
90     +++ b/arch/alpha/kernel/osf_sys.c
91     @@ -964,8 +964,8 @@ static inline long
92     put_tv32(struct timeval32 __user *o, struct timeval *i)
93     {
94     return copy_to_user(o, &(struct timeval32){
95     - .tv_sec = o->tv_sec,
96     - .tv_usec = o->tv_usec},
97     + .tv_sec = i->tv_sec,
98     + .tv_usec = i->tv_usec},
99     sizeof(struct timeval32));
100     }
101    
102     diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h
103     index 2e4cb74fdc41..18043af45e2b 100644
104     --- a/arch/alpha/kernel/pci_impl.h
105     +++ b/arch/alpha/kernel/pci_impl.h
106     @@ -144,7 +144,8 @@ struct pci_iommu_arena
107     };
108    
109     #if defined(CONFIG_ALPHA_SRM) && \
110     - (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA))
111     + (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA) || \
112     + defined(CONFIG_ALPHA_AVANTI))
113     # define NEED_SRM_SAVE_RESTORE
114     #else
115     # undef NEED_SRM_SAVE_RESTORE
116     diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
117     index 74bfb1f2d68e..3a885253f486 100644
118     --- a/arch/alpha/kernel/process.c
119     +++ b/arch/alpha/kernel/process.c
120     @@ -269,12 +269,13 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
121     application calling fork. */
122     if (clone_flags & CLONE_SETTLS)
123     childti->pcb.unique = regs->r20;
124     + else
125     + regs->r20 = 0; /* OSF/1 has some strange fork() semantics. */
126     childti->pcb.usp = usp ?: rdusp();
127     *childregs = *regs;
128     childregs->r0 = 0;
129     childregs->r19 = 0;
130     childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */
131     - regs->r20 = 0;
132     stack = ((struct switch_stack *) regs) - 1;
133     *childstack = *stack;
134     childstack->r26 = (unsigned long) ret_from_fork;
135     diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
136     index 4bd99a7b1c41..f43bd05dede2 100644
137     --- a/arch/alpha/kernel/traps.c
138     +++ b/arch/alpha/kernel/traps.c
139     @@ -160,11 +160,16 @@ void show_stack(struct task_struct *task, unsigned long *sp)
140     for(i=0; i < kstack_depth_to_print; i++) {
141     if (((long) stack & (THREAD_SIZE-1)) == 0)
142     break;
143     - if (i && ((i % 4) == 0))
144     - printk("\n ");
145     - printk("%016lx ", *stack++);
146     + if ((i % 4) == 0) {
147     + if (i)
148     + pr_cont("\n");
149     + printk(" ");
150     + } else {
151     + pr_cont(" ");
152     + }
153     + pr_cont("%016lx", *stack++);
154     }
155     - printk("\n");
156     + pr_cont("\n");
157     dik_show_trace(sp);
158     }
159    
160     diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
161     index 1b0e0e86ee9c..96e62ec105d0 100644
162     --- a/arch/arm/crypto/crc32-ce-glue.c
163     +++ b/arch/arm/crypto/crc32-ce-glue.c
164     @@ -188,6 +188,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
165     .base.cra_name = "crc32",
166     .base.cra_driver_name = "crc32-arm-ce",
167     .base.cra_priority = 200,
168     + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
169     .base.cra_blocksize = 1,
170     .base.cra_module = THIS_MODULE,
171     }, {
172     @@ -203,6 +204,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
173     .base.cra_name = "crc32c",
174     .base.cra_driver_name = "crc32c-arm-ce",
175     .base.cra_priority = 200,
176     + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
177     .base.cra_blocksize = 1,
178     .base.cra_module = THIS_MODULE,
179     } };
180     diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
181     index a9f7d3f47134..fdd9da1555be 100644
182     --- a/arch/arm/include/asm/kvm_host.h
183     +++ b/arch/arm/include/asm/kvm_host.h
184     @@ -301,4 +301,10 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
185     /* All host FP/SIMD state is restored on guest exit, so nothing to save: */
186     static inline void kvm_fpsimd_flush_cpu_state(void) {}
187    
188     +static inline bool kvm_arm_harden_branch_predictor(void)
189     +{
190     + /* No way to detect it yet, pretend it is not there. */
191     + return false;
192     +}
193     +
194     #endif /* __ARM_KVM_HOST_H__ */
195     diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
196     index fa6f2174276b..eb46fc81a440 100644
197     --- a/arch/arm/include/asm/kvm_mmu.h
198     +++ b/arch/arm/include/asm/kvm_mmu.h
199     @@ -221,6 +221,16 @@ static inline unsigned int kvm_get_vmid_bits(void)
200     return 8;
201     }
202    
203     +static inline void *kvm_get_hyp_vector(void)
204     +{
205     + return kvm_ksym_ref(__kvm_hyp_vector);
206     +}
207     +
208     +static inline int kvm_map_vectors(void)
209     +{
210     + return 0;
211     +}
212     +
213     #endif /* !__ASSEMBLY__ */
214    
215     #endif /* __ARM_KVM_MMU_H__ */
216     diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
217     deleted file mode 100644
218     index 6bda945d31fa..000000000000
219     --- a/arch/arm/include/asm/kvm_psci.h
220     +++ /dev/null
221     @@ -1,27 +0,0 @@
222     -/*
223     - * Copyright (C) 2012 - ARM Ltd
224     - * Author: Marc Zyngier <marc.zyngier@arm.com>
225     - *
226     - * This program is free software; you can redistribute it and/or modify
227     - * it under the terms of the GNU General Public License version 2 as
228     - * published by the Free Software Foundation.
229     - *
230     - * This program is distributed in the hope that it will be useful,
231     - * but WITHOUT ANY WARRANTY; without even the implied warranty of
232     - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
233     - * GNU General Public License for more details.
234     - *
235     - * You should have received a copy of the GNU General Public License
236     - * along with this program. If not, see <http://www.gnu.org/licenses/>.
237     - */
238     -
239     -#ifndef __ARM_KVM_PSCI_H__
240     -#define __ARM_KVM_PSCI_H__
241     -
242     -#define KVM_ARM_PSCI_0_1 1
243     -#define KVM_ARM_PSCI_0_2 2
244     -
245     -int kvm_psci_version(struct kvm_vcpu *vcpu);
246     -int kvm_psci_call(struct kvm_vcpu *vcpu);
247     -
248     -#endif /* __ARM_KVM_PSCI_H__ */
249     diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
250     index cf8bf6bf87c4..910bd8dabb3c 100644
251     --- a/arch/arm/kvm/handle_exit.c
252     +++ b/arch/arm/kvm/handle_exit.c
253     @@ -21,7 +21,7 @@
254     #include <asm/kvm_emulate.h>
255     #include <asm/kvm_coproc.h>
256     #include <asm/kvm_mmu.h>
257     -#include <asm/kvm_psci.h>
258     +#include <kvm/arm_psci.h>
259     #include <trace/events/kvm.h>
260    
261     #include "trace.h"
262     @@ -36,9 +36,9 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
263     kvm_vcpu_hvc_get_imm(vcpu));
264     vcpu->stat.hvc_exit_stat++;
265    
266     - ret = kvm_psci_call(vcpu);
267     + ret = kvm_hvc_call_handler(vcpu);
268     if (ret < 0) {
269     - kvm_inject_undefined(vcpu);
270     + vcpu_set_reg(vcpu, 0, ~0UL);
271     return 1;
272     }
273    
274     @@ -47,7 +47,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
275    
276     static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
277     {
278     - kvm_inject_undefined(vcpu);
279     + /*
280     + * "If an SMC instruction executed at Non-secure EL1 is
281     + * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
282     + * Trap exception, not a Secure Monitor Call exception [...]"
283     + *
284     + * We need to advance the PC after the trap, as it would
285     + * otherwise return to the same address...
286     + */
287     + vcpu_set_reg(vcpu, 0, ~0UL);
288     + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
289     return 1;
290     }
291    
292     diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
293     index c9a7e9e1414f..d22f64095ca2 100644
294     --- a/arch/arm64/Kconfig
295     +++ b/arch/arm64/Kconfig
296     @@ -522,20 +522,13 @@ config CAVIUM_ERRATUM_30115
297     config QCOM_FALKOR_ERRATUM_1003
298     bool "Falkor E1003: Incorrect translation due to ASID change"
299     default y
300     - select ARM64_PAN if ARM64_SW_TTBR0_PAN
301     help
302     On Falkor v1, an incorrect ASID may be cached in the TLB when ASID
303     - and BADDR are changed together in TTBRx_EL1. The workaround for this
304     - issue is to use a reserved ASID in cpu_do_switch_mm() before
305     - switching to the new ASID. Saying Y here selects ARM64_PAN if
306     - ARM64_SW_TTBR0_PAN is selected. This is done because implementing and
307     - maintaining the E1003 workaround in the software PAN emulation code
308     - would be an unnecessary complication. The affected Falkor v1 CPU
309     - implements ARMv8.1 hardware PAN support and using hardware PAN
310     - support versus software PAN emulation is mutually exclusive at
311     - runtime.
312     -
313     - If unsure, say Y.
314     + and BADDR are changed together in TTBRx_EL1. Since we keep the ASID
315     + in TTBR1_EL1, this situation only occurs in the entry trampoline and
316     + then only for entries in the walk cache, since the leaf translation
317     + is unchanged. Work around the erratum by invalidating the walk cache
318     + entries for the trampoline before entering the kernel proper.
319    
320     config QCOM_FALKOR_ERRATUM_1009
321     bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
322     @@ -850,6 +843,35 @@ config FORCE_MAX_ZONEORDER
323     However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
324     4M allocations matching the default size used by generic code.
325    
326     +config UNMAP_KERNEL_AT_EL0
327     + bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
328     + default y
329     + help
330     + Speculation attacks against some high-performance processors can
331     + be used to bypass MMU permission checks and leak kernel data to
332     + userspace. This can be defended against by unmapping the kernel
333     + when running in userspace, mapping it back in on exception entry
334     + via a trampoline page in the vector table.
335     +
336     + If unsure, say Y.
337     +
338     +config HARDEN_BRANCH_PREDICTOR
339     + bool "Harden the branch predictor against aliasing attacks" if EXPERT
340     + default y
341     + help
342     + Speculation attacks against some high-performance processors rely on
343     + being able to manipulate the branch predictor for a victim context by
344     + executing aliasing branches in the attacker context. Such attacks
345     + can be partially mitigated against by clearing internal branch
346     + predictor state and limiting the prediction logic in some situations.
347     +
348     + This config option will take CPU-specific actions to harden the
349     + branch predictor against aliasing attacks and may rely on specific
350     + instruction sequences or control bits being set by the system
351     + firmware.
352     +
353     + If unsure, say Y.
354     +
355     menuconfig ARMV8_DEPRECATED
356     bool "Emulate deprecated/obsolete ARMv8 instructions"
357     depends on COMPAT
358     diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
359     index 52b5341cb270..62b83416b30c 100644
360     --- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts
361     +++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
362     @@ -61,6 +61,12 @@
363     reg = <0x0 0x0 0x0 0x80000000>;
364     };
365    
366     + aliases {
367     + ethernet0 = &cpm_eth0;
368     + ethernet1 = &cpm_eth1;
369     + ethernet2 = &cpm_eth2;
370     + };
371     +
372     cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus {
373     compatible = "regulator-fixed";
374     regulator-name = "usb3h0-vbus";
375     diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
376     index d97b72bed662..d9fffde64c44 100644
377     --- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts
378     +++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
379     @@ -61,6 +61,13 @@
380     reg = <0x0 0x0 0x0 0x80000000>;
381     };
382    
383     + aliases {
384     + ethernet0 = &cpm_eth0;
385     + ethernet1 = &cpm_eth2;
386     + ethernet2 = &cps_eth0;
387     + ethernet3 = &cps_eth1;
388     + };
389     +
390     cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus {
391     compatible = "regulator-fixed";
392     regulator-name = "cpm-usb3h0-vbus";
393     diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
394     index b3350827ee55..945f7bd22802 100644
395     --- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
396     +++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
397     @@ -62,6 +62,12 @@
398     reg = <0x0 0x0 0x0 0x80000000>;
399     };
400    
401     + aliases {
402     + ethernet0 = &cpm_eth0;
403     + ethernet1 = &cps_eth0;
404     + ethernet2 = &cps_eth1;
405     + };
406     +
407     /* Regulator labels correspond with schematics */
408     v_3_3: regulator-3-3v {
409     compatible = "regulator-fixed";
410     diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c
411     index 624f4137918c..34b4e3d46aab 100644
412     --- a/arch/arm64/crypto/crc32-ce-glue.c
413     +++ b/arch/arm64/crypto/crc32-ce-glue.c
414     @@ -185,6 +185,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
415     .base.cra_name = "crc32",
416     .base.cra_driver_name = "crc32-arm64-ce",
417     .base.cra_priority = 200,
418     + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
419     .base.cra_blocksize = 1,
420     .base.cra_module = THIS_MODULE,
421     }, {
422     @@ -200,6 +201,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
423     .base.cra_name = "crc32c",
424     .base.cra_driver_name = "crc32c-arm64-ce",
425     .base.cra_priority = 200,
426     + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
427     .base.cra_blocksize = 1,
428     .base.cra_module = THIS_MODULE,
429     } };
430     diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
431     index b3da6c886835..dd49c3567f20 100644
432     --- a/arch/arm64/include/asm/asm-uaccess.h
433     +++ b/arch/arm64/include/asm/asm-uaccess.h
434     @@ -4,6 +4,7 @@
435    
436     #include <asm/alternative.h>
437     #include <asm/kernel-pgtable.h>
438     +#include <asm/mmu.h>
439     #include <asm/sysreg.h>
440     #include <asm/assembler.h>
441    
442     @@ -13,51 +14,62 @@
443     #ifdef CONFIG_ARM64_SW_TTBR0_PAN
444     .macro __uaccess_ttbr0_disable, tmp1
445     mrs \tmp1, ttbr1_el1 // swapper_pg_dir
446     + bic \tmp1, \tmp1, #TTBR_ASID_MASK
447     add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
448     msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
449     isb
450     + sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE
451     + msr ttbr1_el1, \tmp1 // set reserved ASID
452     + isb
453     .endm
454    
455     - .macro __uaccess_ttbr0_enable, tmp1
456     + .macro __uaccess_ttbr0_enable, tmp1, tmp2
457     get_thread_info \tmp1
458     ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
459     + mrs \tmp2, ttbr1_el1
460     + extr \tmp2, \tmp2, \tmp1, #48
461     + ror \tmp2, \tmp2, #16
462     + msr ttbr1_el1, \tmp2 // set the active ASID
463     + isb
464     msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
465     isb
466     .endm
467    
468     - .macro uaccess_ttbr0_disable, tmp1
469     + .macro uaccess_ttbr0_disable, tmp1, tmp2
470     alternative_if_not ARM64_HAS_PAN
471     + save_and_disable_irq \tmp2 // avoid preemption
472     __uaccess_ttbr0_disable \tmp1
473     + restore_irq \tmp2
474     alternative_else_nop_endif
475     .endm
476    
477     - .macro uaccess_ttbr0_enable, tmp1, tmp2
478     + .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
479     alternative_if_not ARM64_HAS_PAN
480     - save_and_disable_irq \tmp2 // avoid preemption
481     - __uaccess_ttbr0_enable \tmp1
482     - restore_irq \tmp2
483     + save_and_disable_irq \tmp3 // avoid preemption
484     + __uaccess_ttbr0_enable \tmp1, \tmp2
485     + restore_irq \tmp3
486     alternative_else_nop_endif
487     .endm
488     #else
489     - .macro uaccess_ttbr0_disable, tmp1
490     + .macro uaccess_ttbr0_disable, tmp1, tmp2
491     .endm
492    
493     - .macro uaccess_ttbr0_enable, tmp1, tmp2
494     + .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
495     .endm
496     #endif
497    
498     /*
499     * These macros are no-ops when UAO is present.
500     */
501     - .macro uaccess_disable_not_uao, tmp1
502     - uaccess_ttbr0_disable \tmp1
503     + .macro uaccess_disable_not_uao, tmp1, tmp2
504     + uaccess_ttbr0_disable \tmp1, \tmp2
505     alternative_if ARM64_ALT_PAN_NOT_UAO
506     SET_PSTATE_PAN(1)
507     alternative_else_nop_endif
508     .endm
509    
510     - .macro uaccess_enable_not_uao, tmp1, tmp2
511     - uaccess_ttbr0_enable \tmp1, \tmp2
512     + .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3
513     + uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
514     alternative_if ARM64_ALT_PAN_NOT_UAO
515     SET_PSTATE_PAN(0)
516     alternative_else_nop_endif
517     diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
518     index 8b168280976f..b05565dd50b6 100644
519     --- a/arch/arm64/include/asm/assembler.h
520     +++ b/arch/arm64/include/asm/assembler.h
521     @@ -26,7 +26,6 @@
522     #include <asm/asm-offsets.h>
523     #include <asm/cpufeature.h>
524     #include <asm/debug-monitors.h>
525     -#include <asm/mmu_context.h>
526     #include <asm/page.h>
527     #include <asm/pgtable-hwdef.h>
528     #include <asm/ptrace.h>
529     @@ -109,6 +108,24 @@
530     dmb \opt
531     .endm
532    
533     +/*
534     + * Value prediction barrier
535     + */
536     + .macro csdb
537     + hint #20
538     + .endm
539     +
540     +/*
541     + * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
542     + * of bounds.
543     + */
544     + .macro mask_nospec64, idx, limit, tmp
545     + sub \tmp, \idx, \limit
546     + bic \tmp, \tmp, \idx
547     + and \idx, \idx, \tmp, asr #63
548     + csdb
549     + .endm
550     +
551     /*
552     * NOP sequence
553     */
554     @@ -477,39 +494,8 @@ alternative_endif
555     mrs \rd, sp_el0
556     .endm
557    
558     -/*
559     - * Errata workaround prior to TTBR0_EL1 update
560     - *
561     - * val: TTBR value with new BADDR, preserved
562     - * tmp0: temporary register, clobbered
563     - * tmp1: other temporary register, clobbered
564     - */
565     - .macro pre_ttbr0_update_workaround, val, tmp0, tmp1
566     -#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
567     -alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
568     - mrs \tmp0, ttbr0_el1
569     - mov \tmp1, #FALKOR_RESERVED_ASID
570     - bfi \tmp0, \tmp1, #48, #16 // reserved ASID + old BADDR
571     - msr ttbr0_el1, \tmp0
572     - isb
573     - bfi \tmp0, \val, #0, #48 // reserved ASID + new BADDR
574     - msr ttbr0_el1, \tmp0
575     - isb
576     -alternative_else_nop_endif
577     -#endif
578     - .endm
579     -
580     -/*
581     - * Errata workaround post TTBR0_EL1 update.
582     - */
583     - .macro post_ttbr0_update_workaround
584     -#ifdef CONFIG_CAVIUM_ERRATUM_27456
585     -alternative_if ARM64_WORKAROUND_CAVIUM_27456
586     - ic iallu
587     - dsb nsh
588     - isb
589     -alternative_else_nop_endif
590     -#endif
591     + .macro pte_to_phys, phys, pte
592     + and \phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
593     .endm
594    
595     /**
596     diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
597     index 77651c49ef44..f11518af96a9 100644
598     --- a/arch/arm64/include/asm/barrier.h
599     +++ b/arch/arm64/include/asm/barrier.h
600     @@ -32,6 +32,7 @@
601     #define dsb(opt) asm volatile("dsb " #opt : : : "memory")
602    
603     #define psb_csync() asm volatile("hint #17" : : : "memory")
604     +#define csdb() asm volatile("hint #20" : : : "memory")
605    
606     #define mb() dsb(sy)
607     #define rmb() dsb(ld)
608     @@ -40,6 +41,27 @@
609     #define dma_rmb() dmb(oshld)
610     #define dma_wmb() dmb(oshst)
611    
612     +/*
613     + * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
614     + * and 0 otherwise.
615     + */
616     +#define array_index_mask_nospec array_index_mask_nospec
617     +static inline unsigned long array_index_mask_nospec(unsigned long idx,
618     + unsigned long sz)
619     +{
620     + unsigned long mask;
621     +
622     + asm volatile(
623     + " cmp %1, %2\n"
624     + " sbc %0, xzr, xzr\n"
625     + : "=r" (mask)
626     + : "r" (idx), "Ir" (sz)
627     + : "cc");
628     +
629     + csdb();
630     + return mask;
631     +}
632     +
633     #define __smp_mb() dmb(ish)
634     #define __smp_rmb() dmb(ishld)
635     #define __smp_wmb() dmb(ishst)
636     diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
637     index 2ff7c5e8efab..7049b4802587 100644
638     --- a/arch/arm64/include/asm/cpucaps.h
639     +++ b/arch/arm64/include/asm/cpucaps.h
640     @@ -41,7 +41,10 @@
641     #define ARM64_WORKAROUND_CAVIUM_30115 20
642     #define ARM64_HAS_DCPOP 21
643     #define ARM64_SVE 22
644     +#define ARM64_UNMAP_KERNEL_AT_EL0 23
645     +#define ARM64_HARDEN_BRANCH_PREDICTOR 24
646     +#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
647    
648     -#define ARM64_NCAPS 23
649     +#define ARM64_NCAPS 26
650    
651     #endif /* __ASM_CPUCAPS_H */
652     diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
653     index cbf08d7cbf30..be7bd19c87ec 100644
654     --- a/arch/arm64/include/asm/cputype.h
655     +++ b/arch/arm64/include/asm/cputype.h
656     @@ -79,28 +79,37 @@
657     #define ARM_CPU_PART_AEM_V8 0xD0F
658     #define ARM_CPU_PART_FOUNDATION 0xD00
659     #define ARM_CPU_PART_CORTEX_A57 0xD07
660     +#define ARM_CPU_PART_CORTEX_A72 0xD08
661     #define ARM_CPU_PART_CORTEX_A53 0xD03
662     #define ARM_CPU_PART_CORTEX_A73 0xD09
663     +#define ARM_CPU_PART_CORTEX_A75 0xD0A
664    
665     #define APM_CPU_PART_POTENZA 0x000
666    
667     #define CAVIUM_CPU_PART_THUNDERX 0x0A1
668     #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
669     #define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
670     +#define CAVIUM_CPU_PART_THUNDERX2 0x0AF
671    
672     #define BRCM_CPU_PART_VULCAN 0x516
673    
674     #define QCOM_CPU_PART_FALKOR_V1 0x800
675     #define QCOM_CPU_PART_FALKOR 0xC00
676     +#define QCOM_CPU_PART_KRYO 0x200
677    
678     #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
679     #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
680     +#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
681     #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
682     +#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
683     #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
684     #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
685     #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
686     +#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
687     +#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
688     #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
689     #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
690     +#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
691    
692     #ifndef __ASSEMBLY__
693    
694     diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
695     index c4cd5081d78b..8389050328bb 100644
696     --- a/arch/arm64/include/asm/efi.h
697     +++ b/arch/arm64/include/asm/efi.h
698     @@ -121,19 +121,21 @@ static inline void efi_set_pgd(struct mm_struct *mm)
699     if (mm != current->active_mm) {
700     /*
701     * Update the current thread's saved ttbr0 since it is
702     - * restored as part of a return from exception. Set
703     - * the hardware TTBR0_EL1 using cpu_switch_mm()
704     - * directly to enable potential errata workarounds.
705     + * restored as part of a return from exception. Enable
706     + * access to the valid TTBR0_EL1 and invoke the errata
707     + * workaround directly since there is no return from
708     + * exception when invoking the EFI run-time services.
709     */
710     update_saved_ttbr0(current, mm);
711     - cpu_switch_mm(mm->pgd, mm);
712     + uaccess_ttbr0_enable();
713     + post_ttbr_update_workaround();
714     } else {
715     /*
716     * Defer the switch to the current thread's TTBR0_EL1
717     * until uaccess_enable(). Restore the current
718     * thread's saved ttbr0 corresponding to its active_mm
719     */
720     - cpu_set_reserved_ttbr0();
721     + uaccess_ttbr0_disable();
722     update_saved_ttbr0(current, current->active_mm);
723     }
724     }
725     diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
726     index 4052ec39e8db..ec1e6d6fa14c 100644
727     --- a/arch/arm64/include/asm/fixmap.h
728     +++ b/arch/arm64/include/asm/fixmap.h
729     @@ -58,6 +58,11 @@ enum fixed_addresses {
730     FIX_APEI_GHES_NMI,
731     #endif /* CONFIG_ACPI_APEI_GHES */
732    
733     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
734     + FIX_ENTRY_TRAMP_DATA,
735     + FIX_ENTRY_TRAMP_TEXT,
736     +#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
737     +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
738     __end_of_permanent_fixed_addresses,
739    
740     /*
741     diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
742     index 5bb2fd4674e7..07fe2479d310 100644
743     --- a/arch/arm64/include/asm/futex.h
744     +++ b/arch/arm64/include/asm/futex.h
745     @@ -48,9 +48,10 @@ do { \
746     } while (0)
747    
748     static inline int
749     -arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
750     +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
751     {
752     int oldval = 0, ret, tmp;
753     + u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
754    
755     pagefault_disable();
756    
757     @@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
758     }
759    
760     static inline int
761     -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
762     +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
763     u32 oldval, u32 newval)
764     {
765     int ret = 0;
766     u32 val, tmp;
767     + u32 __user *uaddr;
768    
769     - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
770     + if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
771     return -EFAULT;
772    
773     + uaddr = __uaccess_mask_ptr(_uaddr);
774     uaccess_enable();
775     asm volatile("// futex_atomic_cmpxchg_inatomic\n"
776     " prfm pstl1strm, %2\n"
777     diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
778     index ab4d0a926043..24961b732e65 100644
779     --- a/arch/arm64/include/asm/kvm_asm.h
780     +++ b/arch/arm64/include/asm/kvm_asm.h
781     @@ -68,6 +68,8 @@ extern u32 __kvm_get_mdcr_el2(void);
782    
783     extern u32 __init_stage2_translation(void);
784    
785     +extern void __qcom_hyp_sanitize_btac_predictors(void);
786     +
787     #endif
788    
789     #endif /* __ARM_KVM_ASM_H__ */
790     diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
791     index ea6cb5b24258..20cd5b514773 100644
792     --- a/arch/arm64/include/asm/kvm_host.h
793     +++ b/arch/arm64/include/asm/kvm_host.h
794     @@ -396,4 +396,9 @@ static inline void kvm_fpsimd_flush_cpu_state(void)
795     sve_flush_cpu_state();
796     }
797    
798     +static inline bool kvm_arm_harden_branch_predictor(void)
799     +{
800     + return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
801     +}
802     +
803     #endif /* __ARM64_KVM_HOST_H__ */
804     diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
805     index 672c8684d5c2..2d6d4bd9de52 100644
806     --- a/arch/arm64/include/asm/kvm_mmu.h
807     +++ b/arch/arm64/include/asm/kvm_mmu.h
808     @@ -309,5 +309,43 @@ static inline unsigned int kvm_get_vmid_bits(void)
809     return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
810     }
811    
812     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
813     +#include <asm/mmu.h>
814     +
815     +static inline void *kvm_get_hyp_vector(void)
816     +{
817     + struct bp_hardening_data *data = arm64_get_bp_hardening_data();
818     + void *vect = kvm_ksym_ref(__kvm_hyp_vector);
819     +
820     + if (data->fn) {
821     + vect = __bp_harden_hyp_vecs_start +
822     + data->hyp_vectors_slot * SZ_2K;
823     +
824     + if (!has_vhe())
825     + vect = lm_alias(vect);
826     + }
827     +
828     + return vect;
829     +}
830     +
831     +static inline int kvm_map_vectors(void)
832     +{
833     + return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
834     + kvm_ksym_ref(__bp_harden_hyp_vecs_end),
835     + PAGE_HYP_EXEC);
836     +}
837     +
838     +#else
839     +static inline void *kvm_get_hyp_vector(void)
840     +{
841     + return kvm_ksym_ref(__kvm_hyp_vector);
842     +}
843     +
844     +static inline int kvm_map_vectors(void)
845     +{
846     + return 0;
847     +}
848     +#endif
849     +
850     #endif /* __ASSEMBLY__ */
851     #endif /* __ARM64_KVM_MMU_H__ */
852     diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h
853     deleted file mode 100644
854     index bc39e557c56c..000000000000
855     --- a/arch/arm64/include/asm/kvm_psci.h
856     +++ /dev/null
857     @@ -1,27 +0,0 @@
858     -/*
859     - * Copyright (C) 2012,2013 - ARM Ltd
860     - * Author: Marc Zyngier <marc.zyngier@arm.com>
861     - *
862     - * This program is free software; you can redistribute it and/or modify
863     - * it under the terms of the GNU General Public License version 2 as
864     - * published by the Free Software Foundation.
865     - *
866     - * This program is distributed in the hope that it will be useful,
867     - * but WITHOUT ANY WARRANTY; without even the implied warranty of
868     - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
869     - * GNU General Public License for more details.
870     - *
871     - * You should have received a copy of the GNU General Public License
872     - * along with this program. If not, see <http://www.gnu.org/licenses/>.
873     - */
874     -
875     -#ifndef __ARM64_KVM_PSCI_H__
876     -#define __ARM64_KVM_PSCI_H__
877     -
878     -#define KVM_ARM_PSCI_0_1 1
879     -#define KVM_ARM_PSCI_0_2 2
880     -
881     -int kvm_psci_version(struct kvm_vcpu *vcpu);
882     -int kvm_psci_call(struct kvm_vcpu *vcpu);
883     -
884     -#endif /* __ARM64_KVM_PSCI_H__ */
885     diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
886     index 0d34bf0a89c7..6dd83d75b82a 100644
887     --- a/arch/arm64/include/asm/mmu.h
888     +++ b/arch/arm64/include/asm/mmu.h
889     @@ -17,6 +17,10 @@
890     #define __ASM_MMU_H
891    
892     #define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
893     +#define USER_ASID_FLAG (UL(1) << 48)
894     +#define TTBR_ASID_MASK (UL(0xffff) << 48)
895     +
896     +#ifndef __ASSEMBLY__
897    
898     typedef struct {
899     atomic64_t id;
900     @@ -31,6 +35,49 @@ typedef struct {
901     */
902     #define ASID(mm) ((mm)->context.id.counter & 0xffff)
903    
904     +static inline bool arm64_kernel_unmapped_at_el0(void)
905     +{
906     + return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
907     + cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
908     +}
909     +
910     +typedef void (*bp_hardening_cb_t)(void);
911     +
912     +struct bp_hardening_data {
913     + int hyp_vectors_slot;
914     + bp_hardening_cb_t fn;
915     +};
916     +
917     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
918     +extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
919     +
920     +DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
921     +
922     +static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
923     +{
924     + return this_cpu_ptr(&bp_hardening_data);
925     +}
926     +
927     +static inline void arm64_apply_bp_hardening(void)
928     +{
929     + struct bp_hardening_data *d;
930     +
931     + if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
932     + return;
933     +
934     + d = arm64_get_bp_hardening_data();
935     + if (d->fn)
936     + d->fn();
937     +}
938     +#else
939     +static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
940     +{
941     + return NULL;
942     +}
943     +
944     +static inline void arm64_apply_bp_hardening(void) { }
945     +#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
946     +
947     extern void paging_init(void);
948     extern void bootmem_init(void);
949     extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
950     @@ -41,4 +88,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
951     extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
952     extern void mark_linear_text_alias_ro(void);
953    
954     +#endif /* !__ASSEMBLY__ */
955     #endif
956     diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
957     index 9d155fa9a507..779d7a2ec5ec 100644
958     --- a/arch/arm64/include/asm/mmu_context.h
959     +++ b/arch/arm64/include/asm/mmu_context.h
960     @@ -19,8 +19,6 @@
961     #ifndef __ASM_MMU_CONTEXT_H
962     #define __ASM_MMU_CONTEXT_H
963    
964     -#define FALKOR_RESERVED_ASID 1
965     -
966     #ifndef __ASSEMBLY__
967    
968     #include <linux/compiler.h>
969     @@ -57,6 +55,13 @@ static inline void cpu_set_reserved_ttbr0(void)
970     isb();
971     }
972    
973     +static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
974     +{
975     + BUG_ON(pgd == swapper_pg_dir);
976     + cpu_set_reserved_ttbr0();
977     + cpu_do_switch_mm(virt_to_phys(pgd),mm);
978     +}
979     +
980     /*
981     * TCR.T0SZ value to use when the ID map is active. Usually equals
982     * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
983     @@ -170,7 +175,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
984     else
985     ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
986    
987     - task_thread_info(tsk)->ttbr0 = ttbr;
988     + WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
989     }
990     #else
991     static inline void update_saved_ttbr0(struct task_struct *tsk,
992     @@ -225,6 +230,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
993     #define activate_mm(prev,next) switch_mm(prev, next, current)
994    
995     void verify_cpu_asid_bits(void);
996     +void post_ttbr_update_workaround(void);
997    
998     #endif /* !__ASSEMBLY__ */
999    
1000     diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
1001     index eb0c2bd90de9..8df4cb6ac6f7 100644
1002     --- a/arch/arm64/include/asm/pgtable-hwdef.h
1003     +++ b/arch/arm64/include/asm/pgtable-hwdef.h
1004     @@ -272,6 +272,7 @@
1005     #define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT)
1006     #define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT)
1007    
1008     +#define TCR_A1 (UL(1) << 22)
1009     #define TCR_ASID16 (UL(1) << 36)
1010     #define TCR_TBI0 (UL(1) << 37)
1011     #define TCR_HA (UL(1) << 39)
1012     diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
1013     index 0a5635fb0ef9..2db84df5eb42 100644
1014     --- a/arch/arm64/include/asm/pgtable-prot.h
1015     +++ b/arch/arm64/include/asm/pgtable-prot.h
1016     @@ -34,8 +34,14 @@
1017    
1018     #include <asm/pgtable-types.h>
1019    
1020     -#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
1021     -#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
1022     +#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
1023     +#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
1024     +
1025     +#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
1026     +#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
1027     +
1028     +#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
1029     +#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
1030    
1031     #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
1032     #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
1033     @@ -47,23 +53,24 @@
1034     #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
1035     #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
1036    
1037     -#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
1038     +#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
1039     +#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT
1040    
1041     -#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
1042     -#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
1043     -#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
1044     -#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
1045     -#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
1046     +#define PAGE_KERNEL __pgprot(PROT_NORMAL)
1047     +#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
1048     +#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
1049     +#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
1050     +#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
1051    
1052     -#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
1053     -#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
1054     -#define PAGE_HYP_RO __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
1055     +#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
1056     +#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
1057     +#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
1058     #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
1059    
1060     -#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
1061     -#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
1062     +#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
1063     +#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
1064    
1065     -#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
1066     +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
1067     #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
1068     #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
1069     #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
1070     diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
1071     index bdcc7f1c9d06..e74394e7b4a6 100644
1072     --- a/arch/arm64/include/asm/pgtable.h
1073     +++ b/arch/arm64/include/asm/pgtable.h
1074     @@ -683,6 +683,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1075    
1076     extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
1077     extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
1078     +extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
1079    
1080     /*
1081     * Encode and decode a swap entry:
1082     diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
1083     index 14ad6e4e87d1..16cef2e8449e 100644
1084     --- a/arch/arm64/include/asm/proc-fns.h
1085     +++ b/arch/arm64/include/asm/proc-fns.h
1086     @@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
1087    
1088     #include <asm/memory.h>
1089    
1090     -#define cpu_switch_mm(pgd,mm) \
1091     -do { \
1092     - BUG_ON(pgd == swapper_pg_dir); \
1093     - cpu_do_switch_mm(virt_to_phys(pgd),mm); \
1094     -} while (0)
1095     -
1096     #endif /* __ASSEMBLY__ */
1097     #endif /* __KERNEL__ */
1098     #endif /* __ASM_PROCFNS_H */
1099     diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
1100     index 023cacb946c3..f96a13556887 100644
1101     --- a/arch/arm64/include/asm/processor.h
1102     +++ b/arch/arm64/include/asm/processor.h
1103     @@ -21,6 +21,9 @@
1104    
1105     #define TASK_SIZE_64 (UL(1) << VA_BITS)
1106    
1107     +#define KERNEL_DS UL(-1)
1108     +#define USER_DS (TASK_SIZE_64 - 1)
1109     +
1110     #ifndef __ASSEMBLY__
1111    
1112     /*
1113     diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
1114     index 08cc88574659..871744973ece 100644
1115     --- a/arch/arm64/include/asm/sysreg.h
1116     +++ b/arch/arm64/include/asm/sysreg.h
1117     @@ -437,6 +437,8 @@
1118     #define ID_AA64ISAR1_DPB_SHIFT 0
1119    
1120     /* id_aa64pfr0 */
1121     +#define ID_AA64PFR0_CSV3_SHIFT 60
1122     +#define ID_AA64PFR0_CSV2_SHIFT 56
1123     #define ID_AA64PFR0_SVE_SHIFT 32
1124     #define ID_AA64PFR0_GIC_SHIFT 24
1125     #define ID_AA64PFR0_ASIMD_SHIFT 20
1126     diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
1127     index af1c76981911..9e82dd79c7db 100644
1128     --- a/arch/arm64/include/asm/tlbflush.h
1129     +++ b/arch/arm64/include/asm/tlbflush.h
1130     @@ -23,6 +23,7 @@
1131    
1132     #include <linux/sched.h>
1133     #include <asm/cputype.h>
1134     +#include <asm/mmu.h>
1135    
1136     /*
1137     * Raw TLBI operations.
1138     @@ -54,6 +55,11 @@
1139    
1140     #define __tlbi(op, ...) __TLBI_N(op, ##__VA_ARGS__, 1, 0)
1141    
1142     +#define __tlbi_user(op, arg) do { \
1143     + if (arm64_kernel_unmapped_at_el0()) \
1144     + __tlbi(op, (arg) | USER_ASID_FLAG); \
1145     +} while (0)
1146     +
1147     /*
1148     * TLB Management
1149     * ==============
1150     @@ -115,6 +121,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
1151    
1152     dsb(ishst);
1153     __tlbi(aside1is, asid);
1154     + __tlbi_user(aside1is, asid);
1155     dsb(ish);
1156     }
1157    
1158     @@ -125,6 +132,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
1159    
1160     dsb(ishst);
1161     __tlbi(vale1is, addr);
1162     + __tlbi_user(vale1is, addr);
1163     dsb(ish);
1164     }
1165    
1166     @@ -151,10 +159,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
1167    
1168     dsb(ishst);
1169     for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
1170     - if (last_level)
1171     + if (last_level) {
1172     __tlbi(vale1is, addr);
1173     - else
1174     + __tlbi_user(vale1is, addr);
1175     + } else {
1176     __tlbi(vae1is, addr);
1177     + __tlbi_user(vae1is, addr);
1178     + }
1179     }
1180     dsb(ish);
1181     }
1182     @@ -194,6 +205,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
1183     unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
1184    
1185     __tlbi(vae1is, addr);
1186     + __tlbi_user(vae1is, addr);
1187     dsb(ish);
1188     }
1189    
1190     diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
1191     index fc0f9eb66039..fad8c1b2ca3e 100644
1192     --- a/arch/arm64/include/asm/uaccess.h
1193     +++ b/arch/arm64/include/asm/uaccess.h
1194     @@ -35,16 +35,20 @@
1195     #include <asm/compiler.h>
1196     #include <asm/extable.h>
1197    
1198     -#define KERNEL_DS (-1UL)
1199     #define get_ds() (KERNEL_DS)
1200     -
1201     -#define USER_DS TASK_SIZE_64
1202     #define get_fs() (current_thread_info()->addr_limit)
1203    
1204     static inline void set_fs(mm_segment_t fs)
1205     {
1206     current_thread_info()->addr_limit = fs;
1207    
1208     + /*
1209     + * Prevent a mispredicted conditional call to set_fs from forwarding
1210     + * the wrong address limit to access_ok under speculation.
1211     + */
1212     + dsb(nsh);
1213     + isb();
1214     +
1215     /* On user-mode return, check fs is correct */
1216     set_thread_flag(TIF_FSCHECK);
1217    
1218     @@ -66,22 +70,32 @@ static inline void set_fs(mm_segment_t fs)
1219     * Returns 1 if the range is valid, 0 otherwise.
1220     *
1221     * This is equivalent to the following test:
1222     - * (u65)addr + (u65)size <= current->addr_limit
1223     - *
1224     - * This needs 65-bit arithmetic.
1225     + * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
1226     */
1227     -#define __range_ok(addr, size) \
1228     -({ \
1229     - unsigned long __addr = (unsigned long)(addr); \
1230     - unsigned long flag, roksum; \
1231     - __chk_user_ptr(addr); \
1232     - asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
1233     - : "=&r" (flag), "=&r" (roksum) \
1234     - : "1" (__addr), "Ir" (size), \
1235     - "r" (current_thread_info()->addr_limit) \
1236     - : "cc"); \
1237     - flag; \
1238     -})
1239     +static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
1240     +{
1241     + unsigned long limit = current_thread_info()->addr_limit;
1242     +
1243     + __chk_user_ptr(addr);
1244     + asm volatile(
1245     + // A + B <= C + 1 for all A,B,C, in four easy steps:
1246     + // 1: X = A + B; X' = X % 2^64
1247     + " adds %0, %0, %2\n"
1248     + // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
1249     + " csel %1, xzr, %1, hi\n"
1250     + // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
1251     + // to compensate for the carry flag being set in step 4. For
1252     + // X > 2^64, X' merely has to remain nonzero, which it does.
1253     + " csinv %0, %0, xzr, cc\n"
1254     + // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
1255     + // comes from the carry in being clear. Otherwise, we are
1256     + // testing X' - C == 0, subject to the previous adjustments.
1257     + " sbcs xzr, %0, %1\n"
1258     + " cset %0, ls\n"
1259     + : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
1260     +
1261     + return addr;
1262     +}
1263    
1264     /*
1265     * When dealing with data aborts, watchpoints, or instruction traps we may end
1266     @@ -90,7 +104,7 @@ static inline void set_fs(mm_segment_t fs)
1267     */
1268     #define untagged_addr(addr) sign_extend64(addr, 55)
1269    
1270     -#define access_ok(type, addr, size) __range_ok(addr, size)
1271     +#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
1272     #define user_addr_max get_fs
1273    
1274     #define _ASM_EXTABLE(from, to) \
1275     @@ -105,17 +119,23 @@ static inline void set_fs(mm_segment_t fs)
1276     #ifdef CONFIG_ARM64_SW_TTBR0_PAN
1277     static inline void __uaccess_ttbr0_disable(void)
1278     {
1279     - unsigned long ttbr;
1280     + unsigned long flags, ttbr;
1281    
1282     + local_irq_save(flags);
1283     + ttbr = read_sysreg(ttbr1_el1);
1284     + ttbr &= ~TTBR_ASID_MASK;
1285     /* reserved_ttbr0 placed at the end of swapper_pg_dir */
1286     - ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
1287     - write_sysreg(ttbr, ttbr0_el1);
1288     + write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
1289     isb();
1290     + /* Set reserved ASID */
1291     + write_sysreg(ttbr, ttbr1_el1);
1292     + isb();
1293     + local_irq_restore(flags);
1294     }
1295    
1296     static inline void __uaccess_ttbr0_enable(void)
1297     {
1298     - unsigned long flags;
1299     + unsigned long flags, ttbr0, ttbr1;
1300    
1301     /*
1302     * Disable interrupts to avoid preemption between reading the 'ttbr0'
1303     @@ -123,7 +143,17 @@ static inline void __uaccess_ttbr0_enable(void)
1304     * roll-over and an update of 'ttbr0'.
1305     */
1306     local_irq_save(flags);
1307     - write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
1308     + ttbr0 = READ_ONCE(current_thread_info()->ttbr0);
1309     +
1310     + /* Restore active ASID */
1311     + ttbr1 = read_sysreg(ttbr1_el1);
1312     + ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */
1313     + ttbr1 |= ttbr0 & TTBR_ASID_MASK;
1314     + write_sysreg(ttbr1, ttbr1_el1);
1315     + isb();
1316     +
1317     + /* Restore user page table */
1318     + write_sysreg(ttbr0, ttbr0_el1);
1319     isb();
1320     local_irq_restore(flags);
1321     }
1322     @@ -192,6 +222,26 @@ static inline void uaccess_enable_not_uao(void)
1323     __uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
1324     }
1325    
1326     +/*
1327     + * Sanitise a uaccess pointer such that it becomes NULL if above the
1328     + * current addr_limit.
1329     + */
1330     +#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
1331     +static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
1332     +{
1333     + void __user *safe_ptr;
1334     +
1335     + asm volatile(
1336     + " bics xzr, %1, %2\n"
1337     + " csel %0, %1, xzr, eq\n"
1338     + : "=&r" (safe_ptr)
1339     + : "r" (ptr), "r" (current_thread_info()->addr_limit)
1340     + : "cc");
1341     +
1342     + csdb();
1343     + return safe_ptr;
1344     +}
1345     +
1346     /*
1347     * The "__xxx" versions of the user access functions do not verify the address
1348     * space - it must have been done previously with a separate "access_ok()"
1349     @@ -244,28 +294,33 @@ do { \
1350     (x) = (__force __typeof__(*(ptr)))__gu_val; \
1351     } while (0)
1352    
1353     -#define __get_user(x, ptr) \
1354     +#define __get_user_check(x, ptr, err) \
1355     ({ \
1356     - int __gu_err = 0; \
1357     - __get_user_err((x), (ptr), __gu_err); \
1358     - __gu_err; \
1359     + __typeof__(*(ptr)) __user *__p = (ptr); \
1360     + might_fault(); \
1361     + if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
1362     + __p = uaccess_mask_ptr(__p); \
1363     + __get_user_err((x), __p, (err)); \
1364     + } else { \
1365     + (x) = 0; (err) = -EFAULT; \
1366     + } \
1367     })
1368    
1369     #define __get_user_error(x, ptr, err) \
1370     ({ \
1371     - __get_user_err((x), (ptr), (err)); \
1372     + __get_user_check((x), (ptr), (err)); \
1373     (void)0; \
1374     })
1375    
1376     -#define get_user(x, ptr) \
1377     +#define __get_user(x, ptr) \
1378     ({ \
1379     - __typeof__(*(ptr)) __user *__p = (ptr); \
1380     - might_fault(); \
1381     - access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
1382     - __get_user((x), __p) : \
1383     - ((x) = 0, -EFAULT); \
1384     + int __gu_err = 0; \
1385     + __get_user_check((x), (ptr), __gu_err); \
1386     + __gu_err; \
1387     })
1388    
1389     +#define get_user __get_user
1390     +
1391     #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
1392     asm volatile( \
1393     "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
1394     @@ -308,43 +363,63 @@ do { \
1395     uaccess_disable_not_uao(); \
1396     } while (0)
1397    
1398     -#define __put_user(x, ptr) \
1399     +#define __put_user_check(x, ptr, err) \
1400     ({ \
1401     - int __pu_err = 0; \
1402     - __put_user_err((x), (ptr), __pu_err); \
1403     - __pu_err; \
1404     + __typeof__(*(ptr)) __user *__p = (ptr); \
1405     + might_fault(); \
1406     + if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
1407     + __p = uaccess_mask_ptr(__p); \
1408     + __put_user_err((x), __p, (err)); \
1409     + } else { \
1410     + (err) = -EFAULT; \
1411     + } \
1412     })
1413    
1414     #define __put_user_error(x, ptr, err) \
1415     ({ \
1416     - __put_user_err((x), (ptr), (err)); \
1417     + __put_user_check((x), (ptr), (err)); \
1418     (void)0; \
1419     })
1420    
1421     -#define put_user(x, ptr) \
1422     +#define __put_user(x, ptr) \
1423     ({ \
1424     - __typeof__(*(ptr)) __user *__p = (ptr); \
1425     - might_fault(); \
1426     - access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
1427     - __put_user((x), __p) : \
1428     - -EFAULT; \
1429     + int __pu_err = 0; \
1430     + __put_user_check((x), (ptr), __pu_err); \
1431     + __pu_err; \
1432     })
1433    
1434     +#define put_user __put_user
1435     +
1436     extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
1437     -#define raw_copy_from_user __arch_copy_from_user
1438     +#define raw_copy_from_user(to, from, n) \
1439     +({ \
1440     + __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \
1441     +})
1442     +
1443     extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
1444     -#define raw_copy_to_user __arch_copy_to_user
1445     -extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
1446     -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1447     +#define raw_copy_to_user(to, from, n) \
1448     +({ \
1449     + __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \
1450     +})
1451     +
1452     +extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
1453     +#define raw_copy_in_user(to, from, n) \
1454     +({ \
1455     + __arch_copy_in_user(__uaccess_mask_ptr(to), \
1456     + __uaccess_mask_ptr(from), (n)); \
1457     +})
1458     +
1459     #define INLINE_COPY_TO_USER
1460     #define INLINE_COPY_FROM_USER
1461    
1462     -static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
1463     +extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
1464     +static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
1465     {
1466     if (access_ok(VERIFY_WRITE, to, n))
1467     - n = __clear_user(to, n);
1468     + n = __arch_clear_user(__uaccess_mask_ptr(to), n);
1469     return n;
1470     }
1471     +#define clear_user __clear_user
1472    
1473     extern long strncpy_from_user(char *dest, const char __user *src, long count);
1474    
1475     @@ -358,7 +433,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __
1476     static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
1477     {
1478     kasan_check_write(dst, size);
1479     - return __copy_user_flushcache(dst, src, size);
1480     + return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
1481     }
1482     #endif
1483    
1484     diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
1485     index 067baace74a0..0c760db04858 100644
1486     --- a/arch/arm64/kernel/Makefile
1487     +++ b/arch/arm64/kernel/Makefile
1488     @@ -53,6 +53,10 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
1489     arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
1490     arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
1491    
1492     +ifeq ($(CONFIG_KVM),y)
1493     +arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
1494     +endif
1495     +
1496     obj-y += $(arm64-obj-y) vdso/ probes/
1497     obj-m += $(arm64-obj-m)
1498     head-y := head.o
1499     diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
1500     index 67368c7329c0..66be504edb6c 100644
1501     --- a/arch/arm64/kernel/arm64ksyms.c
1502     +++ b/arch/arm64/kernel/arm64ksyms.c
1503     @@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page);
1504     /* user mem (segment) */
1505     EXPORT_SYMBOL(__arch_copy_from_user);
1506     EXPORT_SYMBOL(__arch_copy_to_user);
1507     -EXPORT_SYMBOL(__clear_user);
1508     -EXPORT_SYMBOL(raw_copy_in_user);
1509     +EXPORT_SYMBOL(__arch_clear_user);
1510     +EXPORT_SYMBOL(__arch_copy_in_user);
1511    
1512     /* physical memory */
1513     EXPORT_SYMBOL(memstart_addr);
1514     diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
1515     index 71bf088f1e4b..af247d10252f 100644
1516     --- a/arch/arm64/kernel/asm-offsets.c
1517     +++ b/arch/arm64/kernel/asm-offsets.c
1518     @@ -24,6 +24,7 @@
1519     #include <linux/kvm_host.h>
1520     #include <linux/suspend.h>
1521     #include <asm/cpufeature.h>
1522     +#include <asm/fixmap.h>
1523     #include <asm/thread_info.h>
1524     #include <asm/memory.h>
1525     #include <asm/smp_plat.h>
1526     @@ -148,11 +149,14 @@ int main(void)
1527     DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
1528     DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
1529     DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
1530     -
1531     BLANK();
1532     DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
1533     DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
1534     DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next));
1535     DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val));
1536     + BLANK();
1537     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1538     + DEFINE(TRAMP_VALIAS, TRAMP_VALIAS);
1539     +#endif
1540     return 0;
1541     }
1542     diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
1543     new file mode 100644
1544     index 000000000000..e5de33513b5d
1545     --- /dev/null
1546     +++ b/arch/arm64/kernel/bpi.S
1547     @@ -0,0 +1,83 @@
1548     +/*
1549     + * Contains CPU specific branch predictor invalidation sequences
1550     + *
1551     + * Copyright (C) 2018 ARM Ltd.
1552     + *
1553     + * This program is free software; you can redistribute it and/or modify
1554     + * it under the terms of the GNU General Public License version 2 as
1555     + * published by the Free Software Foundation.
1556     + *
1557     + * This program is distributed in the hope that it will be useful,
1558     + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1559     + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1560     + * GNU General Public License for more details.
1561     + *
1562     + * You should have received a copy of the GNU General Public License
1563     + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1564     + */
1565     +
1566     +#include <linux/linkage.h>
1567     +#include <linux/arm-smccc.h>
1568     +
1569     +.macro ventry target
1570     + .rept 31
1571     + nop
1572     + .endr
1573     + b \target
1574     +.endm
1575     +
1576     +.macro vectors target
1577     + ventry \target + 0x000
1578     + ventry \target + 0x080
1579     + ventry \target + 0x100
1580     + ventry \target + 0x180
1581     +
1582     + ventry \target + 0x200
1583     + ventry \target + 0x280
1584     + ventry \target + 0x300
1585     + ventry \target + 0x380
1586     +
1587     + ventry \target + 0x400
1588     + ventry \target + 0x480
1589     + ventry \target + 0x500
1590     + ventry \target + 0x580
1591     +
1592     + ventry \target + 0x600
1593     + ventry \target + 0x680
1594     + ventry \target + 0x700
1595     + ventry \target + 0x780
1596     +.endm
1597     +
1598     + .align 11
1599     +ENTRY(__bp_harden_hyp_vecs_start)
1600     + .rept 4
1601     + vectors __kvm_hyp_vector
1602     + .endr
1603     +ENTRY(__bp_harden_hyp_vecs_end)
1604     +
1605     +ENTRY(__qcom_hyp_sanitize_link_stack_start)
1606     + stp x29, x30, [sp, #-16]!
1607     + .rept 16
1608     + bl . + 4
1609     + .endr
1610     + ldp x29, x30, [sp], #16
1611     +ENTRY(__qcom_hyp_sanitize_link_stack_end)
1612     +
1613     +.macro smccc_workaround_1 inst
1614     + sub sp, sp, #(8 * 4)
1615     + stp x2, x3, [sp, #(8 * 0)]
1616     + stp x0, x1, [sp, #(8 * 2)]
1617     + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
1618     + \inst #0
1619     + ldp x2, x3, [sp, #(8 * 0)]
1620     + ldp x0, x1, [sp, #(8 * 2)]
1621     + add sp, sp, #(8 * 4)
1622     +.endm
1623     +
1624     +ENTRY(__smccc_workaround_1_smc_start)
1625     + smccc_workaround_1 smc
1626     +ENTRY(__smccc_workaround_1_smc_end)
1627     +
1628     +ENTRY(__smccc_workaround_1_hvc_start)
1629     + smccc_workaround_1 hvc
1630     +ENTRY(__smccc_workaround_1_hvc_end)
1631     diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
1632     index 2a752cb2a0f3..8021b46c9743 100644
1633     --- a/arch/arm64/kernel/cpu-reset.S
1634     +++ b/arch/arm64/kernel/cpu-reset.S
1635     @@ -16,7 +16,7 @@
1636     #include <asm/virt.h>
1637    
1638     .text
1639     -.pushsection .idmap.text, "ax"
1640     +.pushsection .idmap.text, "awx"
1641    
1642     /*
1643     * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
1644     diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
1645     index 0e27f86ee709..07823595b7f0 100644
1646     --- a/arch/arm64/kernel/cpu_errata.c
1647     +++ b/arch/arm64/kernel/cpu_errata.c
1648     @@ -30,6 +30,20 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
1649     entry->midr_range_max);
1650     }
1651    
1652     +static bool __maybe_unused
1653     +is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
1654     +{
1655     + u32 model;
1656     +
1657     + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
1658     +
1659     + model = read_cpuid_id();
1660     + model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
1661     + MIDR_ARCHITECTURE_MASK;
1662     +
1663     + return model == entry->midr_model;
1664     +}
1665     +
1666     static bool
1667     has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
1668     int scope)
1669     @@ -46,6 +60,174 @@ static int cpu_enable_trap_ctr_access(void *__unused)
1670     return 0;
1671     }
1672    
1673     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1674     +#include <asm/mmu_context.h>
1675     +#include <asm/cacheflush.h>
1676     +
1677     +DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
1678     +
1679     +#ifdef CONFIG_KVM
1680     +extern char __qcom_hyp_sanitize_link_stack_start[];
1681     +extern char __qcom_hyp_sanitize_link_stack_end[];
1682     +extern char __smccc_workaround_1_smc_start[];
1683     +extern char __smccc_workaround_1_smc_end[];
1684     +extern char __smccc_workaround_1_hvc_start[];
1685     +extern char __smccc_workaround_1_hvc_end[];
1686     +
1687     +static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
1688     + const char *hyp_vecs_end)
1689     +{
1690     + void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
1691     + int i;
1692     +
1693     + for (i = 0; i < SZ_2K; i += 0x80)
1694     + memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
1695     +
1696     + flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
1697     +}
1698     +
1699     +static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
1700     + const char *hyp_vecs_start,
1701     + const char *hyp_vecs_end)
1702     +{
1703     + static int last_slot = -1;
1704     + static DEFINE_SPINLOCK(bp_lock);
1705     + int cpu, slot = -1;
1706     +
1707     + spin_lock(&bp_lock);
1708     + for_each_possible_cpu(cpu) {
1709     + if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
1710     + slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
1711     + break;
1712     + }
1713     + }
1714     +
1715     + if (slot == -1) {
1716     + last_slot++;
1717     + BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
1718     + / SZ_2K) <= last_slot);
1719     + slot = last_slot;
1720     + __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
1721     + }
1722     +
1723     + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
1724     + __this_cpu_write(bp_hardening_data.fn, fn);
1725     + spin_unlock(&bp_lock);
1726     +}
1727     +#else
1728     +#define __qcom_hyp_sanitize_link_stack_start NULL
1729     +#define __qcom_hyp_sanitize_link_stack_end NULL
1730     +#define __smccc_workaround_1_smc_start NULL
1731     +#define __smccc_workaround_1_smc_end NULL
1732     +#define __smccc_workaround_1_hvc_start NULL
1733     +#define __smccc_workaround_1_hvc_end NULL
1734     +
1735     +static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
1736     + const char *hyp_vecs_start,
1737     + const char *hyp_vecs_end)
1738     +{
1739     + __this_cpu_write(bp_hardening_data.fn, fn);
1740     +}
1741     +#endif /* CONFIG_KVM */
1742     +
1743     +static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
1744     + bp_hardening_cb_t fn,
1745     + const char *hyp_vecs_start,
1746     + const char *hyp_vecs_end)
1747     +{
1748     + u64 pfr0;
1749     +
1750     + if (!entry->matches(entry, SCOPE_LOCAL_CPU))
1751     + return;
1752     +
1753     + pfr0 = read_cpuid(ID_AA64PFR0_EL1);
1754     + if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
1755     + return;
1756     +
1757     + __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
1758     +}
1759     +
1760     +#include <uapi/linux/psci.h>
1761     +#include <linux/arm-smccc.h>
1762     +#include <linux/psci.h>
1763     +
1764     +static void call_smc_arch_workaround_1(void)
1765     +{
1766     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
1767     +}
1768     +
1769     +static void call_hvc_arch_workaround_1(void)
1770     +{
1771     + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
1772     +}
1773     +
1774     +static int enable_smccc_arch_workaround_1(void *data)
1775     +{
1776     + const struct arm64_cpu_capabilities *entry = data;
1777     + bp_hardening_cb_t cb;
1778     + void *smccc_start, *smccc_end;
1779     + struct arm_smccc_res res;
1780     +
1781     + if (!entry->matches(entry, SCOPE_LOCAL_CPU))
1782     + return 0;
1783     +
1784     + if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
1785     + return 0;
1786     +
1787     + switch (psci_ops.conduit) {
1788     + case PSCI_CONDUIT_HVC:
1789     + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1790     + ARM_SMCCC_ARCH_WORKAROUND_1, &res);
1791     + if (res.a0)
1792     + return 0;
1793     + cb = call_hvc_arch_workaround_1;
1794     + smccc_start = __smccc_workaround_1_hvc_start;
1795     + smccc_end = __smccc_workaround_1_hvc_end;
1796     + break;
1797     +
1798     + case PSCI_CONDUIT_SMC:
1799     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1800     + ARM_SMCCC_ARCH_WORKAROUND_1, &res);
1801     + if (res.a0)
1802     + return 0;
1803     + cb = call_smc_arch_workaround_1;
1804     + smccc_start = __smccc_workaround_1_smc_start;
1805     + smccc_end = __smccc_workaround_1_smc_end;
1806     + break;
1807     +
1808     + default:
1809     + return 0;
1810     + }
1811     +
1812     + install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
1813     +
1814     + return 0;
1815     +}
1816     +
1817     +static void qcom_link_stack_sanitization(void)
1818     +{
1819     + u64 tmp;
1820     +
1821     + asm volatile("mov %0, x30 \n"
1822     + ".rept 16 \n"
1823     + "bl . + 4 \n"
1824     + ".endr \n"
1825     + "mov x30, %0 \n"
1826     + : "=&r" (tmp));
1827     +}
1828     +
1829     +static int qcom_enable_link_stack_sanitization(void *data)
1830     +{
1831     + const struct arm64_cpu_capabilities *entry = data;
1832     +
1833     + install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
1834     + __qcom_hyp_sanitize_link_stack_start,
1835     + __qcom_hyp_sanitize_link_stack_end);
1836     +
1837     + return 0;
1838     +}
1839     +#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
1840     +
1841     #define MIDR_RANGE(model, min, max) \
1842     .def_scope = SCOPE_LOCAL_CPU, \
1843     .matches = is_affected_midr_range, \
1844     @@ -169,6 +351,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1845     MIDR_CPU_VAR_REV(0, 0),
1846     MIDR_CPU_VAR_REV(0, 0)),
1847     },
1848     + {
1849     + .desc = "Qualcomm Technologies Kryo erratum 1003",
1850     + .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
1851     + .def_scope = SCOPE_LOCAL_CPU,
1852     + .midr_model = MIDR_QCOM_KRYO,
1853     + .matches = is_kryo_midr,
1854     + },
1855     #endif
1856     #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
1857     {
1858     @@ -186,6 +375,47 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1859     .capability = ARM64_WORKAROUND_858921,
1860     MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
1861     },
1862     +#endif
1863     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1864     + {
1865     + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1866     + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
1867     + .enable = enable_smccc_arch_workaround_1,
1868     + },
1869     + {
1870     + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1871     + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
1872     + .enable = enable_smccc_arch_workaround_1,
1873     + },
1874     + {
1875     + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1876     + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
1877     + .enable = enable_smccc_arch_workaround_1,
1878     + },
1879     + {
1880     + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1881     + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
1882     + .enable = enable_smccc_arch_workaround_1,
1883     + },
1884     + {
1885     + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1886     + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
1887     + .enable = qcom_enable_link_stack_sanitization,
1888     + },
1889     + {
1890     + .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
1891     + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
1892     + },
1893     + {
1894     + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1895     + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
1896     + .enable = enable_smccc_arch_workaround_1,
1897     + },
1898     + {
1899     + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1900     + MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
1901     + .enable = enable_smccc_arch_workaround_1,
1902     + },
1903     #endif
1904     {
1905     }
1906     @@ -200,15 +430,18 @@ void verify_local_cpu_errata_workarounds(void)
1907     {
1908     const struct arm64_cpu_capabilities *caps = arm64_errata;
1909    
1910     - for (; caps->matches; caps++)
1911     - if (!cpus_have_cap(caps->capability) &&
1912     - caps->matches(caps, SCOPE_LOCAL_CPU)) {
1913     + for (; caps->matches; caps++) {
1914     + if (cpus_have_cap(caps->capability)) {
1915     + if (caps->enable)
1916     + caps->enable((void *)caps);
1917     + } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
1918     pr_crit("CPU%d: Requires work around for %s, not detected"
1919     " at boot time\n",
1920     smp_processor_id(),
1921     caps->desc ? : "an erratum");
1922     cpu_die_early();
1923     }
1924     + }
1925     }
1926    
1927     void update_cpu_errata_workarounds(void)
1928     diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
1929     index a73a5928f09b..46dee071bab1 100644
1930     --- a/arch/arm64/kernel/cpufeature.c
1931     +++ b/arch/arm64/kernel/cpufeature.c
1932     @@ -145,6 +145,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
1933     };
1934    
1935     static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
1936     + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
1937     + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
1938     ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
1939     FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
1940     ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
1941     @@ -846,6 +848,86 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
1942     ID_AA64PFR0_FP_SHIFT) < 0;
1943     }
1944    
1945     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1946     +static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
1947     +
1948     +static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
1949     + int __unused)
1950     +{
1951     + char const *str = "command line option";
1952     + u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1953     +
1954     + /*
1955     + * For reasons that aren't entirely clear, enabling KPTI on Cavium
1956     + * ThunderX leads to apparent I-cache corruption of kernel text, which
1957     + * ends as well as you might imagine. Don't even try.
1958     + */
1959     + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
1960     + str = "ARM64_WORKAROUND_CAVIUM_27456";
1961     + __kpti_forced = -1;
1962     + }
1963     +
1964     + /* Forced? */
1965     + if (__kpti_forced) {
1966     + pr_info_once("kernel page table isolation forced %s by %s\n",
1967     + __kpti_forced > 0 ? "ON" : "OFF", str);
1968     + return __kpti_forced > 0;
1969     + }
1970     +
1971     + /* Useful for KASLR robustness */
1972     + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
1973     + return true;
1974     +
1975     + /* Don't force KPTI for CPUs that are not vulnerable */
1976     + switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
1977     + case MIDR_CAVIUM_THUNDERX2:
1978     + case MIDR_BRCM_VULCAN:
1979     + return false;
1980     + }
1981     +
1982     + /* Defer to CPU feature registers */
1983     + return !cpuid_feature_extract_unsigned_field(pfr0,
1984     + ID_AA64PFR0_CSV3_SHIFT);
1985     +}
1986     +
1987     +static int kpti_install_ng_mappings(void *__unused)
1988     +{
1989     + typedef void (kpti_remap_fn)(int, int, phys_addr_t);
1990     + extern kpti_remap_fn idmap_kpti_install_ng_mappings;
1991     + kpti_remap_fn *remap_fn;
1992     +
1993     + static bool kpti_applied = false;
1994     + int cpu = smp_processor_id();
1995     +
1996     + if (kpti_applied)
1997     + return 0;
1998     +
1999     + remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
2000     +
2001     + cpu_install_idmap();
2002     + remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
2003     + cpu_uninstall_idmap();
2004     +
2005     + if (!cpu)
2006     + kpti_applied = true;
2007     +
2008     + return 0;
2009     +}
2010     +
2011     +static int __init parse_kpti(char *str)
2012     +{
2013     + bool enabled;
2014     + int ret = strtobool(str, &enabled);
2015     +
2016     + if (ret)
2017     + return ret;
2018     +
2019     + __kpti_forced = enabled ? 1 : -1;
2020     + return 0;
2021     +}
2022     +__setup("kpti=", parse_kpti);
2023     +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
2024     +
2025     static const struct arm64_cpu_capabilities arm64_features[] = {
2026     {
2027     .desc = "GIC system register CPU interface",
2028     @@ -932,6 +1014,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
2029     .def_scope = SCOPE_SYSTEM,
2030     .matches = hyp_offset_low,
2031     },
2032     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2033     + {
2034     + .desc = "Kernel page table isolation (KPTI)",
2035     + .capability = ARM64_UNMAP_KERNEL_AT_EL0,
2036     + .def_scope = SCOPE_SYSTEM,
2037     + .matches = unmap_kernel_at_el0,
2038     + .enable = kpti_install_ng_mappings,
2039     + },
2040     +#endif
2041     {
2042     /* FP/SIMD is not implemented */
2043     .capability = ARM64_HAS_NO_FPSIMD,
2044     @@ -1071,6 +1162,25 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
2045     cap_set_elf_hwcap(hwcaps);
2046     }
2047    
2048     +/*
2049     + * Check if the current CPU has a given feature capability.
2050     + * Should be called from non-preemptible context.
2051     + */
2052     +static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
2053     + unsigned int cap)
2054     +{
2055     + const struct arm64_cpu_capabilities *caps;
2056     +
2057     + if (WARN_ON(preemptible()))
2058     + return false;
2059     +
2060     + for (caps = cap_array; caps->matches; caps++)
2061     + if (caps->capability == cap &&
2062     + caps->matches(caps, SCOPE_LOCAL_CPU))
2063     + return true;
2064     + return false;
2065     +}
2066     +
2067     void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
2068     const char *info)
2069     {
2070     @@ -1106,7 +1216,7 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
2071     * uses an IPI, giving us a PSTATE that disappears when
2072     * we return.
2073     */
2074     - stop_machine(caps->enable, NULL, cpu_online_mask);
2075     + stop_machine(caps->enable, (void *)caps, cpu_online_mask);
2076     }
2077     }
2078     }
2079     @@ -1134,8 +1244,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
2080     }
2081    
2082     static void
2083     -verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
2084     +verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
2085     {
2086     + const struct arm64_cpu_capabilities *caps = caps_list;
2087     for (; caps->matches; caps++) {
2088     if (!cpus_have_cap(caps->capability))
2089     continue;
2090     @@ -1143,13 +1254,13 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
2091     * If the new CPU misses an advertised feature, we cannot proceed
2092     * further, park the cpu.
2093     */
2094     - if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
2095     + if (!__this_cpu_has_cap(caps_list, caps->capability)) {
2096     pr_crit("CPU%d: missing feature: %s\n",
2097     smp_processor_id(), caps->desc);
2098     cpu_die_early();
2099     }
2100     if (caps->enable)
2101     - caps->enable(NULL);
2102     + caps->enable((void *)caps);
2103     }
2104     }
2105    
2106     @@ -1225,25 +1336,6 @@ static void __init mark_const_caps_ready(void)
2107     static_branch_enable(&arm64_const_caps_ready);
2108     }
2109    
2110     -/*
2111     - * Check if the current CPU has a given feature capability.
2112     - * Should be called from non-preemptible context.
2113     - */
2114     -static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
2115     - unsigned int cap)
2116     -{
2117     - const struct arm64_cpu_capabilities *caps;
2118     -
2119     - if (WARN_ON(preemptible()))
2120     - return false;
2121     -
2122     - for (caps = cap_array; caps->desc; caps++)
2123     - if (caps->capability == cap && caps->matches)
2124     - return caps->matches(caps, SCOPE_LOCAL_CPU);
2125     -
2126     - return false;
2127     -}
2128     -
2129     extern const struct arm64_cpu_capabilities arm64_errata[];
2130    
2131     bool this_cpu_has_cap(unsigned int cap)
2132     diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
2133     index 6d14b8f29b5f..78647eda6d0d 100644
2134     --- a/arch/arm64/kernel/entry.S
2135     +++ b/arch/arm64/kernel/entry.S
2136     @@ -28,6 +28,8 @@
2137     #include <asm/errno.h>
2138     #include <asm/esr.h>
2139     #include <asm/irq.h>
2140     +#include <asm/memory.h>
2141     +#include <asm/mmu.h>
2142     #include <asm/processor.h>
2143     #include <asm/ptrace.h>
2144     #include <asm/thread_info.h>
2145     @@ -69,8 +71,21 @@
2146     #define BAD_FIQ 2
2147     #define BAD_ERROR 3
2148    
2149     - .macro kernel_ventry label
2150     + .macro kernel_ventry, el, label, regsize = 64
2151     .align 7
2152     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2153     +alternative_if ARM64_UNMAP_KERNEL_AT_EL0
2154     + .if \el == 0
2155     + .if \regsize == 64
2156     + mrs x30, tpidrro_el0
2157     + msr tpidrro_el0, xzr
2158     + .else
2159     + mov x30, xzr
2160     + .endif
2161     + .endif
2162     +alternative_else_nop_endif
2163     +#endif
2164     +
2165     sub sp, sp, #S_FRAME_SIZE
2166     #ifdef CONFIG_VMAP_STACK
2167     /*
2168     @@ -82,7 +97,7 @@
2169     tbnz x0, #THREAD_SHIFT, 0f
2170     sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
2171     sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
2172     - b \label
2173     + b el\()\el\()_\label
2174    
2175     0:
2176     /*
2177     @@ -114,7 +129,12 @@
2178     sub sp, sp, x0
2179     mrs x0, tpidrro_el0
2180     #endif
2181     - b \label
2182     + b el\()\el\()_\label
2183     + .endm
2184     +
2185     + .macro tramp_alias, dst, sym
2186     + mov_q \dst, TRAMP_VALIAS
2187     + add \dst, \dst, #(\sym - .entry.tramp.text)
2188     .endm
2189    
2190     .macro kernel_entry, el, regsize = 64
2191     @@ -147,10 +167,10 @@
2192     .else
2193     add x21, sp, #S_FRAME_SIZE
2194     get_thread_info tsk
2195     - /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
2196     + /* Save the task's original addr_limit and set USER_DS */
2197     ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
2198     str x20, [sp, #S_ORIG_ADDR_LIMIT]
2199     - mov x20, #TASK_SIZE_64
2200     + mov x20, #USER_DS
2201     str x20, [tsk, #TSK_TI_ADDR_LIMIT]
2202     /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
2203     .endif /* \el == 0 */
2204     @@ -185,7 +205,7 @@ alternative_else_nop_endif
2205    
2206     .if \el != 0
2207     mrs x21, ttbr0_el1
2208     - tst x21, #0xffff << 48 // Check for the reserved ASID
2209     + tst x21, #TTBR_ASID_MASK // Check for the reserved ASID
2210     orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
2211     b.eq 1f // TTBR0 access already disabled
2212     and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
2213     @@ -248,7 +268,7 @@ alternative_else_nop_endif
2214     tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
2215     .endif
2216    
2217     - __uaccess_ttbr0_enable x0
2218     + __uaccess_ttbr0_enable x0, x1
2219    
2220     .if \el == 0
2221     /*
2222     @@ -257,7 +277,7 @@ alternative_else_nop_endif
2223     * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
2224     * corruption).
2225     */
2226     - post_ttbr0_update_workaround
2227     + bl post_ttbr_update_workaround
2228     .endif
2229     1:
2230     .if \el != 0
2231     @@ -269,18 +289,20 @@ alternative_else_nop_endif
2232     .if \el == 0
2233     ldr x23, [sp, #S_SP] // load return stack pointer
2234     msr sp_el0, x23
2235     + tst x22, #PSR_MODE32_BIT // native task?
2236     + b.eq 3f
2237     +
2238     #ifdef CONFIG_ARM64_ERRATUM_845719
2239     alternative_if ARM64_WORKAROUND_845719
2240     - tbz x22, #4, 1f
2241     #ifdef CONFIG_PID_IN_CONTEXTIDR
2242     mrs x29, contextidr_el1
2243     msr contextidr_el1, x29
2244     #else
2245     msr contextidr_el1, xzr
2246     #endif
2247     -1:
2248     alternative_else_nop_endif
2249     #endif
2250     +3:
2251     .endif
2252    
2253     msr elr_el1, x21 // set up the return data
2254     @@ -302,7 +324,21 @@ alternative_else_nop_endif
2255     ldp x28, x29, [sp, #16 * 14]
2256     ldr lr, [sp, #S_LR]
2257     add sp, sp, #S_FRAME_SIZE // restore sp
2258     - eret // return to kernel
2259     +
2260     + .if \el == 0
2261     +alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
2262     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2263     + bne 4f
2264     + msr far_el1, x30
2265     + tramp_alias x30, tramp_exit_native
2266     + br x30
2267     +4:
2268     + tramp_alias x30, tramp_exit_compat
2269     + br x30
2270     +#endif
2271     + .else
2272     + eret
2273     + .endif
2274     .endm
2275    
2276     .macro irq_stack_entry
2277     @@ -342,6 +378,7 @@ alternative_else_nop_endif
2278     * x7 is reserved for the system call number in 32-bit mode.
2279     */
2280     wsc_nr .req w25 // number of system calls
2281     +xsc_nr .req x25 // number of system calls (zero-extended)
2282     wscno .req w26 // syscall number
2283     xscno .req x26 // syscall number (zero-extended)
2284     stbl .req x27 // syscall table pointer
2285     @@ -367,31 +404,31 @@ tsk .req x28 // current thread_info
2286    
2287     .align 11
2288     ENTRY(vectors)
2289     - kernel_ventry el1_sync_invalid // Synchronous EL1t
2290     - kernel_ventry el1_irq_invalid // IRQ EL1t
2291     - kernel_ventry el1_fiq_invalid // FIQ EL1t
2292     - kernel_ventry el1_error_invalid // Error EL1t
2293     + kernel_ventry 1, sync_invalid // Synchronous EL1t
2294     + kernel_ventry 1, irq_invalid // IRQ EL1t
2295     + kernel_ventry 1, fiq_invalid // FIQ EL1t
2296     + kernel_ventry 1, error_invalid // Error EL1t
2297    
2298     - kernel_ventry el1_sync // Synchronous EL1h
2299     - kernel_ventry el1_irq // IRQ EL1h
2300     - kernel_ventry el1_fiq_invalid // FIQ EL1h
2301     - kernel_ventry el1_error // Error EL1h
2302     + kernel_ventry 1, sync // Synchronous EL1h
2303     + kernel_ventry 1, irq // IRQ EL1h
2304     + kernel_ventry 1, fiq_invalid // FIQ EL1h
2305     + kernel_ventry 1, error // Error EL1h
2306    
2307     - kernel_ventry el0_sync // Synchronous 64-bit EL0
2308     - kernel_ventry el0_irq // IRQ 64-bit EL0
2309     - kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0
2310     - kernel_ventry el0_error // Error 64-bit EL0
2311     + kernel_ventry 0, sync // Synchronous 64-bit EL0
2312     + kernel_ventry 0, irq // IRQ 64-bit EL0
2313     + kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0
2314     + kernel_ventry 0, error // Error 64-bit EL0
2315    
2316     #ifdef CONFIG_COMPAT
2317     - kernel_ventry el0_sync_compat // Synchronous 32-bit EL0
2318     - kernel_ventry el0_irq_compat // IRQ 32-bit EL0
2319     - kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
2320     - kernel_ventry el0_error_compat // Error 32-bit EL0
2321     + kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
2322     + kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
2323     + kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0
2324     + kernel_ventry 0, error_compat, 32 // Error 32-bit EL0
2325     #else
2326     - kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0
2327     - kernel_ventry el0_irq_invalid // IRQ 32-bit EL0
2328     - kernel_ventry el0_fiq_invalid // FIQ 32-bit EL0
2329     - kernel_ventry el0_error_invalid // Error 32-bit EL0
2330     + kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
2331     + kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
2332     + kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
2333     + kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
2334     #endif
2335     END(vectors)
2336    
2337     @@ -685,12 +722,15 @@ el0_ia:
2338     * Instruction abort handling
2339     */
2340     mrs x26, far_el1
2341     - enable_daif
2342     + enable_da_f
2343     +#ifdef CONFIG_TRACE_IRQFLAGS
2344     + bl trace_hardirqs_off
2345     +#endif
2346     ct_user_exit
2347     mov x0, x26
2348     mov x1, x25
2349     mov x2, sp
2350     - bl do_mem_abort
2351     + bl do_el0_ia_bp_hardening
2352     b ret_to_user
2353     el0_fpsimd_acc:
2354     /*
2355     @@ -727,7 +767,10 @@ el0_sp_pc:
2356     * Stack or PC alignment exception handling
2357     */
2358     mrs x26, far_el1
2359     - enable_daif
2360     + enable_da_f
2361     +#ifdef CONFIG_TRACE_IRQFLAGS
2362     + bl trace_hardirqs_off
2363     +#endif
2364     ct_user_exit
2365     mov x0, x26
2366     mov x1, x25
2367     @@ -785,6 +828,11 @@ el0_irq_naked:
2368     #endif
2369    
2370     ct_user_exit
2371     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
2372     + tbz x22, #55, 1f
2373     + bl do_el0_irq_bp_hardening
2374     +1:
2375     +#endif
2376     irq_handler
2377    
2378     #ifdef CONFIG_TRACE_IRQFLAGS
2379     @@ -896,6 +944,7 @@ el0_svc_naked: // compat entry point
2380     b.ne __sys_trace
2381     cmp wscno, wsc_nr // check upper syscall limit
2382     b.hs ni_sys
2383     + mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number
2384     ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
2385     blr x16 // call sys_* routine
2386     b ret_fast_syscall
2387     @@ -943,6 +992,117 @@ __ni_sys_trace:
2388    
2389     .popsection // .entry.text
2390    
2391     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2392     +/*
2393     + * Exception vectors trampoline.
2394     + */
2395     + .pushsection ".entry.tramp.text", "ax"
2396     +
2397     + .macro tramp_map_kernel, tmp
2398     + mrs \tmp, ttbr1_el1
2399     + sub \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
2400     + bic \tmp, \tmp, #USER_ASID_FLAG
2401     + msr ttbr1_el1, \tmp
2402     +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
2403     +alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
2404     + /* ASID already in \tmp[63:48] */
2405     + movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
2406     + movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
2407     + /* 2MB boundary containing the vectors, so we nobble the walk cache */
2408     + movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
2409     + isb
2410     + tlbi vae1, \tmp
2411     + dsb nsh
2412     +alternative_else_nop_endif
2413     +#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
2414     + .endm
2415     +
2416     + .macro tramp_unmap_kernel, tmp
2417     + mrs \tmp, ttbr1_el1
2418     + add \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
2419     + orr \tmp, \tmp, #USER_ASID_FLAG
2420     + msr ttbr1_el1, \tmp
2421     + /*
2422     + * We avoid running the post_ttbr_update_workaround here because
2423     + * it's only needed by Cavium ThunderX, which requires KPTI to be
2424     + * disabled.
2425     + */
2426     + .endm
2427     +
2428     + .macro tramp_ventry, regsize = 64
2429     + .align 7
2430     +1:
2431     + .if \regsize == 64
2432     + msr tpidrro_el0, x30 // Restored in kernel_ventry
2433     + .endif
2434     + /*
2435     + * Defend against branch aliasing attacks by pushing a dummy
2436     + * entry onto the return stack and using a RET instruction to
2437     + * enter the full-fat kernel vectors.
2438     + */
2439     + bl 2f
2440     + b .
2441     +2:
2442     + tramp_map_kernel x30
2443     +#ifdef CONFIG_RANDOMIZE_BASE
2444     + adr x30, tramp_vectors + PAGE_SIZE
2445     +alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
2446     + ldr x30, [x30]
2447     +#else
2448     + ldr x30, =vectors
2449     +#endif
2450     + prfm plil1strm, [x30, #(1b - tramp_vectors)]
2451     + msr vbar_el1, x30
2452     + add x30, x30, #(1b - tramp_vectors)
2453     + isb
2454     + ret
2455     + .endm
2456     +
2457     + .macro tramp_exit, regsize = 64
2458     + adr x30, tramp_vectors
2459     + msr vbar_el1, x30
2460     + tramp_unmap_kernel x30
2461     + .if \regsize == 64
2462     + mrs x30, far_el1
2463     + .endif
2464     + eret
2465     + .endm
2466     +
2467     + .align 11
2468     +ENTRY(tramp_vectors)
2469     + .space 0x400
2470     +
2471     + tramp_ventry
2472     + tramp_ventry
2473     + tramp_ventry
2474     + tramp_ventry
2475     +
2476     + tramp_ventry 32
2477     + tramp_ventry 32
2478     + tramp_ventry 32
2479     + tramp_ventry 32
2480     +END(tramp_vectors)
2481     +
2482     +ENTRY(tramp_exit_native)
2483     + tramp_exit
2484     +END(tramp_exit_native)
2485     +
2486     +ENTRY(tramp_exit_compat)
2487     + tramp_exit 32
2488     +END(tramp_exit_compat)
2489     +
2490     + .ltorg
2491     + .popsection // .entry.tramp.text
2492     +#ifdef CONFIG_RANDOMIZE_BASE
2493     + .pushsection ".rodata", "a"
2494     + .align PAGE_SHIFT
2495     + .globl __entry_tramp_data_start
2496     +__entry_tramp_data_start:
2497     + .quad vectors
2498     + .popsection // .rodata
2499     +#endif /* CONFIG_RANDOMIZE_BASE */
2500     +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
2501     +
2502     /*
2503     * Special system call wrappers.
2504     */
2505     diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
2506     index e3cb9fbf96b6..9b655d69c471 100644
2507     --- a/arch/arm64/kernel/head.S
2508     +++ b/arch/arm64/kernel/head.S
2509     @@ -371,7 +371,7 @@ ENDPROC(__primary_switched)
2510     * end early head section, begin head code that is also used for
2511     * hotplug and needs to have the same protections as the text region
2512     */
2513     - .section ".idmap.text","ax"
2514     + .section ".idmap.text","awx"
2515    
2516     ENTRY(kimage_vaddr)
2517     .quad _text - TEXT_OFFSET
2518     diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
2519     index 6b7dcf4310ac..583fd8154695 100644
2520     --- a/arch/arm64/kernel/process.c
2521     +++ b/arch/arm64/kernel/process.c
2522     @@ -370,16 +370,14 @@ void tls_preserve_current_state(void)
2523    
2524     static void tls_thread_switch(struct task_struct *next)
2525     {
2526     - unsigned long tpidr, tpidrro;
2527     -
2528     tls_preserve_current_state();
2529    
2530     - tpidr = *task_user_tls(next);
2531     - tpidrro = is_compat_thread(task_thread_info(next)) ?
2532     - next->thread.tp_value : 0;
2533     + if (is_compat_thread(task_thread_info(next)))
2534     + write_sysreg(next->thread.tp_value, tpidrro_el0);
2535     + else if (!arm64_kernel_unmapped_at_el0())
2536     + write_sysreg(0, tpidrro_el0);
2537    
2538     - write_sysreg(tpidr, tpidr_el0);
2539     - write_sysreg(tpidrro, tpidrro_el0);
2540     + write_sysreg(*task_user_tls(next), tpidr_el0);
2541     }
2542    
2543     /* Restore the UAO state depending on next's addr_limit */
2544     diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
2545     index 10dd16d7902d..bebec8ef9372 100644
2546     --- a/arch/arm64/kernel/sleep.S
2547     +++ b/arch/arm64/kernel/sleep.S
2548     @@ -96,7 +96,7 @@ ENTRY(__cpu_suspend_enter)
2549     ret
2550     ENDPROC(__cpu_suspend_enter)
2551    
2552     - .pushsection ".idmap.text", "ax"
2553     + .pushsection ".idmap.text", "awx"
2554     ENTRY(cpu_resume)
2555     bl el2_setup // if in EL2 drop to EL1 cleanly
2556     bl __cpu_setup
2557     diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
2558     index 7da3e5c366a0..ddfd3c0942f7 100644
2559     --- a/arch/arm64/kernel/vmlinux.lds.S
2560     +++ b/arch/arm64/kernel/vmlinux.lds.S
2561     @@ -57,6 +57,17 @@ jiffies = jiffies_64;
2562     #define HIBERNATE_TEXT
2563     #endif
2564    
2565     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2566     +#define TRAMP_TEXT \
2567     + . = ALIGN(PAGE_SIZE); \
2568     + VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \
2569     + *(.entry.tramp.text) \
2570     + . = ALIGN(PAGE_SIZE); \
2571     + VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
2572     +#else
2573     +#define TRAMP_TEXT
2574     +#endif
2575     +
2576     /*
2577     * The size of the PE/COFF section that covers the kernel image, which
2578     * runs from stext to _edata, must be a round multiple of the PE/COFF
2579     @@ -113,6 +124,7 @@ SECTIONS
2580     HYPERVISOR_TEXT
2581     IDMAP_TEXT
2582     HIBERNATE_TEXT
2583     + TRAMP_TEXT
2584     *(.fixup)
2585     *(.gnu.warning)
2586     . = ALIGN(16);
2587     @@ -214,6 +226,11 @@ SECTIONS
2588     . += RESERVED_TTBR0_SIZE;
2589     #endif
2590    
2591     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2592     + tramp_pg_dir = .;
2593     + . += PAGE_SIZE;
2594     +#endif
2595     +
2596     __pecoff_data_size = ABSOLUTE(. - __initdata_begin);
2597     _end = .;
2598    
2599     @@ -234,7 +251,10 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
2600     ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
2601     <= SZ_4K, "Hibernate exit text too big or misaligned")
2602     #endif
2603     -
2604     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2605     +ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
2606     + "Entry trampoline text too big")
2607     +#endif
2608     /*
2609     * If padding is applied before .head.text, virt<->phys conversions will fail.
2610     */
2611     diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
2612     index e60494f1eef9..c6c59356aa88 100644
2613     --- a/arch/arm64/kvm/handle_exit.c
2614     +++ b/arch/arm64/kvm/handle_exit.c
2615     @@ -22,12 +22,13 @@
2616     #include <linux/kvm.h>
2617     #include <linux/kvm_host.h>
2618    
2619     +#include <kvm/arm_psci.h>
2620     +
2621     #include <asm/esr.h>
2622     #include <asm/kvm_asm.h>
2623     #include <asm/kvm_coproc.h>
2624     #include <asm/kvm_emulate.h>
2625     #include <asm/kvm_mmu.h>
2626     -#include <asm/kvm_psci.h>
2627     #include <asm/debug-monitors.h>
2628    
2629     #define CREATE_TRACE_POINTS
2630     @@ -43,7 +44,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
2631     kvm_vcpu_hvc_get_imm(vcpu));
2632     vcpu->stat.hvc_exit_stat++;
2633    
2634     - ret = kvm_psci_call(vcpu);
2635     + ret = kvm_hvc_call_handler(vcpu);
2636     if (ret < 0) {
2637     vcpu_set_reg(vcpu, 0, ~0UL);
2638     return 1;
2639     @@ -54,7 +55,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
2640    
2641     static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
2642     {
2643     + /*
2644     + * "If an SMC instruction executed at Non-secure EL1 is
2645     + * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
2646     + * Trap exception, not a Secure Monitor Call exception [...]"
2647     + *
2648     + * We need to advance the PC after the trap, as it would
2649     + * otherwise return to the same address...
2650     + */
2651     vcpu_set_reg(vcpu, 0, ~0UL);
2652     + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
2653     return 1;
2654     }
2655    
2656     diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
2657     index 12ee62d6d410..9c45c6af1f58 100644
2658     --- a/arch/arm64/kvm/hyp/entry.S
2659     +++ b/arch/arm64/kvm/hyp/entry.S
2660     @@ -196,3 +196,15 @@ alternative_endif
2661    
2662     eret
2663     ENDPROC(__fpsimd_guest_restore)
2664     +
2665     +ENTRY(__qcom_hyp_sanitize_btac_predictors)
2666     + /**
2667     + * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700)
2668     + * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls
2669     + * b15-b0: contains SiP functionID
2670     + */
2671     + movz x0, #0x1700
2672     + movk x0, #0xc200, lsl #16
2673     + smc #0
2674     + ret
2675     +ENDPROC(__qcom_hyp_sanitize_btac_predictors)
2676     diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
2677     index 5170ce1021da..f49b53331d28 100644
2678     --- a/arch/arm64/kvm/hyp/hyp-entry.S
2679     +++ b/arch/arm64/kvm/hyp/hyp-entry.S
2680     @@ -15,6 +15,7 @@
2681     * along with this program. If not, see <http://www.gnu.org/licenses/>.
2682     */
2683    
2684     +#include <linux/arm-smccc.h>
2685     #include <linux/linkage.h>
2686    
2687     #include <asm/alternative.h>
2688     @@ -64,10 +65,11 @@ alternative_endif
2689     lsr x0, x1, #ESR_ELx_EC_SHIFT
2690    
2691     cmp x0, #ESR_ELx_EC_HVC64
2692     + ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
2693     b.ne el1_trap
2694    
2695     - mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest
2696     - cbnz x1, el1_trap // called HVC
2697     + mrs x1, vttbr_el2 // If vttbr is valid, the guest
2698     + cbnz x1, el1_hvc_guest // called HVC
2699    
2700     /* Here, we're pretty sure the host called HVC. */
2701     ldp x0, x1, [sp], #16
2702     @@ -100,6 +102,20 @@ alternative_endif
2703    
2704     eret
2705    
2706     +el1_hvc_guest:
2707     + /*
2708     + * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
2709     + * The workaround has already been applied on the host,
2710     + * so let's quickly get back to the guest. We don't bother
2711     + * restoring x1, as it can be clobbered anyway.
2712     + */
2713     + ldr x1, [sp] // Guest's x0
2714     + eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
2715     + cbnz w1, el1_trap
2716     + mov x0, x1
2717     + add sp, sp, #16
2718     + eret
2719     +
2720     el1_trap:
2721     /*
2722     * x0: ESR_EC
2723     diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
2724     index f7c651f3a8c0..0b5ab4d8b57d 100644
2725     --- a/arch/arm64/kvm/hyp/switch.c
2726     +++ b/arch/arm64/kvm/hyp/switch.c
2727     @@ -17,6 +17,9 @@
2728    
2729     #include <linux/types.h>
2730     #include <linux/jump_label.h>
2731     +#include <uapi/linux/psci.h>
2732     +
2733     +#include <kvm/arm_psci.h>
2734    
2735     #include <asm/kvm_asm.h>
2736     #include <asm/kvm_emulate.h>
2737     @@ -52,7 +55,7 @@ static void __hyp_text __activate_traps_vhe(void)
2738     val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
2739     write_sysreg(val, cpacr_el1);
2740    
2741     - write_sysreg(__kvm_hyp_vector, vbar_el1);
2742     + write_sysreg(kvm_get_hyp_vector(), vbar_el1);
2743     }
2744    
2745     static void __hyp_text __activate_traps_nvhe(void)
2746     @@ -393,6 +396,14 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
2747     /* 0 falls through to be handled out of EL2 */
2748     }
2749    
2750     + if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
2751     + u32 midr = read_cpuid_id();
2752     +
2753     + /* Apply BTAC predictors mitigation to all Falkor chips */
2754     + if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
2755     + __qcom_hyp_sanitize_btac_predictors();
2756     + }
2757     +
2758     fp_enabled = __fpsimd_enabled();
2759    
2760     __sysreg_save_guest_state(guest_ctxt);
2761     diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
2762     index e88fb99c1561..21ba0b29621b 100644
2763     --- a/arch/arm64/lib/clear_user.S
2764     +++ b/arch/arm64/lib/clear_user.S
2765     @@ -21,7 +21,7 @@
2766    
2767     .text
2768    
2769     -/* Prototype: int __clear_user(void *addr, size_t sz)
2770     +/* Prototype: int __arch_clear_user(void *addr, size_t sz)
2771     * Purpose : clear some user memory
2772     * Params : addr - user memory address to clear
2773     * : sz - number of bytes to clear
2774     @@ -29,8 +29,8 @@
2775     *
2776     * Alignment fixed up by hardware.
2777     */
2778     -ENTRY(__clear_user)
2779     - uaccess_enable_not_uao x2, x3
2780     +ENTRY(__arch_clear_user)
2781     + uaccess_enable_not_uao x2, x3, x4
2782     mov x2, x1 // save the size for fixup return
2783     subs x1, x1, #8
2784     b.mi 2f
2785     @@ -50,9 +50,9 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
2786     b.mi 5f
2787     uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
2788     5: mov x0, #0
2789     - uaccess_disable_not_uao x2
2790     + uaccess_disable_not_uao x2, x3
2791     ret
2792     -ENDPROC(__clear_user)
2793     +ENDPROC(__arch_clear_user)
2794    
2795     .section .fixup,"ax"
2796     .align 2
2797     diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
2798     index 4b5d826895ff..20305d485046 100644
2799     --- a/arch/arm64/lib/copy_from_user.S
2800     +++ b/arch/arm64/lib/copy_from_user.S
2801     @@ -64,10 +64,10 @@
2802    
2803     end .req x5
2804     ENTRY(__arch_copy_from_user)
2805     - uaccess_enable_not_uao x3, x4
2806     + uaccess_enable_not_uao x3, x4, x5
2807     add end, x0, x2
2808     #include "copy_template.S"
2809     - uaccess_disable_not_uao x3
2810     + uaccess_disable_not_uao x3, x4
2811     mov x0, #0 // Nothing to copy
2812     ret
2813     ENDPROC(__arch_copy_from_user)
2814     diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
2815     index b24a830419ad..54b75deb1d16 100644
2816     --- a/arch/arm64/lib/copy_in_user.S
2817     +++ b/arch/arm64/lib/copy_in_user.S
2818     @@ -64,14 +64,15 @@
2819     .endm
2820    
2821     end .req x5
2822     -ENTRY(raw_copy_in_user)
2823     - uaccess_enable_not_uao x3, x4
2824     +
2825     +ENTRY(__arch_copy_in_user)
2826     + uaccess_enable_not_uao x3, x4, x5
2827     add end, x0, x2
2828     #include "copy_template.S"
2829     - uaccess_disable_not_uao x3
2830     + uaccess_disable_not_uao x3, x4
2831     mov x0, #0
2832     ret
2833     -ENDPROC(raw_copy_in_user)
2834     +ENDPROC(__arch_copy_in_user)
2835    
2836     .section .fixup,"ax"
2837     .align 2
2838     diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
2839     index 351f0766f7a6..fda6172d6b88 100644
2840     --- a/arch/arm64/lib/copy_to_user.S
2841     +++ b/arch/arm64/lib/copy_to_user.S
2842     @@ -63,10 +63,10 @@
2843    
2844     end .req x5
2845     ENTRY(__arch_copy_to_user)
2846     - uaccess_enable_not_uao x3, x4
2847     + uaccess_enable_not_uao x3, x4, x5
2848     add end, x0, x2
2849     #include "copy_template.S"
2850     - uaccess_disable_not_uao x3
2851     + uaccess_disable_not_uao x3, x4
2852     mov x0, #0
2853     ret
2854     ENDPROC(__arch_copy_to_user)
2855     diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
2856     index 7f1dbe962cf5..91464e7f77cc 100644
2857     --- a/arch/arm64/mm/cache.S
2858     +++ b/arch/arm64/mm/cache.S
2859     @@ -49,7 +49,7 @@ ENTRY(flush_icache_range)
2860     * - end - virtual end address of region
2861     */
2862     ENTRY(__flush_cache_user_range)
2863     - uaccess_ttbr0_enable x2, x3
2864     + uaccess_ttbr0_enable x2, x3, x4
2865     dcache_line_size x2, x3
2866     sub x3, x2, #1
2867     bic x4, x0, x3
2868     @@ -72,7 +72,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
2869     isb
2870     mov x0, #0
2871     1:
2872     - uaccess_ttbr0_disable x1
2873     + uaccess_ttbr0_disable x1, x2
2874     ret
2875     9:
2876     mov x0, #-EFAULT
2877     diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
2878     index 6f4017046323..b1ac80fba578 100644
2879     --- a/arch/arm64/mm/context.c
2880     +++ b/arch/arm64/mm/context.c
2881     @@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending;
2882    
2883     #define ASID_MASK (~GENMASK(asid_bits - 1, 0))
2884     #define ASID_FIRST_VERSION (1UL << asid_bits)
2885     -#define NUM_USER_ASIDS ASID_FIRST_VERSION
2886     +
2887     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2888     +#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1)
2889     +#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1)
2890     +#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK)
2891     +#else
2892     +#define NUM_USER_ASIDS (ASID_FIRST_VERSION)
2893     +#define asid2idx(asid) ((asid) & ~ASID_MASK)
2894     +#define idx2asid(idx) asid2idx(idx)
2895     +#endif
2896    
2897     /* Get the ASIDBits supported by the current CPU */
2898     static u32 get_cpu_asid_bits(void)
2899     @@ -79,13 +88,6 @@ void verify_cpu_asid_bits(void)
2900     }
2901     }
2902    
2903     -static void set_reserved_asid_bits(void)
2904     -{
2905     - if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) &&
2906     - cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003))
2907     - __set_bit(FALKOR_RESERVED_ASID, asid_map);
2908     -}
2909     -
2910     static void flush_context(unsigned int cpu)
2911     {
2912     int i;
2913     @@ -94,8 +96,6 @@ static void flush_context(unsigned int cpu)
2914     /* Update the list of reserved ASIDs and the ASID bitmap. */
2915     bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
2916    
2917     - set_reserved_asid_bits();
2918     -
2919     for_each_possible_cpu(i) {
2920     asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
2921     /*
2922     @@ -107,7 +107,7 @@ static void flush_context(unsigned int cpu)
2923     */
2924     if (asid == 0)
2925     asid = per_cpu(reserved_asids, i);
2926     - __set_bit(asid & ~ASID_MASK, asid_map);
2927     + __set_bit(asid2idx(asid), asid_map);
2928     per_cpu(reserved_asids, i) = asid;
2929     }
2930    
2931     @@ -162,16 +162,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
2932     * We had a valid ASID in a previous life, so try to re-use
2933     * it if possible.
2934     */
2935     - asid &= ~ASID_MASK;
2936     - if (!__test_and_set_bit(asid, asid_map))
2937     + if (!__test_and_set_bit(asid2idx(asid), asid_map))
2938     return newasid;
2939     }
2940    
2941     /*
2942     * Allocate a free ASID. If we can't find one, take a note of the
2943     - * currently active ASIDs and mark the TLBs as requiring flushes.
2944     - * We always count from ASID #1, as we use ASID #0 when setting a
2945     - * reserved TTBR0 for the init_mm.
2946     + * currently active ASIDs and mark the TLBs as requiring flushes. We
2947     + * always count from ASID #2 (index 1), as we use ASID #0 when setting
2948     + * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
2949     + * pairs.
2950     */
2951     asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
2952     if (asid != NUM_USER_ASIDS)
2953     @@ -188,7 +188,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
2954     set_asid:
2955     __set_bit(asid, asid_map);
2956     cur_idx = asid;
2957     - return asid | generation;
2958     + return idx2asid(asid) | generation;
2959     }
2960    
2961     void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
2962     @@ -231,6 +231,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
2963     raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
2964    
2965     switch_mm_fastpath:
2966     +
2967     + arm64_apply_bp_hardening();
2968     +
2969     /*
2970     * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
2971     * emulating PAN.
2972     @@ -239,6 +242,15 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
2973     cpu_switch_mm(mm->pgd, mm);
2974     }
2975    
2976     +/* Errata workaround post TTBRx_EL1 update. */
2977     +asmlinkage void post_ttbr_update_workaround(void)
2978     +{
2979     + asm(ALTERNATIVE("nop; nop; nop",
2980     + "ic iallu; dsb nsh; isb",
2981     + ARM64_WORKAROUND_CAVIUM_27456,
2982     + CONFIG_CAVIUM_ERRATUM_27456));
2983     +}
2984     +
2985     static int asids_init(void)
2986     {
2987     asid_bits = get_cpu_asid_bits();
2988     @@ -254,8 +266,6 @@ static int asids_init(void)
2989     panic("Failed to allocate bitmap for %lu ASIDs\n",
2990     NUM_USER_ASIDS);
2991    
2992     - set_reserved_asid_bits();
2993     -
2994     pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
2995     return 0;
2996     }
2997     diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
2998     index 9b7f89df49db..dd8f5197b549 100644
2999     --- a/arch/arm64/mm/fault.c
3000     +++ b/arch/arm64/mm/fault.c
3001     @@ -240,7 +240,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
3002     if (fsc_type == ESR_ELx_FSC_PERM)
3003     return true;
3004    
3005     - if (addr < USER_DS && system_uses_ttbr0_pan())
3006     + if (addr < TASK_SIZE && system_uses_ttbr0_pan())
3007     return fsc_type == ESR_ELx_FSC_FAULT &&
3008     (regs->pstate & PSR_PAN_BIT);
3009    
3010     @@ -414,7 +414,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
3011     mm_flags |= FAULT_FLAG_WRITE;
3012     }
3013    
3014     - if (addr < USER_DS && is_permission_fault(esr, regs, addr)) {
3015     + if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) {
3016     /* regs->orig_addr_limit may be 0 if we entered from EL0 */
3017     if (regs->orig_addr_limit == KERNEL_DS)
3018     die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
3019     @@ -707,6 +707,29 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
3020     arm64_notify_die("", regs, &info, esr);
3021     }
3022    
3023     +asmlinkage void __exception do_el0_irq_bp_hardening(void)
3024     +{
3025     + /* PC has already been checked in entry.S */
3026     + arm64_apply_bp_hardening();
3027     +}
3028     +
3029     +asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
3030     + unsigned int esr,
3031     + struct pt_regs *regs)
3032     +{
3033     + /*
3034     + * We've taken an instruction abort from userspace and not yet
3035     + * re-enabled IRQs. If the address is a kernel address, apply
3036     + * BP hardening prior to enabling IRQs and pre-emption.
3037     + */
3038     + if (addr > TASK_SIZE)
3039     + arm64_apply_bp_hardening();
3040     +
3041     + local_irq_enable();
3042     + do_mem_abort(addr, esr, regs);
3043     +}
3044     +
3045     +
3046     asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
3047     unsigned int esr,
3048     struct pt_regs *regs)
3049     @@ -714,6 +737,12 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
3050     struct siginfo info;
3051     struct task_struct *tsk = current;
3052    
3053     + if (user_mode(regs)) {
3054     + if (instruction_pointer(regs) > TASK_SIZE)
3055     + arm64_apply_bp_hardening();
3056     + local_irq_enable();
3057     + }
3058     +
3059     if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
3060     pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
3061     tsk->comm, task_pid_nr(tsk),
3062     @@ -773,6 +802,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
3063     if (interrupts_enabled(regs))
3064     trace_hardirqs_off();
3065    
3066     + if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
3067     + arm64_apply_bp_hardening();
3068     +
3069     if (!inf->fn(addr, esr, regs)) {
3070     rv = 1;
3071     } else {
3072     diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
3073     index 267d2b79d52d..451f96f3377c 100644
3074     --- a/arch/arm64/mm/mmu.c
3075     +++ b/arch/arm64/mm/mmu.c
3076     @@ -117,6 +117,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
3077     if ((old | new) & PTE_CONT)
3078     return false;
3079    
3080     + /* Transitioning from Global to Non-Global is safe */
3081     + if (((old ^ new) == PTE_NG) && (new & PTE_NG))
3082     + return true;
3083     +
3084     return ((old ^ new) & ~mask) == 0;
3085     }
3086    
3087     @@ -525,6 +529,37 @@ static int __init parse_rodata(char *arg)
3088     }
3089     early_param("rodata", parse_rodata);
3090    
3091     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
3092     +static int __init map_entry_trampoline(void)
3093     +{
3094     + extern char __entry_tramp_text_start[];
3095     +
3096     + pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
3097     + phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
3098     +
3099     + /* The trampoline is always mapped and can therefore be global */
3100     + pgprot_val(prot) &= ~PTE_NG;
3101     +
3102     + /* Map only the text into the trampoline page table */
3103     + memset(tramp_pg_dir, 0, PGD_SIZE);
3104     + __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
3105     + prot, pgd_pgtable_alloc, 0);
3106     +
3107     + /* Map both the text and data into the kernel page table */
3108     + __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
3109     + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
3110     + extern char __entry_tramp_data_start[];
3111     +
3112     + __set_fixmap(FIX_ENTRY_TRAMP_DATA,
3113     + __pa_symbol(__entry_tramp_data_start),
3114     + PAGE_KERNEL_RO);
3115     + }
3116     +
3117     + return 0;
3118     +}
3119     +core_initcall(map_entry_trampoline);
3120     +#endif
3121     +
3122     /*
3123     * Create fine-grained mappings for the kernel.
3124     */
3125     diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
3126     index 95233dfc4c39..08572f95bd8a 100644
3127     --- a/arch/arm64/mm/proc.S
3128     +++ b/arch/arm64/mm/proc.S
3129     @@ -86,7 +86,7 @@ ENDPROC(cpu_do_suspend)
3130     *
3131     * x0: Address of context pointer
3132     */
3133     - .pushsection ".idmap.text", "ax"
3134     + .pushsection ".idmap.text", "awx"
3135     ENTRY(cpu_do_resume)
3136     ldp x2, x3, [x0]
3137     ldp x4, x5, [x0, #16]
3138     @@ -138,16 +138,30 @@ ENDPROC(cpu_do_resume)
3139     * - pgd_phys - physical address of new TTB
3140     */
3141     ENTRY(cpu_do_switch_mm)
3142     - pre_ttbr0_update_workaround x0, x2, x3
3143     + mrs x2, ttbr1_el1
3144     mmid x1, x1 // get mm->context.id
3145     - bfi x0, x1, #48, #16 // set the ASID
3146     - msr ttbr0_el1, x0 // set TTBR0
3147     +#ifdef CONFIG_ARM64_SW_TTBR0_PAN
3148     + bfi x0, x1, #48, #16 // set the ASID field in TTBR0
3149     +#endif
3150     + bfi x2, x1, #48, #16 // set the ASID
3151     + msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set)
3152     isb
3153     - post_ttbr0_update_workaround
3154     - ret
3155     + msr ttbr0_el1, x0 // now update TTBR0
3156     + isb
3157     + b post_ttbr_update_workaround // Back to C code...
3158     ENDPROC(cpu_do_switch_mm)
3159    
3160     - .pushsection ".idmap.text", "ax"
3161     + .pushsection ".idmap.text", "awx"
3162     +
3163     +.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
3164     + adrp \tmp1, empty_zero_page
3165     + msr ttbr1_el1, \tmp2
3166     + isb
3167     + tlbi vmalle1
3168     + dsb nsh
3169     + isb
3170     +.endm
3171     +
3172     /*
3173     * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
3174     *
3175     @@ -157,13 +171,7 @@ ENDPROC(cpu_do_switch_mm)
3176     ENTRY(idmap_cpu_replace_ttbr1)
3177     save_and_disable_daif flags=x2
3178    
3179     - adrp x1, empty_zero_page
3180     - msr ttbr1_el1, x1
3181     - isb
3182     -
3183     - tlbi vmalle1
3184     - dsb nsh
3185     - isb
3186     + __idmap_cpu_set_reserved_ttbr1 x1, x3
3187    
3188     msr ttbr1_el1, x0
3189     isb
3190     @@ -174,13 +182,197 @@ ENTRY(idmap_cpu_replace_ttbr1)
3191     ENDPROC(idmap_cpu_replace_ttbr1)
3192     .popsection
3193    
3194     +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
3195     + .pushsection ".idmap.text", "awx"
3196     +
3197     + .macro __idmap_kpti_get_pgtable_ent, type
3198     + dc cvac, cur_\()\type\()p // Ensure any existing dirty
3199     + dmb sy // lines are written back before
3200     + ldr \type, [cur_\()\type\()p] // loading the entry
3201     + tbz \type, #0, next_\()\type // Skip invalid entries
3202     + .endm
3203     +
3204     + .macro __idmap_kpti_put_pgtable_ent_ng, type
3205     + orr \type, \type, #PTE_NG // Same bit for blocks and pages
3206     + str \type, [cur_\()\type\()p] // Update the entry and ensure it
3207     + dc civac, cur_\()\type\()p // is visible to all CPUs.
3208     + .endm
3209     +
3210     +/*
3211     + * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
3212     + *
3213     + * Called exactly once from stop_machine context by each CPU found during boot.
3214     + */
3215     +__idmap_kpti_flag:
3216     + .long 1
3217     +ENTRY(idmap_kpti_install_ng_mappings)
3218     + cpu .req w0
3219     + num_cpus .req w1
3220     + swapper_pa .req x2
3221     + swapper_ttb .req x3
3222     + flag_ptr .req x4
3223     + cur_pgdp .req x5
3224     + end_pgdp .req x6
3225     + pgd .req x7
3226     + cur_pudp .req x8
3227     + end_pudp .req x9
3228     + pud .req x10
3229     + cur_pmdp .req x11
3230     + end_pmdp .req x12
3231     + pmd .req x13
3232     + cur_ptep .req x14
3233     + end_ptep .req x15
3234     + pte .req x16
3235     +
3236     + mrs swapper_ttb, ttbr1_el1
3237     + adr flag_ptr, __idmap_kpti_flag
3238     +
3239     + cbnz cpu, __idmap_kpti_secondary
3240     +
3241     + /* We're the boot CPU. Wait for the others to catch up */
3242     + sevl
3243     +1: wfe
3244     + ldaxr w18, [flag_ptr]
3245     + eor w18, w18, num_cpus
3246     + cbnz w18, 1b
3247     +
3248     + /* We need to walk swapper, so turn off the MMU. */
3249     + pre_disable_mmu_workaround
3250     + mrs x18, sctlr_el1
3251     + bic x18, x18, #SCTLR_ELx_M
3252     + msr sctlr_el1, x18
3253     + isb
3254     +
3255     + /* Everybody is enjoying the idmap, so we can rewrite swapper. */
3256     + /* PGD */
3257     + mov cur_pgdp, swapper_pa
3258     + add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
3259     +do_pgd: __idmap_kpti_get_pgtable_ent pgd
3260     + tbnz pgd, #1, walk_puds
3261     + __idmap_kpti_put_pgtable_ent_ng pgd
3262     +next_pgd:
3263     + add cur_pgdp, cur_pgdp, #8
3264     + cmp cur_pgdp, end_pgdp
3265     + b.ne do_pgd
3266     +
3267     + /* Publish the updated tables and nuke all the TLBs */
3268     + dsb sy
3269     + tlbi vmalle1is
3270     + dsb ish
3271     + isb
3272     +
3273     + /* We're done: fire up the MMU again */
3274     + mrs x18, sctlr_el1
3275     + orr x18, x18, #SCTLR_ELx_M
3276     + msr sctlr_el1, x18
3277     + isb
3278     +
3279     + /* Set the flag to zero to indicate that we're all done */
3280     + str wzr, [flag_ptr]
3281     + ret
3282     +
3283     + /* PUD */
3284     +walk_puds:
3285     + .if CONFIG_PGTABLE_LEVELS > 3
3286     + pte_to_phys cur_pudp, pgd
3287     + add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
3288     +do_pud: __idmap_kpti_get_pgtable_ent pud
3289     + tbnz pud, #1, walk_pmds
3290     + __idmap_kpti_put_pgtable_ent_ng pud
3291     +next_pud:
3292     + add cur_pudp, cur_pudp, 8
3293     + cmp cur_pudp, end_pudp
3294     + b.ne do_pud
3295     + b next_pgd
3296     + .else /* CONFIG_PGTABLE_LEVELS <= 3 */
3297     + mov pud, pgd
3298     + b walk_pmds
3299     +next_pud:
3300     + b next_pgd
3301     + .endif
3302     +
3303     + /* PMD */
3304     +walk_pmds:
3305     + .if CONFIG_PGTABLE_LEVELS > 2
3306     + pte_to_phys cur_pmdp, pud
3307     + add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
3308     +do_pmd: __idmap_kpti_get_pgtable_ent pmd
3309     + tbnz pmd, #1, walk_ptes
3310     + __idmap_kpti_put_pgtable_ent_ng pmd
3311     +next_pmd:
3312     + add cur_pmdp, cur_pmdp, #8
3313     + cmp cur_pmdp, end_pmdp
3314     + b.ne do_pmd
3315     + b next_pud
3316     + .else /* CONFIG_PGTABLE_LEVELS <= 2 */
3317     + mov pmd, pud
3318     + b walk_ptes
3319     +next_pmd:
3320     + b next_pud
3321     + .endif
3322     +
3323     + /* PTE */
3324     +walk_ptes:
3325     + pte_to_phys cur_ptep, pmd
3326     + add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
3327     +do_pte: __idmap_kpti_get_pgtable_ent pte
3328     + __idmap_kpti_put_pgtable_ent_ng pte
3329     +next_pte:
3330     + add cur_ptep, cur_ptep, #8
3331     + cmp cur_ptep, end_ptep
3332     + b.ne do_pte
3333     + b next_pmd
3334     +
3335     + /* Secondary CPUs end up here */
3336     +__idmap_kpti_secondary:
3337     + /* Uninstall swapper before surgery begins */
3338     + __idmap_cpu_set_reserved_ttbr1 x18, x17
3339     +
3340     + /* Increment the flag to let the boot CPU we're ready */
3341     +1: ldxr w18, [flag_ptr]
3342     + add w18, w18, #1
3343     + stxr w17, w18, [flag_ptr]
3344     + cbnz w17, 1b
3345     +
3346     + /* Wait for the boot CPU to finish messing around with swapper */
3347     + sevl
3348     +1: wfe
3349     + ldxr w18, [flag_ptr]
3350     + cbnz w18, 1b
3351     +
3352     + /* All done, act like nothing happened */
3353     + msr ttbr1_el1, swapper_ttb
3354     + isb
3355     + ret
3356     +
3357     + .unreq cpu
3358     + .unreq num_cpus
3359     + .unreq swapper_pa
3360     + .unreq swapper_ttb
3361     + .unreq flag_ptr
3362     + .unreq cur_pgdp
3363     + .unreq end_pgdp
3364     + .unreq pgd
3365     + .unreq cur_pudp
3366     + .unreq end_pudp
3367     + .unreq pud
3368     + .unreq cur_pmdp
3369     + .unreq end_pmdp
3370     + .unreq pmd
3371     + .unreq cur_ptep
3372     + .unreq end_ptep
3373     + .unreq pte
3374     +ENDPROC(idmap_kpti_install_ng_mappings)
3375     + .popsection
3376     +#endif
3377     +
3378     /*
3379     * __cpu_setup
3380     *
3381     * Initialise the processor for turning the MMU on. Return in x0 the
3382     * value of the SCTLR_EL1 register.
3383     */
3384     - .pushsection ".idmap.text", "ax"
3385     + .pushsection ".idmap.text", "awx"
3386     ENTRY(__cpu_setup)
3387     tlbi vmalle1 // Invalidate local TLB
3388     dsb nsh
3389     @@ -224,7 +416,7 @@ ENTRY(__cpu_setup)
3390     * both user and kernel.
3391     */
3392     ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
3393     - TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
3394     + TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
3395     tcr_set_idmap_t0sz x10, x9
3396    
3397     /*
3398     diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
3399     index 401ceb71540c..c5f05c4a4d00 100644
3400     --- a/arch/arm64/xen/hypercall.S
3401     +++ b/arch/arm64/xen/hypercall.S
3402     @@ -101,12 +101,12 @@ ENTRY(privcmd_call)
3403     * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
3404     * is enabled (it implies that hardware UAO and PAN disabled).
3405     */
3406     - uaccess_ttbr0_enable x6, x7
3407     + uaccess_ttbr0_enable x6, x7, x8
3408     hvc XEN_IMM
3409    
3410     /*
3411     * Disable userspace access from kernel once the hyp call completed.
3412     */
3413     - uaccess_ttbr0_disable x6
3414     + uaccess_ttbr0_disable x6, x7
3415     ret
3416     ENDPROC(privcmd_call);
3417     diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
3418     index b39a388825ae..8ace89617c1c 100644
3419     --- a/arch/mn10300/mm/misalignment.c
3420     +++ b/arch/mn10300/mm/misalignment.c
3421     @@ -437,7 +437,7 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code)
3422    
3423     info.si_signo = SIGSEGV;
3424     info.si_errno = 0;
3425     - info.si_code = 0;
3426     + info.si_code = SEGV_MAPERR;
3427     info.si_addr = (void *) regs->pc;
3428     force_sig_info(SIGSEGV, &info, current);
3429     return;
3430     diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
3431     index 4085d72fa5ae..9e38dc66c9e4 100644
3432     --- a/arch/openrisc/kernel/traps.c
3433     +++ b/arch/openrisc/kernel/traps.c
3434     @@ -266,12 +266,12 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
3435     siginfo_t info;
3436    
3437     if (user_mode(regs)) {
3438     - /* Send a SIGSEGV */
3439     - info.si_signo = SIGSEGV;
3440     + /* Send a SIGBUS */
3441     + info.si_signo = SIGBUS;
3442     info.si_errno = 0;
3443     - /* info.si_code has been set above */
3444     - info.si_addr = (void *)address;
3445     - force_sig_info(SIGSEGV, &info, current);
3446     + info.si_code = BUS_ADRALN;
3447     + info.si_addr = (void __user *)address;
3448     + force_sig_info(SIGBUS, &info, current);
3449     } else {
3450     printk("KERNEL: Unaligned Access 0x%.8lx\n", address);
3451     show_registers(regs);
3452     diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
3453     index f058e0c3e4d4..fd1d6c83f0c0 100644
3454     --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
3455     +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
3456     @@ -141,6 +141,7 @@ static struct shash_alg alg = {
3457     .cra_name = "crc32c",
3458     .cra_driver_name = "crc32c-vpmsum",
3459     .cra_priority = 200,
3460     + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3461     .cra_blocksize = CHKSUM_BLOCK_SIZE,
3462     .cra_ctxsize = sizeof(u32),
3463     .cra_module = THIS_MODULE,
3464     diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
3465     index b12b8eb39c29..648160334abf 100644
3466     --- a/arch/powerpc/kvm/Kconfig
3467     +++ b/arch/powerpc/kvm/Kconfig
3468     @@ -68,7 +68,7 @@ config KVM_BOOK3S_64
3469     select KVM_BOOK3S_64_HANDLER
3470     select KVM
3471     select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
3472     - select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV)
3473     + select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV)
3474     ---help---
3475     Support running unmodified book3s_64 and book3s_32 guest kernels
3476     in virtual machines on book3s_64 host processors.
3477     diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
3478     index 2d46037ce936..6c402f6c4940 100644
3479     --- a/arch/powerpc/kvm/book3s_hv.c
3480     +++ b/arch/powerpc/kvm/book3s_hv.c
3481     @@ -1005,8 +1005,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
3482     struct kvm *kvm = vcpu->kvm;
3483     struct kvm_vcpu *tvcpu;
3484    
3485     - if (!cpu_has_feature(CPU_FTR_ARCH_300))
3486     - return EMULATE_FAIL;
3487     if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
3488     return RESUME_GUEST;
3489     if (get_op(inst) != 31)
3490     @@ -1056,6 +1054,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
3491     return RESUME_GUEST;
3492     }
3493    
3494     +/* Called with vcpu->arch.vcore->lock held */
3495     static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
3496     struct task_struct *tsk)
3497     {
3498     @@ -1176,7 +1175,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
3499     swab32(vcpu->arch.emul_inst) :
3500     vcpu->arch.emul_inst;
3501     if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
3502     + /* Need vcore unlocked to call kvmppc_get_last_inst */
3503     + spin_unlock(&vcpu->arch.vcore->lock);
3504     r = kvmppc_emulate_debug_inst(run, vcpu);
3505     + spin_lock(&vcpu->arch.vcore->lock);
3506     } else {
3507     kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
3508     r = RESUME_GUEST;
3509     @@ -1191,8 +1193,13 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
3510     */
3511     case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
3512     r = EMULATE_FAIL;
3513     - if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG)
3514     + if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
3515     + cpu_has_feature(CPU_FTR_ARCH_300)) {
3516     + /* Need vcore unlocked to call kvmppc_get_last_inst */
3517     + spin_unlock(&vcpu->arch.vcore->lock);
3518     r = kvmppc_emulate_doorbell_instr(vcpu);
3519     + spin_lock(&vcpu->arch.vcore->lock);
3520     + }
3521     if (r == EMULATE_FAIL) {
3522     kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
3523     r = RESUME_GUEST;
3524     @@ -2934,13 +2941,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
3525     /* make sure updates to secondary vcpu structs are visible now */
3526     smp_mb();
3527    
3528     + preempt_enable();
3529     +
3530     for (sub = 0; sub < core_info.n_subcores; ++sub) {
3531     pvc = core_info.vc[sub];
3532     post_guest_process(pvc, pvc == vc);
3533     }
3534    
3535     spin_lock(&vc->lock);
3536     - preempt_enable();
3537    
3538     out:
3539     vc->vcore_state = VCORE_INACTIVE;
3540     diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
3541     index 9c61f736c75b..ffec37062f3b 100644
3542     --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
3543     +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
3544     @@ -1423,6 +1423,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
3545     blt deliver_guest_interrupt
3546    
3547     guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
3548     + /* Save more register state */
3549     + mfdar r6
3550     + mfdsisr r7
3551     + std r6, VCPU_DAR(r9)
3552     + stw r7, VCPU_DSISR(r9)
3553     + /* don't overwrite fault_dar/fault_dsisr if HDSI */
3554     + cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
3555     + beq mc_cont
3556     + std r6, VCPU_FAULT_DAR(r9)
3557     + stw r7, VCPU_FAULT_DSISR(r9)
3558     +
3559     + /* See if it is a machine check */
3560     + cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
3561     + beq machine_check_realmode
3562     +mc_cont:
3563     +#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3564     + addi r3, r9, VCPU_TB_RMEXIT
3565     + mr r4, r9
3566     + bl kvmhv_accumulate_time
3567     +#endif
3568     #ifdef CONFIG_KVM_XICS
3569     /* We are exiting, pull the VP from the XIVE */
3570     lwz r0, VCPU_XIVE_PUSHED(r9)
3571     @@ -1460,26 +1480,6 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
3572     eieio
3573     1:
3574     #endif /* CONFIG_KVM_XICS */
3575     - /* Save more register state */
3576     - mfdar r6
3577     - mfdsisr r7
3578     - std r6, VCPU_DAR(r9)
3579     - stw r7, VCPU_DSISR(r9)
3580     - /* don't overwrite fault_dar/fault_dsisr if HDSI */
3581     - cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
3582     - beq mc_cont
3583     - std r6, VCPU_FAULT_DAR(r9)
3584     - stw r7, VCPU_FAULT_DSISR(r9)
3585     -
3586     - /* See if it is a machine check */
3587     - cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
3588     - beq machine_check_realmode
3589     -mc_cont:
3590     -#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3591     - addi r3, r9, VCPU_TB_RMEXIT
3592     - mr r4, r9
3593     - bl kvmhv_accumulate_time
3594     -#endif
3595    
3596     mr r3, r12
3597     /* Increment exit count, poke other threads to exit */
3598     diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
3599     index 436865926c26..423ee05887e6 100644
3600     --- a/arch/s390/crypto/crc32-vx.c
3601     +++ b/arch/s390/crypto/crc32-vx.c
3602     @@ -239,6 +239,7 @@ static struct shash_alg crc32_vx_algs[] = {
3603     .cra_name = "crc32",
3604     .cra_driver_name = "crc32-vx",
3605     .cra_priority = 200,
3606     + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3607     .cra_blocksize = CRC32_BLOCK_SIZE,
3608     .cra_ctxsize = sizeof(struct crc_ctx),
3609     .cra_module = THIS_MODULE,
3610     @@ -259,6 +260,7 @@ static struct shash_alg crc32_vx_algs[] = {
3611     .cra_name = "crc32be",
3612     .cra_driver_name = "crc32be-vx",
3613     .cra_priority = 200,
3614     + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3615     .cra_blocksize = CRC32_BLOCK_SIZE,
3616     .cra_ctxsize = sizeof(struct crc_ctx),
3617     .cra_module = THIS_MODULE,
3618     @@ -279,6 +281,7 @@ static struct shash_alg crc32_vx_algs[] = {
3619     .cra_name = "crc32c",
3620     .cra_driver_name = "crc32c-vx",
3621     .cra_priority = 200,
3622     + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3623     .cra_blocksize = CRC32_BLOCK_SIZE,
3624     .cra_ctxsize = sizeof(struct crc_ctx),
3625     .cra_module = THIS_MODULE,
3626     diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
3627     index 57cff00cad17..b3770bb26211 100644
3628     --- a/arch/sh/kernel/traps_32.c
3629     +++ b/arch/sh/kernel/traps_32.c
3630     @@ -609,7 +609,8 @@ asmlinkage void do_divide_error(unsigned long r4)
3631     break;
3632     }
3633    
3634     - force_sig_info(SIGFPE, &info, current);
3635     + info.si_signo = SIGFPE;
3636     + force_sig_info(info.si_signo, &info, current);
3637     }
3638     #endif
3639    
3640     diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
3641     index d1064e46efe8..8aa664638c3c 100644
3642     --- a/arch/sparc/crypto/crc32c_glue.c
3643     +++ b/arch/sparc/crypto/crc32c_glue.c
3644     @@ -133,6 +133,7 @@ static struct shash_alg alg = {
3645     .cra_name = "crc32c",
3646     .cra_driver_name = "crc32c-sparc64",
3647     .cra_priority = SPARC_CR_OPCODE_PRIORITY,
3648     + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3649     .cra_blocksize = CHKSUM_BLOCK_SIZE,
3650     .cra_ctxsize = sizeof(u32),
3651     .cra_alignmask = 7,
3652     diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
3653     index 27226df3f7d8..c8d9cdacbf10 100644
3654     --- a/arch/x86/crypto/crc32-pclmul_glue.c
3655     +++ b/arch/x86/crypto/crc32-pclmul_glue.c
3656     @@ -162,6 +162,7 @@ static struct shash_alg alg = {
3657     .cra_name = "crc32",
3658     .cra_driver_name = "crc32-pclmul",
3659     .cra_priority = 200,
3660     + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3661     .cra_blocksize = CHKSUM_BLOCK_SIZE,
3662     .cra_ctxsize = sizeof(u32),
3663     .cra_module = THIS_MODULE,
3664     diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
3665     index c194d5717ae5..5773e1161072 100644
3666     --- a/arch/x86/crypto/crc32c-intel_glue.c
3667     +++ b/arch/x86/crypto/crc32c-intel_glue.c
3668     @@ -226,6 +226,7 @@ static struct shash_alg alg = {
3669     .cra_name = "crc32c",
3670     .cra_driver_name = "crc32c-intel",
3671     .cra_priority = 200,
3672     + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3673     .cra_blocksize = CHKSUM_BLOCK_SIZE,
3674     .cra_ctxsize = sizeof(u32),
3675     .cra_module = THIS_MODULE,
3676     diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
3677     index e32142bc071d..28c372003e44 100644
3678     --- a/arch/x86/crypto/poly1305_glue.c
3679     +++ b/arch/x86/crypto/poly1305_glue.c
3680     @@ -164,7 +164,6 @@ static struct shash_alg alg = {
3681     .init = poly1305_simd_init,
3682     .update = poly1305_simd_update,
3683     .final = crypto_poly1305_final,
3684     - .setkey = crypto_poly1305_setkey,
3685     .descsize = sizeof(struct poly1305_simd_desc_ctx),
3686     .base = {
3687     .cra_name = "poly1305",
3688     diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
3689     index 36870b26067a..d08805032f01 100644
3690     --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
3691     +++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
3692     @@ -57,10 +57,12 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
3693     {
3694     unsigned int j;
3695    
3696     - state->lens[0] = 0;
3697     - state->lens[1] = 1;
3698     - state->lens[2] = 2;
3699     - state->lens[3] = 3;
3700     + /* initially all lanes are unused */
3701     + state->lens[0] = 0xFFFFFFFF00000000;
3702     + state->lens[1] = 0xFFFFFFFF00000001;
3703     + state->lens[2] = 0xFFFFFFFF00000002;
3704     + state->lens[3] = 0xFFFFFFFF00000003;
3705     +
3706     state->unused_lanes = 0xFF03020100;
3707     for (j = 0; j < 4; j++)
3708     state->ldata[j].job_in_lane = NULL;
3709     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
3710     index bee4c49f6dd0..6f623848260f 100644
3711     --- a/arch/x86/kvm/vmx.c
3712     +++ b/arch/x86/kvm/vmx.c
3713     @@ -5323,14 +5323,15 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
3714    
3715     if (is_guest_mode(vcpu) &&
3716     vector == vmx->nested.posted_intr_nv) {
3717     - /* the PIR and ON have been set by L1. */
3718     - kvm_vcpu_trigger_posted_interrupt(vcpu, true);
3719     /*
3720     * If a posted intr is not recognized by hardware,
3721     * we will accomplish it in the next vmentry.
3722     */
3723     vmx->nested.pi_pending = true;
3724     kvm_make_request(KVM_REQ_EVENT, vcpu);
3725     + /* the PIR and ON have been set by L1. */
3726     + if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true))
3727     + kvm_vcpu_kick(vcpu);
3728     return 0;
3729     }
3730     return -1;
3731     @@ -11254,7 +11255,6 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
3732     if (block_nested_events)
3733     return -EBUSY;
3734     nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3735     - vcpu->arch.exception.pending = false;
3736     return 0;
3737     }
3738    
3739     diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
3740     index d0b95b7a90b4..6d112d8f799c 100644
3741     --- a/arch/x86/kvm/x86.h
3742     +++ b/arch/x86/kvm/x86.h
3743     @@ -12,6 +12,7 @@
3744    
3745     static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
3746     {
3747     + vcpu->arch.exception.pending = false;
3748     vcpu->arch.exception.injected = false;
3749     }
3750    
3751     diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
3752     index eaaf1ebcc7a4..5bfbc1c401d4 100644
3753     --- a/arch/xtensa/include/asm/futex.h
3754     +++ b/arch/xtensa/include/asm/futex.h
3755     @@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
3756     u32 oldval, u32 newval)
3757     {
3758     int ret = 0;
3759     - u32 prev;
3760    
3761     if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
3762     return -EFAULT;
3763     @@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
3764    
3765     __asm__ __volatile__ (
3766     " # futex_atomic_cmpxchg_inatomic\n"
3767     - "1: l32i %1, %3, 0\n"
3768     - " mov %0, %5\n"
3769     - " wsr %1, scompare1\n"
3770     - "2: s32c1i %0, %3, 0\n"
3771     - "3:\n"
3772     + " wsr %5, scompare1\n"
3773     + "1: s32c1i %1, %4, 0\n"
3774     + " s32i %1, %6, 0\n"
3775     + "2:\n"
3776     " .section .fixup,\"ax\"\n"
3777     " .align 4\n"
3778     - "4: .long 3b\n"
3779     - "5: l32r %1, 4b\n"
3780     - " movi %0, %6\n"
3781     + "3: .long 2b\n"
3782     + "4: l32r %1, 3b\n"
3783     + " movi %0, %7\n"
3784     " jx %1\n"
3785     " .previous\n"
3786     " .section __ex_table,\"a\"\n"
3787     - " .long 1b,5b,2b,5b\n"
3788     + " .long 1b,4b\n"
3789     " .previous\n"
3790     - : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
3791     - : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT)
3792     + : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
3793     + : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
3794     : "memory");
3795    
3796     - *uval = prev;
3797     return ret;
3798     }
3799    
3800     diff --git a/block/blk-core.c b/block/blk-core.c
3801     index 3ba4326a63b5..82b92adf3477 100644
3802     --- a/block/blk-core.c
3803     +++ b/block/blk-core.c
3804     @@ -699,6 +699,15 @@ void blk_cleanup_queue(struct request_queue *q)
3805     queue_flag_set(QUEUE_FLAG_DEAD, q);
3806     spin_unlock_irq(lock);
3807    
3808     + /*
3809     + * make sure all in-progress dispatch are completed because
3810     + * blk_freeze_queue() can only complete all requests, and
3811     + * dispatch may still be in-progress since we dispatch requests
3812     + * from more than one contexts
3813     + */
3814     + if (q->mq_ops)
3815     + blk_mq_quiesce_queue(q);
3816     +
3817     /* for synchronous bio-based driver finish in-flight integrity i/o */
3818     blk_flush_integrity();
3819    
3820     diff --git a/crypto/ahash.c b/crypto/ahash.c
3821     index 3a35d67de7d9..266fc1d64f61 100644
3822     --- a/crypto/ahash.c
3823     +++ b/crypto/ahash.c
3824     @@ -193,11 +193,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
3825     unsigned int keylen)
3826     {
3827     unsigned long alignmask = crypto_ahash_alignmask(tfm);
3828     + int err;
3829    
3830     if ((unsigned long)key & alignmask)
3831     - return ahash_setkey_unaligned(tfm, key, keylen);
3832     + err = ahash_setkey_unaligned(tfm, key, keylen);
3833     + else
3834     + err = tfm->setkey(tfm, key, keylen);
3835     +
3836     + if (err)
3837     + return err;
3838    
3839     - return tfm->setkey(tfm, key, keylen);
3840     + crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
3841     + return 0;
3842     }
3843     EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
3844    
3845     @@ -368,7 +375,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup);
3846    
3847     int crypto_ahash_digest(struct ahash_request *req)
3848     {
3849     - return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
3850     + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
3851     +
3852     + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
3853     + return -ENOKEY;
3854     +
3855     + return crypto_ahash_op(req, tfm->digest);
3856     }
3857     EXPORT_SYMBOL_GPL(crypto_ahash_digest);
3858    
3859     @@ -450,7 +462,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
3860     struct ahash_alg *alg = crypto_ahash_alg(hash);
3861    
3862     hash->setkey = ahash_nosetkey;
3863     - hash->has_setkey = false;
3864     hash->export = ahash_no_export;
3865     hash->import = ahash_no_import;
3866    
3867     @@ -465,7 +476,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
3868    
3869     if (alg->setkey) {
3870     hash->setkey = alg->setkey;
3871     - hash->has_setkey = true;
3872     + if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
3873     + crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
3874     }
3875     if (alg->export)
3876     hash->export = alg->export;
3877     @@ -649,5 +661,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
3878     }
3879     EXPORT_SYMBOL_GPL(ahash_attr_alg);
3880    
3881     +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
3882     +{
3883     + struct crypto_alg *alg = &halg->base;
3884     +
3885     + if (alg->cra_type != &crypto_ahash_type)
3886     + return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
3887     +
3888     + return __crypto_ahash_alg(alg)->setkey != NULL;
3889     +}
3890     +EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
3891     +
3892     MODULE_LICENSE("GPL");
3893     MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
3894     diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
3895     index 76d2e716c792..6c9b1927a520 100644
3896     --- a/crypto/algif_hash.c
3897     +++ b/crypto/algif_hash.c
3898     @@ -34,11 +34,6 @@ struct hash_ctx {
3899     struct ahash_request req;
3900     };
3901    
3902     -struct algif_hash_tfm {
3903     - struct crypto_ahash *hash;
3904     - bool has_key;
3905     -};
3906     -
3907     static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
3908     {
3909     unsigned ds;
3910     @@ -307,7 +302,7 @@ static int hash_check_key(struct socket *sock)
3911     int err = 0;
3912     struct sock *psk;
3913     struct alg_sock *pask;
3914     - struct algif_hash_tfm *tfm;
3915     + struct crypto_ahash *tfm;
3916     struct sock *sk = sock->sk;
3917     struct alg_sock *ask = alg_sk(sk);
3918    
3919     @@ -321,7 +316,7 @@ static int hash_check_key(struct socket *sock)
3920    
3921     err = -ENOKEY;
3922     lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
3923     - if (!tfm->has_key)
3924     + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
3925     goto unlock;
3926    
3927     if (!pask->refcnt++)
3928     @@ -412,41 +407,17 @@ static struct proto_ops algif_hash_ops_nokey = {
3929    
3930     static void *hash_bind(const char *name, u32 type, u32 mask)
3931     {
3932     - struct algif_hash_tfm *tfm;
3933     - struct crypto_ahash *hash;
3934     -
3935     - tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
3936     - if (!tfm)
3937     - return ERR_PTR(-ENOMEM);
3938     -
3939     - hash = crypto_alloc_ahash(name, type, mask);
3940     - if (IS_ERR(hash)) {
3941     - kfree(tfm);
3942     - return ERR_CAST(hash);
3943     - }
3944     -
3945     - tfm->hash = hash;
3946     -
3947     - return tfm;
3948     + return crypto_alloc_ahash(name, type, mask);
3949     }
3950    
3951     static void hash_release(void *private)
3952     {
3953     - struct algif_hash_tfm *tfm = private;
3954     -
3955     - crypto_free_ahash(tfm->hash);
3956     - kfree(tfm);
3957     + crypto_free_ahash(private);
3958     }
3959    
3960     static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
3961     {
3962     - struct algif_hash_tfm *tfm = private;
3963     - int err;
3964     -
3965     - err = crypto_ahash_setkey(tfm->hash, key, keylen);
3966     - tfm->has_key = !err;
3967     -
3968     - return err;
3969     + return crypto_ahash_setkey(private, key, keylen);
3970     }
3971    
3972     static void hash_sock_destruct(struct sock *sk)
3973     @@ -461,11 +432,10 @@ static void hash_sock_destruct(struct sock *sk)
3974    
3975     static int hash_accept_parent_nokey(void *private, struct sock *sk)
3976     {
3977     - struct hash_ctx *ctx;
3978     + struct crypto_ahash *tfm = private;
3979     struct alg_sock *ask = alg_sk(sk);
3980     - struct algif_hash_tfm *tfm = private;
3981     - struct crypto_ahash *hash = tfm->hash;
3982     - unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
3983     + struct hash_ctx *ctx;
3984     + unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm);
3985    
3986     ctx = sock_kmalloc(sk, len, GFP_KERNEL);
3987     if (!ctx)
3988     @@ -478,7 +448,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
3989    
3990     ask->private = ctx;
3991    
3992     - ahash_request_set_tfm(&ctx->req, hash);
3993     + ahash_request_set_tfm(&ctx->req, tfm);
3994     ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
3995     crypto_req_done, &ctx->wait);
3996    
3997     @@ -489,9 +459,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
3998    
3999     static int hash_accept_parent(void *private, struct sock *sk)
4000     {
4001     - struct algif_hash_tfm *tfm = private;
4002     + struct crypto_ahash *tfm = private;
4003    
4004     - if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
4005     + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
4006     return -ENOKEY;
4007    
4008     return hash_accept_parent_nokey(private, sk);
4009     diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
4010     index aa2a25fc7482..718cbce8d169 100644
4011     --- a/crypto/crc32_generic.c
4012     +++ b/crypto/crc32_generic.c
4013     @@ -133,6 +133,7 @@ static struct shash_alg alg = {
4014     .cra_name = "crc32",
4015     .cra_driver_name = "crc32-generic",
4016     .cra_priority = 100,
4017     + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
4018     .cra_blocksize = CHKSUM_BLOCK_SIZE,
4019     .cra_ctxsize = sizeof(u32),
4020     .cra_module = THIS_MODULE,
4021     diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
4022     index 4c0a0e271876..372320399622 100644
4023     --- a/crypto/crc32c_generic.c
4024     +++ b/crypto/crc32c_generic.c
4025     @@ -146,6 +146,7 @@ static struct shash_alg alg = {
4026     .cra_name = "crc32c",
4027     .cra_driver_name = "crc32c-generic",
4028     .cra_priority = 100,
4029     + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
4030     .cra_blocksize = CHKSUM_BLOCK_SIZE,
4031     .cra_alignmask = 3,
4032     .cra_ctxsize = sizeof(struct chksum_ctx),
4033     diff --git a/crypto/cryptd.c b/crypto/cryptd.c
4034     index bd43cf5be14c..c32b98b5daf8 100644
4035     --- a/crypto/cryptd.c
4036     +++ b/crypto/cryptd.c
4037     @@ -893,10 +893,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
4038     if (err)
4039     goto out_free_inst;
4040    
4041     - type = CRYPTO_ALG_ASYNC;
4042     - if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
4043     - type |= CRYPTO_ALG_INTERNAL;
4044     - inst->alg.halg.base.cra_flags = type;
4045     + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
4046     + (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
4047     + CRYPTO_ALG_OPTIONAL_KEY));
4048    
4049     inst->alg.halg.digestsize = salg->digestsize;
4050     inst->alg.halg.statesize = salg->statesize;
4051     @@ -911,7 +910,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
4052     inst->alg.finup = cryptd_hash_finup_enqueue;
4053     inst->alg.export = cryptd_hash_export;
4054     inst->alg.import = cryptd_hash_import;
4055     - inst->alg.setkey = cryptd_hash_setkey;
4056     + if (crypto_shash_alg_has_setkey(salg))
4057     + inst->alg.setkey = cryptd_hash_setkey;
4058     inst->alg.digest = cryptd_hash_digest_enqueue;
4059    
4060     err = ahash_register_instance(tmpl, inst);
4061     diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
4062     index eca04d3729b3..e0732d979e3b 100644
4063     --- a/crypto/mcryptd.c
4064     +++ b/crypto/mcryptd.c
4065     @@ -517,10 +517,9 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
4066     if (err)
4067     goto out_free_inst;
4068    
4069     - type = CRYPTO_ALG_ASYNC;
4070     - if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
4071     - type |= CRYPTO_ALG_INTERNAL;
4072     - inst->alg.halg.base.cra_flags = type;
4073     + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
4074     + (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
4075     + CRYPTO_ALG_OPTIONAL_KEY));
4076    
4077     inst->alg.halg.digestsize = halg->digestsize;
4078     inst->alg.halg.statesize = halg->statesize;
4079     @@ -535,7 +534,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
4080     inst->alg.finup = mcryptd_hash_finup_enqueue;
4081     inst->alg.export = mcryptd_hash_export;
4082     inst->alg.import = mcryptd_hash_import;
4083     - inst->alg.setkey = mcryptd_hash_setkey;
4084     + if (crypto_hash_alg_has_setkey(halg))
4085     + inst->alg.setkey = mcryptd_hash_setkey;
4086     inst->alg.digest = mcryptd_hash_digest_enqueue;
4087    
4088     err = ahash_register_instance(tmpl, inst);
4089     diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
4090     index b1c2d57dc734..ba39eb308c79 100644
4091     --- a/crypto/poly1305_generic.c
4092     +++ b/crypto/poly1305_generic.c
4093     @@ -47,17 +47,6 @@ int crypto_poly1305_init(struct shash_desc *desc)
4094     }
4095     EXPORT_SYMBOL_GPL(crypto_poly1305_init);
4096    
4097     -int crypto_poly1305_setkey(struct crypto_shash *tfm,
4098     - const u8 *key, unsigned int keylen)
4099     -{
4100     - /* Poly1305 requires a unique key for each tag, which implies that
4101     - * we can't set it on the tfm that gets accessed by multiple users
4102     - * simultaneously. Instead we expect the key as the first 32 bytes in
4103     - * the update() call. */
4104     - return -ENOTSUPP;
4105     -}
4106     -EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
4107     -
4108     static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
4109     {
4110     /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
4111     @@ -76,6 +65,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
4112     dctx->s[3] = get_unaligned_le32(key + 12);
4113     }
4114    
4115     +/*
4116     + * Poly1305 requires a unique key for each tag, which implies that we can't set
4117     + * it on the tfm that gets accessed by multiple users simultaneously. Instead we
4118     + * expect the key as the first 32 bytes in the update() call.
4119     + */
4120     unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
4121     const u8 *src, unsigned int srclen)
4122     {
4123     @@ -281,7 +275,6 @@ static struct shash_alg poly1305_alg = {
4124     .init = crypto_poly1305_init,
4125     .update = crypto_poly1305_update,
4126     .final = crypto_poly1305_final,
4127     - .setkey = crypto_poly1305_setkey,
4128     .descsize = sizeof(struct poly1305_desc_ctx),
4129     .base = {
4130     .cra_name = "poly1305",
4131     diff --git a/crypto/shash.c b/crypto/shash.c
4132     index e849d3ee2e27..5d732c6bb4b2 100644
4133     --- a/crypto/shash.c
4134     +++ b/crypto/shash.c
4135     @@ -58,11 +58,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
4136     {
4137     struct shash_alg *shash = crypto_shash_alg(tfm);
4138     unsigned long alignmask = crypto_shash_alignmask(tfm);
4139     + int err;
4140    
4141     if ((unsigned long)key & alignmask)
4142     - return shash_setkey_unaligned(tfm, key, keylen);
4143     + err = shash_setkey_unaligned(tfm, key, keylen);
4144     + else
4145     + err = shash->setkey(tfm, key, keylen);
4146     +
4147     + if (err)
4148     + return err;
4149    
4150     - return shash->setkey(tfm, key, keylen);
4151     + crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
4152     + return 0;
4153     }
4154     EXPORT_SYMBOL_GPL(crypto_shash_setkey);
4155    
4156     @@ -181,6 +188,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
4157     struct shash_alg *shash = crypto_shash_alg(tfm);
4158     unsigned long alignmask = crypto_shash_alignmask(tfm);
4159    
4160     + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
4161     + return -ENOKEY;
4162     +
4163     if (((unsigned long)data | (unsigned long)out) & alignmask)
4164     return shash_digest_unaligned(desc, data, len, out);
4165    
4166     @@ -360,7 +370,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
4167     crt->digest = shash_async_digest;
4168     crt->setkey = shash_async_setkey;
4169    
4170     - crt->has_setkey = alg->setkey != shash_no_setkey;
4171     + crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
4172     + CRYPTO_TFM_NEED_KEY);
4173    
4174     if (alg->export)
4175     crt->export = shash_async_export;
4176     @@ -375,8 +386,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
4177     static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
4178     {
4179     struct crypto_shash *hash = __crypto_shash_cast(tfm);
4180     + struct shash_alg *alg = crypto_shash_alg(hash);
4181     +
4182     + hash->descsize = alg->descsize;
4183     +
4184     + if (crypto_shash_alg_has_setkey(alg) &&
4185     + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
4186     + crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
4187    
4188     - hash->descsize = crypto_shash_alg(hash)->descsize;
4189     return 0;
4190     }
4191    
4192     diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
4193     index abeb4df4f22e..b28ce440a06f 100644
4194     --- a/drivers/acpi/nfit/core.c
4195     +++ b/drivers/acpi/nfit/core.c
4196     @@ -1867,6 +1867,9 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
4197     struct kernfs_node *nfit_kernfs;
4198    
4199     nvdimm = nfit_mem->nvdimm;
4200     + if (!nvdimm)
4201     + continue;
4202     +
4203     nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
4204     if (nfit_kernfs)
4205     nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
4206     diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
4207     index 2fa8304171e0..7a3431018e0a 100644
4208     --- a/drivers/acpi/sbshc.c
4209     +++ b/drivers/acpi/sbshc.c
4210     @@ -275,8 +275,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
4211     device->driver_data = hc;
4212    
4213     acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc);
4214     - printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n",
4215     - hc->ec, hc->offset, hc->query_bit);
4216     + dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n",
4217     + hc->offset, hc->query_bit);
4218    
4219     return 0;
4220     }
4221     diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
4222     index 5443cb71d7ba..44a9d630b7ac 100644
4223     --- a/drivers/ata/ahci.c
4224     +++ b/drivers/ata/ahci.c
4225     @@ -268,9 +268,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
4226     { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
4227     { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
4228     { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
4229     - { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
4230     + { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH M AHCI */
4231     { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
4232     - { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
4233     + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH M RAID */
4234     { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
4235     { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
4236     { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
4237     @@ -293,9 +293,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
4238     { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
4239     { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
4240     { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
4241     - { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
4242     + { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT M AHCI */
4243     { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
4244     - { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
4245     + { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT M RAID */
4246     { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
4247     { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
4248     { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
4249     @@ -304,20 +304,20 @@ static const struct pci_device_id ahci_pci_tbl[] = {
4250     { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
4251     { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
4252     { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
4253     - { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */
4254     + { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point M AHCI */
4255     { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
4256     { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
4257     { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
4258     - { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
4259     + { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point M RAID */
4260     { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
4261     { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
4262     - { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */
4263     + { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point M AHCI */
4264     { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
4265     - { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */
4266     + { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point M RAID */
4267     { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
4268     - { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
4269     + { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point M RAID */
4270     { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
4271     - { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
4272     + { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point M RAID */
4273     { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
4274     { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
4275     { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
4276     @@ -358,21 +358,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
4277     { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
4278     { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
4279     { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
4280     - { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
4281     + { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series M AHCI */
4282     { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
4283     - { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
4284     + { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series M RAID */
4285     { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
4286     - { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
4287     + { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series M RAID */
4288     { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
4289     - { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
4290     + { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series M RAID */
4291     { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
4292     { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
4293     { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
4294     { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
4295     - { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
4296     + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H M AHCI */
4297     { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
4298     { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
4299     - { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
4300     + { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H M RAID */
4301     { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
4302     { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
4303     { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
4304     @@ -386,6 +386,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
4305     { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
4306     { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
4307     { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
4308     + { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
4309     + { PCI_VDEVICE(INTEL, 0x0f22), board_ahci }, /* Bay Trail AHCI */
4310     + { PCI_VDEVICE(INTEL, 0x0f23), board_ahci }, /* Bay Trail AHCI */
4311     + { PCI_VDEVICE(INTEL, 0x22a3), board_ahci }, /* Cherry Trail AHCI */
4312     + { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci }, /* Apollo Lake AHCI */
4313    
4314     /* JMicron 360/1/3/5/6, match class to avoid IDE function */
4315     { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
4316     diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
4317     index 67974796c350..531a0915066b 100644
4318     --- a/drivers/block/pktcdvd.c
4319     +++ b/drivers/block/pktcdvd.c
4320     @@ -2579,14 +2579,14 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
4321     bdev = bdget(dev);
4322     if (!bdev)
4323     return -ENOMEM;
4324     + ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
4325     + if (ret)
4326     + return ret;
4327     if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
4328     WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
4329     - bdput(bdev);
4330     + blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
4331     return -EINVAL;
4332     }
4333     - ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
4334     - if (ret)
4335     - return ret;
4336    
4337     /* This is safe, since we have a reference from open(). */
4338     __module_get(THIS_MODULE);
4339     @@ -2745,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
4340     pd->pkt_dev = MKDEV(pktdev_major, idx);
4341     ret = pkt_new_dev(pd, dev);
4342     if (ret)
4343     - goto out_new_dev;
4344     + goto out_mem2;
4345    
4346     /* inherit events of the host device */
4347     disk->events = pd->bdev->bd_disk->events;
4348     @@ -2763,8 +2763,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
4349     mutex_unlock(&ctl_mutex);
4350     return 0;
4351    
4352     -out_new_dev:
4353     - blk_cleanup_queue(disk->queue);
4354     out_mem2:
4355     put_disk(disk);
4356     out_mem:
4357     diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
4358     index c8e945d19ffe..20142bc77554 100644
4359     --- a/drivers/bluetooth/btsdio.c
4360     +++ b/drivers/bluetooth/btsdio.c
4361     @@ -31,6 +31,7 @@
4362     #include <linux/errno.h>
4363     #include <linux/skbuff.h>
4364    
4365     +#include <linux/mmc/host.h>
4366     #include <linux/mmc/sdio_ids.h>
4367     #include <linux/mmc/sdio_func.h>
4368    
4369     @@ -292,6 +293,14 @@ static int btsdio_probe(struct sdio_func *func,
4370     tuple = tuple->next;
4371     }
4372    
4373     + /* BCM43341 devices soldered onto the PCB (non-removable) use an
4374     + * uart connection for bluetooth, ignore the BT SDIO interface.
4375     + */
4376     + if (func->vendor == SDIO_VENDOR_ID_BROADCOM &&
4377     + func->device == SDIO_DEVICE_ID_BROADCOM_43341 &&
4378     + !mmc_card_is_removable(func->card->host))
4379     + return -ENODEV;
4380     +
4381     data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
4382     if (!data)
4383     return -ENOMEM;
4384     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
4385     index f7120c9eb9bd..76980e78ae56 100644
4386     --- a/drivers/bluetooth/btusb.c
4387     +++ b/drivers/bluetooth/btusb.c
4388     @@ -23,6 +23,7 @@
4389    
4390     #include <linux/module.h>
4391     #include <linux/usb.h>
4392     +#include <linux/usb/quirks.h>
4393     #include <linux/firmware.h>
4394     #include <linux/of_device.h>
4395     #include <linux/of_irq.h>
4396     @@ -387,9 +388,8 @@ static const struct usb_device_id blacklist_table[] = {
4397     #define BTUSB_FIRMWARE_LOADED 7
4398     #define BTUSB_FIRMWARE_FAILED 8
4399     #define BTUSB_BOOTING 9
4400     -#define BTUSB_RESET_RESUME 10
4401     -#define BTUSB_DIAG_RUNNING 11
4402     -#define BTUSB_OOB_WAKE_ENABLED 12
4403     +#define BTUSB_DIAG_RUNNING 10
4404     +#define BTUSB_OOB_WAKE_ENABLED 11
4405    
4406     struct btusb_data {
4407     struct hci_dev *hdev;
4408     @@ -3120,9 +3120,9 @@ static int btusb_probe(struct usb_interface *intf,
4409    
4410     /* QCA Rome devices lose their updated firmware over suspend,
4411     * but the USB hub doesn't notice any status change.
4412     - * Explicitly request a device reset on resume.
4413     + * explicitly request a device reset on resume.
4414     */
4415     - set_bit(BTUSB_RESET_RESUME, &data->flags);
4416     + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
4417     }
4418    
4419     #ifdef CONFIG_BT_HCIBTUSB_RTL
4420     @@ -3133,7 +3133,7 @@ static int btusb_probe(struct usb_interface *intf,
4421     * but the USB hub doesn't notice any status change.
4422     * Explicitly request a device reset on resume.
4423     */
4424     - set_bit(BTUSB_RESET_RESUME, &data->flags);
4425     + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
4426     }
4427     #endif
4428    
4429     @@ -3299,14 +3299,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
4430     enable_irq(data->oob_wake_irq);
4431     }
4432    
4433     - /* Optionally request a device reset on resume, but only when
4434     - * wakeups are disabled. If wakeups are enabled we assume the
4435     - * device will stay powered up throughout suspend.
4436     - */
4437     - if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
4438     - !device_may_wakeup(&data->udev->dev))
4439     - data->udev->reset_resume = 1;
4440     -
4441     return 0;
4442     }
4443    
4444     diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
4445     index ab78b3be7e33..c5112b17d7ea 100644
4446     --- a/drivers/char/ipmi/ipmi_dmi.c
4447     +++ b/drivers/char/ipmi/ipmi_dmi.c
4448     @@ -106,7 +106,10 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
4449     pr_err("ipmi:dmi: Error allocation IPMI platform device\n");
4450     return;
4451     }
4452     - pdev->driver_override = override;
4453     + pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
4454     + override);
4455     + if (!pdev->driver_override)
4456     + goto err;
4457    
4458     if (type == IPMI_DMI_TYPE_SSIF) {
4459     set_prop_entry(p[pidx++], "i2c-addr", u16, base_addr);
4460     diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
4461     index 8f2423789ba9..4bfeb9929ab2 100644
4462     --- a/drivers/clocksource/timer-stm32.c
4463     +++ b/drivers/clocksource/timer-stm32.c
4464     @@ -106,6 +106,10 @@ static int __init stm32_clockevent_init(struct device_node *np)
4465     unsigned long rate, max_delta;
4466     int irq, ret, bits, prescaler = 1;
4467    
4468     + data = kmemdup(&clock_event_ddata, sizeof(*data), GFP_KERNEL);
4469     + if (!data)
4470     + return -ENOMEM;
4471     +
4472     clk = of_clk_get(np, 0);
4473     if (IS_ERR(clk)) {
4474     ret = PTR_ERR(clk);
4475     @@ -156,8 +160,8 @@ static int __init stm32_clockevent_init(struct device_node *np)
4476    
4477     writel_relaxed(prescaler - 1, data->base + TIM_PSC);
4478     writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR);
4479     - writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER);
4480     writel_relaxed(0, data->base + TIM_SR);
4481     + writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER);
4482    
4483     data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ);
4484    
4485     @@ -184,6 +188,7 @@ static int __init stm32_clockevent_init(struct device_node *np)
4486     err_clk_enable:
4487     clk_put(clk);
4488     err_clk_get:
4489     + kfree(data);
4490     return ret;
4491     }
4492    
4493     diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
4494     index ecc56e26f8f6..3b585e4bfac5 100644
4495     --- a/drivers/cpufreq/cpufreq-dt-platdev.c
4496     +++ b/drivers/cpufreq/cpufreq-dt-platdev.c
4497     @@ -108,6 +108,14 @@ static const struct of_device_id blacklist[] __initconst = {
4498    
4499     { .compatible = "marvell,armadaxp", },
4500    
4501     + { .compatible = "mediatek,mt2701", },
4502     + { .compatible = "mediatek,mt2712", },
4503     + { .compatible = "mediatek,mt7622", },
4504     + { .compatible = "mediatek,mt7623", },
4505     + { .compatible = "mediatek,mt817x", },
4506     + { .compatible = "mediatek,mt8173", },
4507     + { .compatible = "mediatek,mt8176", },
4508     +
4509     { .compatible = "nvidia,tegra124", },
4510    
4511     { .compatible = "st,stih407", },
4512     diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
4513     index a118b9bed669..bfbf8bf77f03 100644
4514     --- a/drivers/crypto/bfin_crc.c
4515     +++ b/drivers/crypto/bfin_crc.c
4516     @@ -494,7 +494,8 @@ static struct ahash_alg algs = {
4517     .cra_driver_name = DRIVER_NAME,
4518     .cra_priority = 100,
4519     .cra_flags = CRYPTO_ALG_TYPE_AHASH |
4520     - CRYPTO_ALG_ASYNC,
4521     + CRYPTO_ALG_ASYNC |
4522     + CRYPTO_ALG_OPTIONAL_KEY,
4523     .cra_blocksize = CHKSUM_BLOCK_SIZE,
4524     .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
4525     .cra_alignmask = 3,
4526     diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
4527     index 027e121c6f70..e1d4ae1153c4 100644
4528     --- a/drivers/crypto/caam/ctrl.c
4529     +++ b/drivers/crypto/caam/ctrl.c
4530     @@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
4531     * without any error (HW optimizations for later
4532     * CAAM eras), then try again.
4533     */
4534     + if (ret)
4535     + break;
4536     +
4537     rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
4538     if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
4539     - !(rdsta_val & (1 << sh_idx)))
4540     + !(rdsta_val & (1 << sh_idx))) {
4541     ret = -EAGAIN;
4542     - if (ret)
4543     break;
4544     + }
4545     +
4546     dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
4547     /* Clear the contents before recreating the descriptor */
4548     memset(desc, 0x00, CAAM_CMD_SZ * 7);
4549     diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
4550     index 090582baecfe..8f09b8430893 100644
4551     --- a/drivers/crypto/stm32/stm32_crc32.c
4552     +++ b/drivers/crypto/stm32/stm32_crc32.c
4553     @@ -208,6 +208,7 @@ static struct shash_alg algs[] = {
4554     .cra_name = "crc32",
4555     .cra_driver_name = DRIVER_NAME,
4556     .cra_priority = 200,
4557     + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
4558     .cra_blocksize = CHKSUM_BLOCK_SIZE,
4559     .cra_alignmask = 3,
4560     .cra_ctxsize = sizeof(struct stm32_crc_ctx),
4561     @@ -229,6 +230,7 @@ static struct shash_alg algs[] = {
4562     .cra_name = "crc32c",
4563     .cra_driver_name = DRIVER_NAME,
4564     .cra_priority = 200,
4565     + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
4566     .cra_blocksize = CHKSUM_BLOCK_SIZE,
4567     .cra_alignmask = 3,
4568     .cra_ctxsize = sizeof(struct stm32_crc_ctx),
4569     diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
4570     index 9c80e0cb1664..6882fa2f8bad 100644
4571     --- a/drivers/crypto/talitos.c
4572     +++ b/drivers/crypto/talitos.c
4573     @@ -1138,6 +1138,10 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
4574     struct talitos_private *priv = dev_get_drvdata(dev);
4575     bool is_sec1 = has_ftr_sec1(priv);
4576    
4577     + if (!src) {
4578     + to_talitos_ptr(ptr, 0, 0, is_sec1);
4579     + return 1;
4580     + }
4581     if (sg_count == 1) {
4582     to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
4583     return sg_count;
4584     diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
4585     index ec5f9d2bc820..80cc2be6483c 100644
4586     --- a/drivers/dma/dmatest.c
4587     +++ b/drivers/dma/dmatest.c
4588     @@ -355,7 +355,7 @@ static void dmatest_callback(void *arg)
4589     {
4590     struct dmatest_done *done = arg;
4591     struct dmatest_thread *thread =
4592     - container_of(arg, struct dmatest_thread, done_wait);
4593     + container_of(done, struct dmatest_thread, test_done);
4594     if (!thread->done) {
4595     done->done = true;
4596     wake_up_all(done->wait);
4597     diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
4598     index 9c1ffe3e912b..aeb222ca3ed1 100644
4599     --- a/drivers/edac/octeon_edac-lmc.c
4600     +++ b/drivers/edac/octeon_edac-lmc.c
4601     @@ -78,6 +78,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
4602     if (!pvt->inject)
4603     int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
4604     else {
4605     + int_reg.u64 = 0;
4606     if (pvt->error_type == 1)
4607     int_reg.s.sec_err = 1;
4608     if (pvt->error_type == 2)
4609     diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
4610     index d687ca3d5049..c80ec1d03274 100644
4611     --- a/drivers/firmware/psci.c
4612     +++ b/drivers/firmware/psci.c
4613     @@ -59,7 +59,10 @@ bool psci_tos_resident_on(int cpu)
4614     return cpu == resident_cpu;
4615     }
4616    
4617     -struct psci_operations psci_ops;
4618     +struct psci_operations psci_ops = {
4619     + .conduit = PSCI_CONDUIT_NONE,
4620     + .smccc_version = SMCCC_VERSION_1_0,
4621     +};
4622    
4623     typedef unsigned long (psci_fn)(unsigned long, unsigned long,
4624     unsigned long, unsigned long);
4625     @@ -210,6 +213,22 @@ static unsigned long psci_migrate_info_up_cpu(void)
4626     0, 0, 0);
4627     }
4628    
4629     +static void set_conduit(enum psci_conduit conduit)
4630     +{
4631     + switch (conduit) {
4632     + case PSCI_CONDUIT_HVC:
4633     + invoke_psci_fn = __invoke_psci_fn_hvc;
4634     + break;
4635     + case PSCI_CONDUIT_SMC:
4636     + invoke_psci_fn = __invoke_psci_fn_smc;
4637     + break;
4638     + default:
4639     + WARN(1, "Unexpected PSCI conduit %d\n", conduit);
4640     + }
4641     +
4642     + psci_ops.conduit = conduit;
4643     +}
4644     +
4645     static int get_set_conduit_method(struct device_node *np)
4646     {
4647     const char *method;
4648     @@ -222,9 +241,9 @@ static int get_set_conduit_method(struct device_node *np)
4649     }
4650    
4651     if (!strcmp("hvc", method)) {
4652     - invoke_psci_fn = __invoke_psci_fn_hvc;
4653     + set_conduit(PSCI_CONDUIT_HVC);
4654     } else if (!strcmp("smc", method)) {
4655     - invoke_psci_fn = __invoke_psci_fn_smc;
4656     + set_conduit(PSCI_CONDUIT_SMC);
4657     } else {
4658     pr_warn("invalid \"method\" property: %s\n", method);
4659     return -EINVAL;
4660     @@ -493,9 +512,36 @@ static void __init psci_init_migrate(void)
4661     pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
4662     }
4663    
4664     +static void __init psci_init_smccc(void)
4665     +{
4666     + u32 ver = ARM_SMCCC_VERSION_1_0;
4667     + int feature;
4668     +
4669     + feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
4670     +
4671     + if (feature != PSCI_RET_NOT_SUPPORTED) {
4672     + u32 ret;
4673     + ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
4674     + if (ret == ARM_SMCCC_VERSION_1_1) {
4675     + psci_ops.smccc_version = SMCCC_VERSION_1_1;
4676     + ver = ret;
4677     + }
4678     + }
4679     +
4680     + /*
4681     + * Conveniently, the SMCCC and PSCI versions are encoded the
4682     + * same way. No, this isn't accidental.
4683     + */
4684     + pr_info("SMC Calling Convention v%d.%d\n",
4685     + PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
4686     +
4687     +}
4688     +
4689     static void __init psci_0_2_set_functions(void)
4690     {
4691     pr_info("Using standard PSCI v0.2 function IDs\n");
4692     + psci_ops.get_version = psci_get_version;
4693     +
4694     psci_function_id[PSCI_FN_CPU_SUSPEND] =
4695     PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
4696     psci_ops.cpu_suspend = psci_cpu_suspend;
4697     @@ -539,6 +585,7 @@ static int __init psci_probe(void)
4698     psci_init_migrate();
4699    
4700     if (PSCI_VERSION_MAJOR(ver) >= 1) {
4701     + psci_init_smccc();
4702     psci_init_cpu_suspend();
4703     psci_init_system_suspend();
4704     }
4705     @@ -652,9 +699,9 @@ int __init psci_acpi_init(void)
4706     pr_info("probing for conduit method from ACPI.\n");
4707    
4708     if (acpi_psci_use_hvc())
4709     - invoke_psci_fn = __invoke_psci_fn_hvc;
4710     + set_conduit(PSCI_CONDUIT_HVC);
4711     else
4712     - invoke_psci_fn = __invoke_psci_fn_smc;
4713     + set_conduit(PSCI_CONDUIT_SMC);
4714    
4715     return psci_probe();
4716     }
4717     diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
4718     index 6458c309c039..756aad504ed5 100644
4719     --- a/drivers/gpu/drm/i915/i915_pci.c
4720     +++ b/drivers/gpu/drm/i915/i915_pci.c
4721     @@ -74,19 +74,19 @@
4722     GEN_DEFAULT_PAGE_SIZES, \
4723     CURSOR_OFFSETS
4724    
4725     -static const struct intel_device_info intel_i830_info __initconst = {
4726     +static const struct intel_device_info intel_i830_info = {
4727     GEN2_FEATURES,
4728     .platform = INTEL_I830,
4729     .is_mobile = 1, .cursor_needs_physical = 1,
4730     .num_pipes = 2, /* legal, last one wins */
4731     };
4732    
4733     -static const struct intel_device_info intel_i845g_info __initconst = {
4734     +static const struct intel_device_info intel_i845g_info = {
4735     GEN2_FEATURES,
4736     .platform = INTEL_I845G,
4737     };
4738    
4739     -static const struct intel_device_info intel_i85x_info __initconst = {
4740     +static const struct intel_device_info intel_i85x_info = {
4741     GEN2_FEATURES,
4742     .platform = INTEL_I85X, .is_mobile = 1,
4743     .num_pipes = 2, /* legal, last one wins */
4744     @@ -94,7 +94,7 @@ static const struct intel_device_info intel_i85x_info __initconst = {
4745     .has_fbc = 1,
4746     };
4747    
4748     -static const struct intel_device_info intel_i865g_info __initconst = {
4749     +static const struct intel_device_info intel_i865g_info = {
4750     GEN2_FEATURES,
4751     .platform = INTEL_I865G,
4752     };
4753     @@ -108,7 +108,7 @@ static const struct intel_device_info intel_i865g_info __initconst = {
4754     GEN_DEFAULT_PAGE_SIZES, \
4755     CURSOR_OFFSETS
4756    
4757     -static const struct intel_device_info intel_i915g_info __initconst = {
4758     +static const struct intel_device_info intel_i915g_info = {
4759     GEN3_FEATURES,
4760     .platform = INTEL_I915G, .cursor_needs_physical = 1,
4761     .has_overlay = 1, .overlay_needs_physical = 1,
4762     @@ -116,7 +116,7 @@ static const struct intel_device_info intel_i915g_info __initconst = {
4763     .unfenced_needs_alignment = 1,
4764     };
4765    
4766     -static const struct intel_device_info intel_i915gm_info __initconst = {
4767     +static const struct intel_device_info intel_i915gm_info = {
4768     GEN3_FEATURES,
4769     .platform = INTEL_I915GM,
4770     .is_mobile = 1,
4771     @@ -128,7 +128,7 @@ static const struct intel_device_info intel_i915gm_info __initconst = {
4772     .unfenced_needs_alignment = 1,
4773     };
4774    
4775     -static const struct intel_device_info intel_i945g_info __initconst = {
4776     +static const struct intel_device_info intel_i945g_info = {
4777     GEN3_FEATURES,
4778     .platform = INTEL_I945G,
4779     .has_hotplug = 1, .cursor_needs_physical = 1,
4780     @@ -137,7 +137,7 @@ static const struct intel_device_info intel_i945g_info __initconst = {
4781     .unfenced_needs_alignment = 1,
4782     };
4783    
4784     -static const struct intel_device_info intel_i945gm_info __initconst = {
4785     +static const struct intel_device_info intel_i945gm_info = {
4786     GEN3_FEATURES,
4787     .platform = INTEL_I945GM, .is_mobile = 1,
4788     .has_hotplug = 1, .cursor_needs_physical = 1,
4789     @@ -148,14 +148,14 @@ static const struct intel_device_info intel_i945gm_info __initconst = {
4790     .unfenced_needs_alignment = 1,
4791     };
4792    
4793     -static const struct intel_device_info intel_g33_info __initconst = {
4794     +static const struct intel_device_info intel_g33_info = {
4795     GEN3_FEATURES,
4796     .platform = INTEL_G33,
4797     .has_hotplug = 1,
4798     .has_overlay = 1,
4799     };
4800    
4801     -static const struct intel_device_info intel_pineview_info __initconst = {
4802     +static const struct intel_device_info intel_pineview_info = {
4803     GEN3_FEATURES,
4804     .platform = INTEL_PINEVIEW, .is_mobile = 1,
4805     .has_hotplug = 1,
4806     @@ -172,7 +172,7 @@ static const struct intel_device_info intel_pineview_info __initconst = {
4807     GEN_DEFAULT_PAGE_SIZES, \
4808     CURSOR_OFFSETS
4809    
4810     -static const struct intel_device_info intel_i965g_info __initconst = {
4811     +static const struct intel_device_info intel_i965g_info = {
4812     GEN4_FEATURES,
4813     .platform = INTEL_I965G,
4814     .has_overlay = 1,
4815     @@ -180,7 +180,7 @@ static const struct intel_device_info intel_i965g_info __initconst = {
4816     .has_snoop = false,
4817     };
4818    
4819     -static const struct intel_device_info intel_i965gm_info __initconst = {
4820     +static const struct intel_device_info intel_i965gm_info = {
4821     GEN4_FEATURES,
4822     .platform = INTEL_I965GM,
4823     .is_mobile = 1, .has_fbc = 1,
4824     @@ -190,13 +190,13 @@ static const struct intel_device_info intel_i965gm_info __initconst = {
4825     .has_snoop = false,
4826     };
4827    
4828     -static const struct intel_device_info intel_g45_info __initconst = {
4829     +static const struct intel_device_info intel_g45_info = {
4830     GEN4_FEATURES,
4831     .platform = INTEL_G45,
4832     .ring_mask = RENDER_RING | BSD_RING,
4833     };
4834    
4835     -static const struct intel_device_info intel_gm45_info __initconst = {
4836     +static const struct intel_device_info intel_gm45_info = {
4837     GEN4_FEATURES,
4838     .platform = INTEL_GM45,
4839     .is_mobile = 1, .has_fbc = 1,
4840     @@ -213,12 +213,12 @@ static const struct intel_device_info intel_gm45_info __initconst = {
4841     GEN_DEFAULT_PAGE_SIZES, \
4842     CURSOR_OFFSETS
4843    
4844     -static const struct intel_device_info intel_ironlake_d_info __initconst = {
4845     +static const struct intel_device_info intel_ironlake_d_info = {
4846     GEN5_FEATURES,
4847     .platform = INTEL_IRONLAKE,
4848     };
4849    
4850     -static const struct intel_device_info intel_ironlake_m_info __initconst = {
4851     +static const struct intel_device_info intel_ironlake_m_info = {
4852     GEN5_FEATURES,
4853     .platform = INTEL_IRONLAKE,
4854     .is_mobile = 1, .has_fbc = 1,
4855     @@ -241,12 +241,12 @@ static const struct intel_device_info intel_ironlake_m_info __initconst = {
4856     GEN6_FEATURES, \
4857     .platform = INTEL_SANDYBRIDGE
4858    
4859     -static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = {
4860     +static const struct intel_device_info intel_sandybridge_d_gt1_info = {
4861     SNB_D_PLATFORM,
4862     .gt = 1,
4863     };
4864    
4865     -static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = {
4866     +static const struct intel_device_info intel_sandybridge_d_gt2_info = {
4867     SNB_D_PLATFORM,
4868     .gt = 2,
4869     };
4870     @@ -257,12 +257,12 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst =
4871     .is_mobile = 1
4872    
4873    
4874     -static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = {
4875     +static const struct intel_device_info intel_sandybridge_m_gt1_info = {
4876     SNB_M_PLATFORM,
4877     .gt = 1,
4878     };
4879    
4880     -static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = {
4881     +static const struct intel_device_info intel_sandybridge_m_gt2_info = {
4882     SNB_M_PLATFORM,
4883     .gt = 2,
4884     };
4885     @@ -286,12 +286,12 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst =
4886     .platform = INTEL_IVYBRIDGE, \
4887     .has_l3_dpf = 1
4888    
4889     -static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = {
4890     +static const struct intel_device_info intel_ivybridge_d_gt1_info = {
4891     IVB_D_PLATFORM,
4892     .gt = 1,
4893     };
4894    
4895     -static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
4896     +static const struct intel_device_info intel_ivybridge_d_gt2_info = {
4897     IVB_D_PLATFORM,
4898     .gt = 2,
4899     };
4900     @@ -302,17 +302,17 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
4901     .is_mobile = 1, \
4902     .has_l3_dpf = 1
4903    
4904     -static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = {
4905     +static const struct intel_device_info intel_ivybridge_m_gt1_info = {
4906     IVB_M_PLATFORM,
4907     .gt = 1,
4908     };
4909    
4910     -static const struct intel_device_info intel_ivybridge_m_gt2_info __initconst = {
4911     +static const struct intel_device_info intel_ivybridge_m_gt2_info = {
4912     IVB_M_PLATFORM,
4913     .gt = 2,
4914     };
4915    
4916     -static const struct intel_device_info intel_ivybridge_q_info __initconst = {
4917     +static const struct intel_device_info intel_ivybridge_q_info = {
4918     GEN7_FEATURES,
4919     .platform = INTEL_IVYBRIDGE,
4920     .gt = 2,
4921     @@ -320,7 +320,7 @@ static const struct intel_device_info intel_ivybridge_q_info __initconst = {
4922     .has_l3_dpf = 1,
4923     };
4924    
4925     -static const struct intel_device_info intel_valleyview_info __initconst = {
4926     +static const struct intel_device_info intel_valleyview_info = {
4927     .platform = INTEL_VALLEYVIEW,
4928     .gen = 7,
4929     .is_lp = 1,
4930     @@ -356,17 +356,17 @@ static const struct intel_device_info intel_valleyview_info __initconst = {
4931     .platform = INTEL_HASWELL, \
4932     .has_l3_dpf = 1
4933    
4934     -static const struct intel_device_info intel_haswell_gt1_info __initconst = {
4935     +static const struct intel_device_info intel_haswell_gt1_info = {
4936     HSW_PLATFORM,
4937     .gt = 1,
4938     };
4939    
4940     -static const struct intel_device_info intel_haswell_gt2_info __initconst = {
4941     +static const struct intel_device_info intel_haswell_gt2_info = {
4942     HSW_PLATFORM,
4943     .gt = 2,
4944     };
4945    
4946     -static const struct intel_device_info intel_haswell_gt3_info __initconst = {
4947     +static const struct intel_device_info intel_haswell_gt3_info = {
4948     HSW_PLATFORM,
4949     .gt = 3,
4950     };
4951     @@ -386,17 +386,17 @@ static const struct intel_device_info intel_haswell_gt3_info __initconst = {
4952     .gen = 8, \
4953     .platform = INTEL_BROADWELL
4954    
4955     -static const struct intel_device_info intel_broadwell_gt1_info __initconst = {
4956     +static const struct intel_device_info intel_broadwell_gt1_info = {
4957     BDW_PLATFORM,
4958     .gt = 1,
4959     };
4960    
4961     -static const struct intel_device_info intel_broadwell_gt2_info __initconst = {
4962     +static const struct intel_device_info intel_broadwell_gt2_info = {
4963     BDW_PLATFORM,
4964     .gt = 2,
4965     };
4966    
4967     -static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
4968     +static const struct intel_device_info intel_broadwell_rsvd_info = {
4969     BDW_PLATFORM,
4970     .gt = 3,
4971     /* According to the device ID those devices are GT3, they were
4972     @@ -404,13 +404,13 @@ static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
4973     */
4974     };
4975    
4976     -static const struct intel_device_info intel_broadwell_gt3_info __initconst = {
4977     +static const struct intel_device_info intel_broadwell_gt3_info = {
4978     BDW_PLATFORM,
4979     .gt = 3,
4980     .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
4981     };
4982    
4983     -static const struct intel_device_info intel_cherryview_info __initconst = {
4984     +static const struct intel_device_info intel_cherryview_info = {
4985     .gen = 8, .num_pipes = 3,
4986     .has_hotplug = 1,
4987     .is_lp = 1,
4988     @@ -453,12 +453,12 @@ static const struct intel_device_info intel_cherryview_info __initconst = {
4989     .gen = 9, \
4990     .platform = INTEL_SKYLAKE
4991    
4992     -static const struct intel_device_info intel_skylake_gt1_info __initconst = {
4993     +static const struct intel_device_info intel_skylake_gt1_info = {
4994     SKL_PLATFORM,
4995     .gt = 1,
4996     };
4997    
4998     -static const struct intel_device_info intel_skylake_gt2_info __initconst = {
4999     +static const struct intel_device_info intel_skylake_gt2_info = {
5000     SKL_PLATFORM,
5001     .gt = 2,
5002     };
5003     @@ -468,12 +468,12 @@ static const struct intel_device_info intel_skylake_gt2_info __initconst = {
5004     .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
5005    
5006    
5007     -static const struct intel_device_info intel_skylake_gt3_info __initconst = {
5008     +static const struct intel_device_info intel_skylake_gt3_info = {
5009     SKL_GT3_PLUS_PLATFORM,
5010     .gt = 3,
5011     };
5012    
5013     -static const struct intel_device_info intel_skylake_gt4_info __initconst = {
5014     +static const struct intel_device_info intel_skylake_gt4_info = {
5015     SKL_GT3_PLUS_PLATFORM,
5016     .gt = 4,
5017     };
5018     @@ -509,13 +509,13 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
5019     IVB_CURSOR_OFFSETS, \
5020     BDW_COLORS
5021    
5022     -static const struct intel_device_info intel_broxton_info __initconst = {
5023     +static const struct intel_device_info intel_broxton_info = {
5024     GEN9_LP_FEATURES,
5025     .platform = INTEL_BROXTON,
5026     .ddb_size = 512,
5027     };
5028    
5029     -static const struct intel_device_info intel_geminilake_info __initconst = {
5030     +static const struct intel_device_info intel_geminilake_info = {
5031     GEN9_LP_FEATURES,
5032     .platform = INTEL_GEMINILAKE,
5033     .ddb_size = 1024,
5034     @@ -527,17 +527,17 @@ static const struct intel_device_info intel_geminilake_info __initconst = {
5035     .gen = 9, \
5036     .platform = INTEL_KABYLAKE
5037    
5038     -static const struct intel_device_info intel_kabylake_gt1_info __initconst = {
5039     +static const struct intel_device_info intel_kabylake_gt1_info = {
5040     KBL_PLATFORM,
5041     .gt = 1,
5042     };
5043    
5044     -static const struct intel_device_info intel_kabylake_gt2_info __initconst = {
5045     +static const struct intel_device_info intel_kabylake_gt2_info = {
5046     KBL_PLATFORM,
5047     .gt = 2,
5048     };
5049    
5050     -static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
5051     +static const struct intel_device_info intel_kabylake_gt3_info = {
5052     KBL_PLATFORM,
5053     .gt = 3,
5054     .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
5055     @@ -548,17 +548,17 @@ static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
5056     .gen = 9, \
5057     .platform = INTEL_COFFEELAKE
5058    
5059     -static const struct intel_device_info intel_coffeelake_gt1_info __initconst = {
5060     +static const struct intel_device_info intel_coffeelake_gt1_info = {
5061     CFL_PLATFORM,
5062     .gt = 1,
5063     };
5064    
5065     -static const struct intel_device_info intel_coffeelake_gt2_info __initconst = {
5066     +static const struct intel_device_info intel_coffeelake_gt2_info = {
5067     CFL_PLATFORM,
5068     .gt = 2,
5069     };
5070    
5071     -static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
5072     +static const struct intel_device_info intel_coffeelake_gt3_info = {
5073     CFL_PLATFORM,
5074     .gt = 3,
5075     .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
5076     @@ -569,7 +569,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
5077     .ddb_size = 1024, \
5078     GLK_COLORS
5079    
5080     -static const struct intel_device_info intel_cannonlake_gt2_info __initconst = {
5081     +static const struct intel_device_info intel_cannonlake_gt2_info = {
5082     GEN10_FEATURES,
5083     .is_alpha_support = 1,
5084     .platform = INTEL_CANNONLAKE,
5085     diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
5086     index 158438bb0389..add4b2434aa3 100644
5087     --- a/drivers/gpu/drm/i915/intel_dp.c
5088     +++ b/drivers/gpu/drm/i915/intel_dp.c
5089     @@ -5336,6 +5336,12 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5090     */
5091     final->t8 = 1;
5092     final->t9 = 1;
5093     +
5094     + /*
5095     + * HW has only a 100msec granularity for t11_t12 so round it up
5096     + * accordingly.
5097     + */
5098     + final->t11_t12 = roundup(final->t11_t12, 100 * 10);
5099     }
5100    
5101     static void
5102     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
5103     index 0c3f608131cf..b5f85d6f6bef 100644
5104     --- a/drivers/hid/hid-core.c
5105     +++ b/drivers/hid/hid-core.c
5106     @@ -2643,7 +2643,6 @@ static const struct hid_device_id hid_ignore_list[] = {
5107     { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
5108     { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
5109     { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
5110     - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
5111     { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
5112     { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
5113     { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
5114     @@ -2913,6 +2912,17 @@ bool hid_ignore(struct hid_device *hdev)
5115     strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
5116     return true;
5117     break;
5118     + case USB_VENDOR_ID_ELAN:
5119     + /*
5120     + * Many Elan devices have a product id of 0x0401 and are handled
5121     + * by the elan_i2c input driver. But the ACPI HID ELAN0800 dev
5122     + * is not (and cannot be) handled by that driver ->
5123     + * Ignore all 0x0401 devs except for the ELAN0800 dev.
5124     + */
5125     + if (hdev->product == 0x0401 &&
5126     + strncmp(hdev->name, "ELAN0800", 8) != 0)
5127     + return true;
5128     + break;
5129     }
5130    
5131     if (hdev->type == HID_TYPE_USBMOUSE &&
5132     diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
5133     index 2afaa8226342..46f977177faf 100644
5134     --- a/drivers/media/dvb-core/dvb_frontend.c
5135     +++ b/drivers/media/dvb-core/dvb_frontend.c
5136     @@ -2110,7 +2110,7 @@ static int dvb_frontend_handle_ioctl(struct file *file,
5137     struct dvb_frontend *fe = dvbdev->priv;
5138     struct dvb_frontend_private *fepriv = fe->frontend_priv;
5139     struct dtv_frontend_properties *c = &fe->dtv_property_cache;
5140     - int i, err;
5141     + int i, err = -EOPNOTSUPP;
5142    
5143     dev_dbg(fe->dvb->device, "%s:\n", __func__);
5144    
5145     @@ -2145,6 +2145,7 @@ static int dvb_frontend_handle_ioctl(struct file *file,
5146     }
5147     }
5148     kfree(tvp);
5149     + err = 0;
5150     break;
5151     }
5152     case FE_GET_PROPERTY: {
5153     @@ -2196,6 +2197,7 @@ static int dvb_frontend_handle_ioctl(struct file *file,
5154     return -EFAULT;
5155     }
5156     kfree(tvp);
5157     + err = 0;
5158     break;
5159     }
5160    
5161     diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
5162     index 0ee0df53b91b..79d5d89bc95e 100644
5163     --- a/drivers/media/dvb-frontends/ascot2e.c
5164     +++ b/drivers/media/dvb-frontends/ascot2e.c
5165     @@ -155,7 +155,9 @@ static int ascot2e_write_regs(struct ascot2e_priv *priv,
5166    
5167     static int ascot2e_write_reg(struct ascot2e_priv *priv, u8 reg, u8 val)
5168     {
5169     - return ascot2e_write_regs(priv, reg, &val, 1);
5170     + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5171     +
5172     + return ascot2e_write_regs(priv, reg, &tmp, 1);
5173     }
5174    
5175     static int ascot2e_read_regs(struct ascot2e_priv *priv,
5176     diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
5177     index 48ee9bc00c06..ccbd84fd6428 100644
5178     --- a/drivers/media/dvb-frontends/cxd2841er.c
5179     +++ b/drivers/media/dvb-frontends/cxd2841er.c
5180     @@ -257,7 +257,9 @@ static int cxd2841er_write_regs(struct cxd2841er_priv *priv,
5181     static int cxd2841er_write_reg(struct cxd2841er_priv *priv,
5182     u8 addr, u8 reg, u8 val)
5183     {
5184     - return cxd2841er_write_regs(priv, addr, reg, &val, 1);
5185     + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5186     +
5187     + return cxd2841er_write_regs(priv, addr, reg, &tmp, 1);
5188     }
5189    
5190     static int cxd2841er_read_regs(struct cxd2841er_priv *priv,
5191     diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
5192     index 4bf5a551ba40..2ab8d83e5576 100644
5193     --- a/drivers/media/dvb-frontends/helene.c
5194     +++ b/drivers/media/dvb-frontends/helene.c
5195     @@ -331,7 +331,9 @@ static int helene_write_regs(struct helene_priv *priv,
5196    
5197     static int helene_write_reg(struct helene_priv *priv, u8 reg, u8 val)
5198     {
5199     - return helene_write_regs(priv, reg, &val, 1);
5200     + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5201     +
5202     + return helene_write_regs(priv, reg, &tmp, 1);
5203     }
5204    
5205     static int helene_read_regs(struct helene_priv *priv,
5206     diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
5207     index 68d759c4c52e..5c8b405f2ddc 100644
5208     --- a/drivers/media/dvb-frontends/horus3a.c
5209     +++ b/drivers/media/dvb-frontends/horus3a.c
5210     @@ -89,7 +89,9 @@ static int horus3a_write_regs(struct horus3a_priv *priv,
5211    
5212     static int horus3a_write_reg(struct horus3a_priv *priv, u8 reg, u8 val)
5213     {
5214     - return horus3a_write_regs(priv, reg, &val, 1);
5215     + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5216     +
5217     + return horus3a_write_regs(priv, reg, &tmp, 1);
5218     }
5219    
5220     static int horus3a_enter_power_save(struct horus3a_priv *priv)
5221     diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
5222     index 5bb1e73a10b4..ce7c443d3eac 100644
5223     --- a/drivers/media/dvb-frontends/itd1000.c
5224     +++ b/drivers/media/dvb-frontends/itd1000.c
5225     @@ -95,8 +95,9 @@ static int itd1000_read_reg(struct itd1000_state *state, u8 reg)
5226    
5227     static inline int itd1000_write_reg(struct itd1000_state *state, u8 r, u8 v)
5228     {
5229     - int ret = itd1000_write_regs(state, r, &v, 1);
5230     - state->shadow[r] = v;
5231     + u8 tmp = v; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5232     + int ret = itd1000_write_regs(state, r, &tmp, 1);
5233     + state->shadow[r] = tmp;
5234     return ret;
5235     }
5236    
5237     diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
5238     index 961b9a2508e0..0b23cbc021b8 100644
5239     --- a/drivers/media/dvb-frontends/mt312.c
5240     +++ b/drivers/media/dvb-frontends/mt312.c
5241     @@ -142,7 +142,10 @@ static inline int mt312_readreg(struct mt312_state *state,
5242     static inline int mt312_writereg(struct mt312_state *state,
5243     const enum mt312_reg_addr reg, const u8 val)
5244     {
5245     - return mt312_write(state, reg, &val, 1);
5246     + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5247     +
5248     +
5249     + return mt312_write(state, reg, &tmp, 1);
5250     }
5251    
5252     static inline u32 mt312_div(u32 a, u32 b)
5253     diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
5254     index 02347598277a..db5dde3215f0 100644
5255     --- a/drivers/media/dvb-frontends/stb0899_drv.c
5256     +++ b/drivers/media/dvb-frontends/stb0899_drv.c
5257     @@ -539,7 +539,8 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data,
5258    
5259     int stb0899_write_reg(struct stb0899_state *state, unsigned int reg, u8 data)
5260     {
5261     - return stb0899_write_regs(state, reg, &data, 1);
5262     + u8 tmp = data;
5263     + return stb0899_write_regs(state, reg, &tmp, 1);
5264     }
5265    
5266     /*
5267     diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
5268     index 17a955d0031b..75509bec66e4 100644
5269     --- a/drivers/media/dvb-frontends/stb6100.c
5270     +++ b/drivers/media/dvb-frontends/stb6100.c
5271     @@ -226,12 +226,14 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st
5272    
5273     static int stb6100_write_reg(struct stb6100_state *state, u8 reg, u8 data)
5274     {
5275     + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5276     +
5277     if (unlikely(reg >= STB6100_NUMREGS)) {
5278     dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg);
5279     return -EREMOTEIO;
5280     }
5281     - data = (data & stb6100_template[reg].mask) | stb6100_template[reg].set;
5282     - return stb6100_write_reg_range(state, &data, reg, 1);
5283     + tmp = (tmp & stb6100_template[reg].mask) | stb6100_template[reg].set;
5284     + return stb6100_write_reg_range(state, &tmp, reg, 1);
5285     }
5286    
5287    
5288     diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
5289     index f3529df8211d..1a726196c126 100644
5290     --- a/drivers/media/dvb-frontends/stv0367.c
5291     +++ b/drivers/media/dvb-frontends/stv0367.c
5292     @@ -166,7 +166,9 @@ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
5293    
5294     static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
5295     {
5296     - return stv0367_writeregs(state, reg, &data, 1);
5297     + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5298     +
5299     + return stv0367_writeregs(state, reg, &tmp, 1);
5300     }
5301    
5302     static u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
5303     diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
5304     index 7ef469c0c866..2695e1eb6d9c 100644
5305     --- a/drivers/media/dvb-frontends/stv090x.c
5306     +++ b/drivers/media/dvb-frontends/stv090x.c
5307     @@ -755,7 +755,9 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8
5308    
5309     static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 data)
5310     {
5311     - return stv090x_write_regs(state, reg, &data, 1);
5312     + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5313     +
5314     + return stv090x_write_regs(state, reg, &tmp, 1);
5315     }
5316    
5317     static int stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable)
5318     diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
5319     index 66eba38f1014..7e8e01389c55 100644
5320     --- a/drivers/media/dvb-frontends/stv6110x.c
5321     +++ b/drivers/media/dvb-frontends/stv6110x.c
5322     @@ -97,7 +97,9 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da
5323    
5324     static int stv6110x_write_reg(struct stv6110x_state *stv6110x, u8 reg, u8 data)
5325     {
5326     - return stv6110x_write_regs(stv6110x, reg, &data, 1);
5327     + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5328     +
5329     + return stv6110x_write_regs(stv6110x, reg, &tmp, 1);
5330     }
5331    
5332     static int stv6110x_init(struct dvb_frontend *fe)
5333     diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
5334     index 931e5c98da8a..b879e1571469 100644
5335     --- a/drivers/media/dvb-frontends/ts2020.c
5336     +++ b/drivers/media/dvb-frontends/ts2020.c
5337     @@ -368,7 +368,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc,
5338     gain2 = clamp_t(long, gain2, 0, 13);
5339     v_agc = clamp_t(long, v_agc, 400, 1100);
5340    
5341     - *_gain = -(gain1 * 2330 +
5342     + *_gain = -((__s64)gain1 * 2330 +
5343     gain2 * 3500 +
5344     v_agc * 24 / 10 * 10 +
5345     10000);
5346     @@ -386,7 +386,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc,
5347     gain3 = clamp_t(long, gain3, 0, 6);
5348     v_agc = clamp_t(long, v_agc, 600, 1600);
5349    
5350     - *_gain = -(gain1 * 2650 +
5351     + *_gain = -((__s64)gain1 * 2650 +
5352     gain2 * 3380 +
5353     gain3 * 2850 +
5354     v_agc * 176 / 100 * 10 -
5355     diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
5356     index 623355fc2666..3208b866d1cb 100644
5357     --- a/drivers/media/dvb-frontends/zl10039.c
5358     +++ b/drivers/media/dvb-frontends/zl10039.c
5359     @@ -134,7 +134,9 @@ static inline int zl10039_writereg(struct zl10039_state *state,
5360     const enum zl10039_reg_addr reg,
5361     const u8 val)
5362     {
5363     - return zl10039_write(state, reg, &val, 1);
5364     + const u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5365     +
5366     + return zl10039_write(state, reg, &tmp, 1);
5367     }
5368    
5369     static int zl10039_init(struct dvb_frontend *fe)
5370     diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
5371     index 5cdf95bdc4d1..0de136b0d8c8 100644
5372     --- a/drivers/media/platform/vivid/vivid-core.h
5373     +++ b/drivers/media/platform/vivid/vivid-core.h
5374     @@ -154,6 +154,7 @@ struct vivid_dev {
5375     struct v4l2_ctrl_handler ctrl_hdl_streaming;
5376     struct v4l2_ctrl_handler ctrl_hdl_sdtv_cap;
5377     struct v4l2_ctrl_handler ctrl_hdl_loop_cap;
5378     + struct v4l2_ctrl_handler ctrl_hdl_fb;
5379     struct video_device vid_cap_dev;
5380     struct v4l2_ctrl_handler ctrl_hdl_vid_cap;
5381     struct video_device vid_out_dev;
5382     diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
5383     index 34731f71cc00..3f9d354827af 100644
5384     --- a/drivers/media/platform/vivid/vivid-ctrls.c
5385     +++ b/drivers/media/platform/vivid/vivid-ctrls.c
5386     @@ -120,9 +120,6 @@ static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
5387     clear_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
5388     clear_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
5389     break;
5390     - case VIVID_CID_CLEAR_FB:
5391     - vivid_clear_fb(dev);
5392     - break;
5393     case VIVID_CID_BUTTON:
5394     dev->button_pressed = 30;
5395     break;
5396     @@ -274,8 +271,28 @@ static const struct v4l2_ctrl_config vivid_ctrl_disconnect = {
5397     .type = V4L2_CTRL_TYPE_BUTTON,
5398     };
5399    
5400     +
5401     +/* Framebuffer Controls */
5402     +
5403     +static int vivid_fb_s_ctrl(struct v4l2_ctrl *ctrl)
5404     +{
5405     + struct vivid_dev *dev = container_of(ctrl->handler,
5406     + struct vivid_dev, ctrl_hdl_fb);
5407     +
5408     + switch (ctrl->id) {
5409     + case VIVID_CID_CLEAR_FB:
5410     + vivid_clear_fb(dev);
5411     + break;
5412     + }
5413     + return 0;
5414     +}
5415     +
5416     +static const struct v4l2_ctrl_ops vivid_fb_ctrl_ops = {
5417     + .s_ctrl = vivid_fb_s_ctrl,
5418     +};
5419     +
5420     static const struct v4l2_ctrl_config vivid_ctrl_clear_fb = {
5421     - .ops = &vivid_user_gen_ctrl_ops,
5422     + .ops = &vivid_fb_ctrl_ops,
5423     .id = VIVID_CID_CLEAR_FB,
5424     .name = "Clear Framebuffer",
5425     .type = V4L2_CTRL_TYPE_BUTTON,
5426     @@ -1357,6 +1374,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
5427     struct v4l2_ctrl_handler *hdl_streaming = &dev->ctrl_hdl_streaming;
5428     struct v4l2_ctrl_handler *hdl_sdtv_cap = &dev->ctrl_hdl_sdtv_cap;
5429     struct v4l2_ctrl_handler *hdl_loop_cap = &dev->ctrl_hdl_loop_cap;
5430     + struct v4l2_ctrl_handler *hdl_fb = &dev->ctrl_hdl_fb;
5431     struct v4l2_ctrl_handler *hdl_vid_cap = &dev->ctrl_hdl_vid_cap;
5432     struct v4l2_ctrl_handler *hdl_vid_out = &dev->ctrl_hdl_vid_out;
5433     struct v4l2_ctrl_handler *hdl_vbi_cap = &dev->ctrl_hdl_vbi_cap;
5434     @@ -1384,10 +1402,12 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
5435     v4l2_ctrl_new_custom(hdl_sdtv_cap, &vivid_ctrl_class, NULL);
5436     v4l2_ctrl_handler_init(hdl_loop_cap, 1);
5437     v4l2_ctrl_new_custom(hdl_loop_cap, &vivid_ctrl_class, NULL);
5438     + v4l2_ctrl_handler_init(hdl_fb, 1);
5439     + v4l2_ctrl_new_custom(hdl_fb, &vivid_ctrl_class, NULL);
5440     v4l2_ctrl_handler_init(hdl_vid_cap, 55);
5441     v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_class, NULL);
5442     v4l2_ctrl_handler_init(hdl_vid_out, 26);
5443     - if (!no_error_inj)
5444     + if (!no_error_inj || dev->has_fb)
5445     v4l2_ctrl_new_custom(hdl_vid_out, &vivid_ctrl_class, NULL);
5446     v4l2_ctrl_handler_init(hdl_vbi_cap, 21);
5447     v4l2_ctrl_new_custom(hdl_vbi_cap, &vivid_ctrl_class, NULL);
5448     @@ -1561,7 +1581,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
5449     v4l2_ctrl_new_custom(hdl_loop_cap, &vivid_ctrl_loop_video, NULL);
5450    
5451     if (dev->has_fb)
5452     - v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_clear_fb, NULL);
5453     + v4l2_ctrl_new_custom(hdl_fb, &vivid_ctrl_clear_fb, NULL);
5454    
5455     if (dev->has_radio_rx) {
5456     v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_hw_seek_mode, NULL);
5457     @@ -1658,6 +1678,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
5458     v4l2_ctrl_add_handler(hdl_vid_cap, hdl_streaming, NULL);
5459     v4l2_ctrl_add_handler(hdl_vid_cap, hdl_sdtv_cap, NULL);
5460     v4l2_ctrl_add_handler(hdl_vid_cap, hdl_loop_cap, NULL);
5461     + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_fb, NULL);
5462     if (hdl_vid_cap->error)
5463     return hdl_vid_cap->error;
5464     dev->vid_cap_dev.ctrl_handler = hdl_vid_cap;
5465     @@ -1666,6 +1687,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
5466     v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_gen, NULL);
5467     v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_aud, NULL);
5468     v4l2_ctrl_add_handler(hdl_vid_out, hdl_streaming, NULL);
5469     + v4l2_ctrl_add_handler(hdl_vid_out, hdl_fb, NULL);
5470     if (hdl_vid_out->error)
5471     return hdl_vid_out->error;
5472     dev->vid_out_dev.ctrl_handler = hdl_vid_out;
5473     @@ -1725,4 +1747,5 @@ void vivid_free_controls(struct vivid_dev *dev)
5474     v4l2_ctrl_handler_free(&dev->ctrl_hdl_streaming);
5475     v4l2_ctrl_handler_free(&dev->ctrl_hdl_sdtv_cap);
5476     v4l2_ctrl_handler_free(&dev->ctrl_hdl_loop_cap);
5477     + v4l2_ctrl_handler_free(&dev->ctrl_hdl_fb);
5478     }
5479     diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
5480     index 5e320fa4a795..be26c029546b 100644
5481     --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
5482     +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
5483     @@ -494,18 +494,23 @@ static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
5484    
5485     static int lme2510_return_status(struct dvb_usb_device *d)
5486     {
5487     - int ret = 0;
5488     + int ret;
5489     u8 *data;
5490    
5491     - data = kzalloc(10, GFP_KERNEL);
5492     + data = kzalloc(6, GFP_KERNEL);
5493     if (!data)
5494     return -ENOMEM;
5495    
5496     - ret |= usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
5497     - 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200);
5498     - info("Firmware Status: %x (%x)", ret , data[2]);
5499     + ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
5500     + 0x06, 0x80, 0x0302, 0x00,
5501     + data, 0x6, 200);
5502     + if (ret != 6)
5503     + ret = -EINVAL;
5504     + else
5505     + ret = data[2];
5506     +
5507     + info("Firmware Status: %6ph", data);
5508    
5509     - ret = (ret < 0) ? -ENODEV : data[2];
5510     kfree(data);
5511     return ret;
5512     }
5513     @@ -1071,8 +1076,6 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
5514    
5515     if (adap->fe[0]) {
5516     info("FE Found M88RS2000");
5517     - dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config,
5518     - &d->i2c_adap);
5519     st->i2c_tuner_gate_w = 5;
5520     st->i2c_tuner_gate_r = 5;
5521     st->i2c_tuner_addr = 0x60;
5522     @@ -1138,17 +1141,18 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
5523     ret = st->tuner_config;
5524     break;
5525     case TUNER_RS2000:
5526     - ret = st->tuner_config;
5527     + if (dvb_attach(ts2020_attach, adap->fe[0],
5528     + &ts2020_config, &d->i2c_adap))
5529     + ret = st->tuner_config;
5530     break;
5531     default:
5532     break;
5533     }
5534    
5535     - if (ret)
5536     + if (ret) {
5537     info("TUN Found %s tuner", tun_msg[ret]);
5538     - else {
5539     - info("TUN No tuner found --- resetting device");
5540     - lme_coldreset(d);
5541     + } else {
5542     + info("TUN No tuner found");
5543     return -ENODEV;
5544     }
5545    
5546     @@ -1189,6 +1193,7 @@ static int lme2510_get_adapter_count(struct dvb_usb_device *d)
5547     static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
5548     {
5549     struct lme2510_state *st = d->priv;
5550     + int status;
5551    
5552     usb_reset_configuration(d->udev);
5553    
5554     @@ -1197,12 +1202,16 @@ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
5555    
5556     st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware;
5557    
5558     - if (lme2510_return_status(d) == 0x44) {
5559     + status = lme2510_return_status(d);
5560     + if (status == 0x44) {
5561     *name = lme_firmware_switch(d, 0);
5562     return COLD;
5563     }
5564    
5565     - return 0;
5566     + if (status != 0x47)
5567     + return -EINVAL;
5568     +
5569     + return WARM;
5570     }
5571    
5572     static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
5573     diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
5574     index 37dea0adc695..cfe86b4864b3 100644
5575     --- a/drivers/media/usb/dvb-usb/cxusb.c
5576     +++ b/drivers/media/usb/dvb-usb/cxusb.c
5577     @@ -677,6 +677,8 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component,
5578     case XC2028_RESET_CLK:
5579     deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg);
5580     break;
5581     + case XC2028_I2C_FLUSH:
5582     + break;
5583     default:
5584     deb_info("%s: unknown command %d, arg %d\n", __func__,
5585     command, arg);
5586     diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
5587     index 366b05529915..a9968fb1e8e4 100644
5588     --- a/drivers/media/usb/dvb-usb/dib0700_devices.c
5589     +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
5590     @@ -430,6 +430,7 @@ static int stk7700ph_xc3028_callback(void *ptr, int component,
5591     state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
5592     break;
5593     case XC2028_RESET_CLK:
5594     + case XC2028_I2C_FLUSH:
5595     break;
5596     default:
5597     err("%s: unknown command %d, arg %d\n", __func__,
5598     diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
5599     index dbe29c6c4d8b..1e8cbaf36896 100644
5600     --- a/drivers/media/usb/hdpvr/hdpvr-core.c
5601     +++ b/drivers/media/usb/hdpvr/hdpvr-core.c
5602     @@ -292,7 +292,7 @@ static int hdpvr_probe(struct usb_interface *interface,
5603     /* register v4l2_device early so it can be used for printks */
5604     if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
5605     dev_err(&interface->dev, "v4l2_device_register failed\n");
5606     - goto error;
5607     + goto error_free_dev;
5608     }
5609    
5610     mutex_init(&dev->io_mutex);
5611     @@ -301,7 +301,7 @@ static int hdpvr_probe(struct usb_interface *interface,
5612     dev->usbc_buf = kmalloc(64, GFP_KERNEL);
5613     if (!dev->usbc_buf) {
5614     v4l2_err(&dev->v4l2_dev, "Out of memory\n");
5615     - goto error;
5616     + goto error_v4l2_unregister;
5617     }
5618    
5619     init_waitqueue_head(&dev->wait_buffer);
5620     @@ -339,13 +339,13 @@ static int hdpvr_probe(struct usb_interface *interface,
5621     }
5622     if (!dev->bulk_in_endpointAddr) {
5623     v4l2_err(&dev->v4l2_dev, "Could not find bulk-in endpoint\n");
5624     - goto error;
5625     + goto error_put_usb;
5626     }
5627    
5628     /* init the device */
5629     if (hdpvr_device_init(dev)) {
5630     v4l2_err(&dev->v4l2_dev, "device init failed\n");
5631     - goto error;
5632     + goto error_put_usb;
5633     }
5634    
5635     mutex_lock(&dev->io_mutex);
5636     @@ -353,7 +353,7 @@ static int hdpvr_probe(struct usb_interface *interface,
5637     mutex_unlock(&dev->io_mutex);
5638     v4l2_err(&dev->v4l2_dev,
5639     "allocating transfer buffers failed\n");
5640     - goto error;
5641     + goto error_put_usb;
5642     }
5643     mutex_unlock(&dev->io_mutex);
5644    
5645     @@ -361,7 +361,7 @@ static int hdpvr_probe(struct usb_interface *interface,
5646     retval = hdpvr_register_i2c_adapter(dev);
5647     if (retval < 0) {
5648     v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
5649     - goto error;
5650     + goto error_free_buffers;
5651     }
5652    
5653     client = hdpvr_register_ir_rx_i2c(dev);
5654     @@ -394,13 +394,17 @@ static int hdpvr_probe(struct usb_interface *interface,
5655     reg_fail:
5656     #if IS_ENABLED(CONFIG_I2C)
5657     i2c_del_adapter(&dev->i2c_adapter);
5658     +error_free_buffers:
5659     #endif
5660     + hdpvr_free_buffers(dev);
5661     +error_put_usb:
5662     + usb_put_dev(dev->udev);
5663     + kfree(dev->usbc_buf);
5664     +error_v4l2_unregister:
5665     + v4l2_device_unregister(&dev->v4l2_dev);
5666     +error_free_dev:
5667     + kfree(dev);
5668     error:
5669     - if (dev) {
5670     - flush_work(&dev->worker);
5671     - /* this frees allocated memory */
5672     - hdpvr_delete(dev);
5673     - }
5674     return retval;
5675     }
5676    
5677     diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
5678     index 821f2aa299ae..cbeea8343a5c 100644
5679     --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
5680     +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
5681     @@ -18,8 +18,18 @@
5682     #include <linux/videodev2.h>
5683     #include <linux/v4l2-subdev.h>
5684     #include <media/v4l2-dev.h>
5685     +#include <media/v4l2-fh.h>
5686     +#include <media/v4l2-ctrls.h>
5687     #include <media/v4l2-ioctl.h>
5688    
5689     +/* Use the same argument order as copy_in_user */
5690     +#define assign_in_user(to, from) \
5691     +({ \
5692     + typeof(*from) __assign_tmp; \
5693     + \
5694     + get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \
5695     +})
5696     +
5697     static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5698     {
5699     long ret = -ENOIOCTLCMD;
5700     @@ -46,135 +56,75 @@ struct v4l2_window32 {
5701     __u8 global_alpha;
5702     };
5703    
5704     -static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
5705     -{
5706     - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) ||
5707     - copy_from_user(&kp->w, &up->w, sizeof(up->w)) ||
5708     - get_user(kp->field, &up->field) ||
5709     - get_user(kp->chromakey, &up->chromakey) ||
5710     - get_user(kp->clipcount, &up->clipcount) ||
5711     - get_user(kp->global_alpha, &up->global_alpha))
5712     - return -EFAULT;
5713     - if (kp->clipcount > 2048)
5714     - return -EINVAL;
5715     - if (kp->clipcount) {
5716     - struct v4l2_clip32 __user *uclips;
5717     - struct v4l2_clip __user *kclips;
5718     - int n = kp->clipcount;
5719     - compat_caddr_t p;
5720     -
5721     - if (get_user(p, &up->clips))
5722     - return -EFAULT;
5723     - uclips = compat_ptr(p);
5724     - kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip));
5725     - kp->clips = kclips;
5726     - while (--n >= 0) {
5727     - if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
5728     - return -EFAULT;
5729     - if (put_user(n ? kclips + 1 : NULL, &kclips->next))
5730     - return -EFAULT;
5731     - uclips += 1;
5732     - kclips += 1;
5733     - }
5734     - } else
5735     - kp->clips = NULL;
5736     - return 0;
5737     -}
5738     -
5739     -static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
5740     -{
5741     - if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) ||
5742     - put_user(kp->field, &up->field) ||
5743     - put_user(kp->chromakey, &up->chromakey) ||
5744     - put_user(kp->clipcount, &up->clipcount) ||
5745     - put_user(kp->global_alpha, &up->global_alpha))
5746     - return -EFAULT;
5747     - return 0;
5748     -}
5749     -
5750     -static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
5751     -{
5752     - if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
5753     - return -EFAULT;
5754     - return 0;
5755     -}
5756     -
5757     -static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
5758     - struct v4l2_pix_format_mplane __user *up)
5759     -{
5760     - if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
5761     - return -EFAULT;
5762     - return 0;
5763     -}
5764     -
5765     -static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
5766     +static int get_v4l2_window32(struct v4l2_window __user *kp,
5767     + struct v4l2_window32 __user *up,
5768     + void __user *aux_buf, u32 aux_space)
5769     {
5770     - if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
5771     - return -EFAULT;
5772     - return 0;
5773     -}
5774     -
5775     -static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
5776     - struct v4l2_pix_format_mplane __user *up)
5777     -{
5778     - if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
5779     - return -EFAULT;
5780     - return 0;
5781     -}
5782     -
5783     -static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
5784     -{
5785     - if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
5786     - return -EFAULT;
5787     - return 0;
5788     -}
5789     -
5790     -static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
5791     -{
5792     - if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format)))
5793     + struct v4l2_clip32 __user *uclips;
5794     + struct v4l2_clip __user *kclips;
5795     + compat_caddr_t p;
5796     + u32 clipcount;
5797     +
5798     + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
5799     + copy_in_user(&kp->w, &up->w, sizeof(up->w)) ||
5800     + assign_in_user(&kp->field, &up->field) ||
5801     + assign_in_user(&kp->chromakey, &up->chromakey) ||
5802     + assign_in_user(&kp->global_alpha, &up->global_alpha) ||
5803     + get_user(clipcount, &up->clipcount) ||
5804     + put_user(clipcount, &kp->clipcount))
5805     return -EFAULT;
5806     - return 0;
5807     -}
5808     + if (clipcount > 2048)
5809     + return -EINVAL;
5810     + if (!clipcount)
5811     + return put_user(NULL, &kp->clips);
5812    
5813     -static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
5814     -{
5815     - if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
5816     + if (get_user(p, &up->clips))
5817     return -EFAULT;
5818     - return 0;
5819     -}
5820     -
5821     -static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
5822     -{
5823     - if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
5824     + uclips = compat_ptr(p);
5825     + if (aux_space < clipcount * sizeof(*kclips))
5826     return -EFAULT;
5827     - return 0;
5828     -}
5829     -
5830     -static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
5831     -{
5832     - if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format)))
5833     + kclips = aux_buf;
5834     + if (put_user(kclips, &kp->clips))
5835     return -EFAULT;
5836     - return 0;
5837     -}
5838    
5839     -static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
5840     -{
5841     - if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format)))
5842     - return -EFAULT;
5843     + while (clipcount--) {
5844     + if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
5845     + return -EFAULT;
5846     + if (put_user(clipcount ? kclips + 1 : NULL, &kclips->next))
5847     + return -EFAULT;
5848     + uclips++;
5849     + kclips++;
5850     + }
5851     return 0;
5852     }
5853    
5854     -static inline int get_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up)
5855     +static int put_v4l2_window32(struct v4l2_window __user *kp,
5856     + struct v4l2_window32 __user *up)
5857     {
5858     - if (copy_from_user(kp, up, sizeof(struct v4l2_meta_format)))
5859     + struct v4l2_clip __user *kclips = kp->clips;
5860     + struct v4l2_clip32 __user *uclips;
5861     + compat_caddr_t p;
5862     + u32 clipcount;
5863     +
5864     + if (copy_in_user(&up->w, &kp->w, sizeof(kp->w)) ||
5865     + assign_in_user(&up->field, &kp->field) ||
5866     + assign_in_user(&up->chromakey, &kp->chromakey) ||
5867     + assign_in_user(&up->global_alpha, &kp->global_alpha) ||
5868     + get_user(clipcount, &kp->clipcount) ||
5869     + put_user(clipcount, &up->clipcount))
5870     return -EFAULT;
5871     - return 0;
5872     -}
5873     + if (!clipcount)
5874     + return 0;
5875    
5876     -static inline int put_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up)
5877     -{
5878     - if (copy_to_user(up, kp, sizeof(struct v4l2_meta_format)))
5879     + if (get_user(p, &up->clips))
5880     return -EFAULT;
5881     + uclips = compat_ptr(p);
5882     + while (clipcount--) {
5883     + if (copy_in_user(&uclips->c, &kclips->c, sizeof(uclips->c)))
5884     + return -EFAULT;
5885     + uclips++;
5886     + kclips++;
5887     + }
5888     return 0;
5889     }
5890    
5891     @@ -209,101 +159,164 @@ struct v4l2_create_buffers32 {
5892     __u32 reserved[8];
5893     };
5894    
5895     -static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
5896     +static int __bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size)
5897     {
5898     - if (get_user(kp->type, &up->type))
5899     + u32 type;
5900     +
5901     + if (get_user(type, &up->type))
5902     return -EFAULT;
5903    
5904     - switch (kp->type) {
5905     + switch (type) {
5906     + case V4L2_BUF_TYPE_VIDEO_OVERLAY:
5907     + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: {
5908     + u32 clipcount;
5909     +
5910     + if (get_user(clipcount, &up->fmt.win.clipcount))
5911     + return -EFAULT;
5912     + if (clipcount > 2048)
5913     + return -EINVAL;
5914     + *size = clipcount * sizeof(struct v4l2_clip);
5915     + return 0;
5916     + }
5917     + default:
5918     + *size = 0;
5919     + return 0;
5920     + }
5921     +}
5922     +
5923     +static int bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size)
5924     +{
5925     + if (!access_ok(VERIFY_READ, up, sizeof(*up)))
5926     + return -EFAULT;
5927     + return __bufsize_v4l2_format(up, size);
5928     +}
5929     +
5930     +static int __get_v4l2_format32(struct v4l2_format __user *kp,
5931     + struct v4l2_format32 __user *up,
5932     + void __user *aux_buf, u32 aux_space)
5933     +{
5934     + u32 type;
5935     +
5936     + if (get_user(type, &up->type) || put_user(type, &kp->type))
5937     + return -EFAULT;
5938     +
5939     + switch (type) {
5940     case V4L2_BUF_TYPE_VIDEO_CAPTURE:
5941     case V4L2_BUF_TYPE_VIDEO_OUTPUT:
5942     - return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
5943     + return copy_in_user(&kp->fmt.pix, &up->fmt.pix,
5944     + sizeof(kp->fmt.pix)) ? -EFAULT : 0;
5945     case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
5946     case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
5947     - return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
5948     - &up->fmt.pix_mp);
5949     + return copy_in_user(&kp->fmt.pix_mp, &up->fmt.pix_mp,
5950     + sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0;
5951     case V4L2_BUF_TYPE_VIDEO_OVERLAY:
5952     case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
5953     - return get_v4l2_window32(&kp->fmt.win, &up->fmt.win);
5954     + return get_v4l2_window32(&kp->fmt.win, &up->fmt.win,
5955     + aux_buf, aux_space);
5956     case V4L2_BUF_TYPE_VBI_CAPTURE:
5957     case V4L2_BUF_TYPE_VBI_OUTPUT:
5958     - return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
5959     + return copy_in_user(&kp->fmt.vbi, &up->fmt.vbi,
5960     + sizeof(kp->fmt.vbi)) ? -EFAULT : 0;
5961     case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
5962     case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
5963     - return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
5964     + return copy_in_user(&kp->fmt.sliced, &up->fmt.sliced,
5965     + sizeof(kp->fmt.sliced)) ? -EFAULT : 0;
5966     case V4L2_BUF_TYPE_SDR_CAPTURE:
5967     case V4L2_BUF_TYPE_SDR_OUTPUT:
5968     - return get_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr);
5969     + return copy_in_user(&kp->fmt.sdr, &up->fmt.sdr,
5970     + sizeof(kp->fmt.sdr)) ? -EFAULT : 0;
5971     case V4L2_BUF_TYPE_META_CAPTURE:
5972     - return get_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta);
5973     + return copy_in_user(&kp->fmt.meta, &up->fmt.meta,
5974     + sizeof(kp->fmt.meta)) ? -EFAULT : 0;
5975     default:
5976     - pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
5977     - kp->type);
5978     return -EINVAL;
5979     }
5980     }
5981    
5982     -static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
5983     +static int get_v4l2_format32(struct v4l2_format __user *kp,
5984     + struct v4l2_format32 __user *up,
5985     + void __user *aux_buf, u32 aux_space)
5986     +{
5987     + if (!access_ok(VERIFY_READ, up, sizeof(*up)))
5988     + return -EFAULT;
5989     + return __get_v4l2_format32(kp, up, aux_buf, aux_space);
5990     +}
5991     +
5992     +static int bufsize_v4l2_create(struct v4l2_create_buffers32 __user *up,
5993     + u32 *size)
5994     {
5995     - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
5996     + if (!access_ok(VERIFY_READ, up, sizeof(*up)))
5997     return -EFAULT;
5998     - return __get_v4l2_format32(kp, up);
5999     + return __bufsize_v4l2_format(&up->format, size);
6000     }
6001    
6002     -static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
6003     +static int get_v4l2_create32(struct v4l2_create_buffers __user *kp,
6004     + struct v4l2_create_buffers32 __user *up,
6005     + void __user *aux_buf, u32 aux_space)
6006     {
6007     - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
6008     - copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
6009     + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6010     + copy_in_user(kp, up,
6011     + offsetof(struct v4l2_create_buffers32, format)))
6012     return -EFAULT;
6013     - return __get_v4l2_format32(&kp->format, &up->format);
6014     + return __get_v4l2_format32(&kp->format, &up->format,
6015     + aux_buf, aux_space);
6016     }
6017    
6018     -static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
6019     +static int __put_v4l2_format32(struct v4l2_format __user *kp,
6020     + struct v4l2_format32 __user *up)
6021     {
6022     - if (put_user(kp->type, &up->type))
6023     + u32 type;
6024     +
6025     + if (get_user(type, &kp->type))
6026     return -EFAULT;
6027    
6028     - switch (kp->type) {
6029     + switch (type) {
6030     case V4L2_BUF_TYPE_VIDEO_CAPTURE:
6031     case V4L2_BUF_TYPE_VIDEO_OUTPUT:
6032     - return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
6033     + return copy_in_user(&up->fmt.pix, &kp->fmt.pix,
6034     + sizeof(kp->fmt.pix)) ? -EFAULT : 0;
6035     case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
6036     case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
6037     - return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
6038     - &up->fmt.pix_mp);
6039     + return copy_in_user(&up->fmt.pix_mp, &kp->fmt.pix_mp,
6040     + sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0;
6041     case V4L2_BUF_TYPE_VIDEO_OVERLAY:
6042     case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
6043     return put_v4l2_window32(&kp->fmt.win, &up->fmt.win);
6044     case V4L2_BUF_TYPE_VBI_CAPTURE:
6045     case V4L2_BUF_TYPE_VBI_OUTPUT:
6046     - return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
6047     + return copy_in_user(&up->fmt.vbi, &kp->fmt.vbi,
6048     + sizeof(kp->fmt.vbi)) ? -EFAULT : 0;
6049     case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
6050     case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
6051     - return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
6052     + return copy_in_user(&up->fmt.sliced, &kp->fmt.sliced,
6053     + sizeof(kp->fmt.sliced)) ? -EFAULT : 0;
6054     case V4L2_BUF_TYPE_SDR_CAPTURE:
6055     case V4L2_BUF_TYPE_SDR_OUTPUT:
6056     - return put_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr);
6057     + return copy_in_user(&up->fmt.sdr, &kp->fmt.sdr,
6058     + sizeof(kp->fmt.sdr)) ? -EFAULT : 0;
6059     case V4L2_BUF_TYPE_META_CAPTURE:
6060     - return put_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta);
6061     + return copy_in_user(&up->fmt.meta, &kp->fmt.meta,
6062     + sizeof(kp->fmt.meta)) ? -EFAULT : 0;
6063     default:
6064     - pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
6065     - kp->type);
6066     return -EINVAL;
6067     }
6068     }
6069    
6070     -static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
6071     +static int put_v4l2_format32(struct v4l2_format __user *kp,
6072     + struct v4l2_format32 __user *up)
6073     {
6074     - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)))
6075     + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
6076     return -EFAULT;
6077     return __put_v4l2_format32(kp, up);
6078     }
6079    
6080     -static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
6081     +static int put_v4l2_create32(struct v4l2_create_buffers __user *kp,
6082     + struct v4l2_create_buffers32 __user *up)
6083     {
6084     - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
6085     - copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) ||
6086     - copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
6087     + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
6088     + copy_in_user(up, kp,
6089     + offsetof(struct v4l2_create_buffers32, format)) ||
6090     + copy_in_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
6091     return -EFAULT;
6092     return __put_v4l2_format32(&kp->format, &up->format);
6093     }
6094     @@ -317,25 +330,28 @@ struct v4l2_standard32 {
6095     __u32 reserved[4];
6096     };
6097    
6098     -static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
6099     +static int get_v4l2_standard32(struct v4l2_standard __user *kp,
6100     + struct v4l2_standard32 __user *up)
6101     {
6102     /* other fields are not set by the user, nor used by the driver */
6103     - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) ||
6104     - get_user(kp->index, &up->index))
6105     + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6106     + assign_in_user(&kp->index, &up->index))
6107     return -EFAULT;
6108     return 0;
6109     }
6110    
6111     -static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
6112     +static int put_v4l2_standard32(struct v4l2_standard __user *kp,
6113     + struct v4l2_standard32 __user *up)
6114     {
6115     - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
6116     - put_user(kp->index, &up->index) ||
6117     - put_user(kp->id, &up->id) ||
6118     - copy_to_user(up->name, kp->name, 24) ||
6119     - copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
6120     - put_user(kp->framelines, &up->framelines) ||
6121     - copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
6122     - return -EFAULT;
6123     + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
6124     + assign_in_user(&up->index, &kp->index) ||
6125     + assign_in_user(&up->id, &kp->id) ||
6126     + copy_in_user(up->name, kp->name, sizeof(up->name)) ||
6127     + copy_in_user(&up->frameperiod, &kp->frameperiod,
6128     + sizeof(up->frameperiod)) ||
6129     + assign_in_user(&up->framelines, &kp->framelines) ||
6130     + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
6131     + return -EFAULT;
6132     return 0;
6133     }
6134    
6135     @@ -374,136 +390,186 @@ struct v4l2_buffer32 {
6136     __u32 reserved;
6137     };
6138    
6139     -static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
6140     - enum v4l2_memory memory)
6141     +static int get_v4l2_plane32(struct v4l2_plane __user *up,
6142     + struct v4l2_plane32 __user *up32,
6143     + enum v4l2_memory memory)
6144     {
6145     - void __user *up_pln;
6146     - compat_long_t p;
6147     + compat_ulong_t p;
6148    
6149     if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
6150     - copy_in_user(&up->data_offset, &up32->data_offset,
6151     - sizeof(__u32)))
6152     + copy_in_user(&up->data_offset, &up32->data_offset,
6153     + sizeof(up->data_offset)))
6154     return -EFAULT;
6155    
6156     - if (memory == V4L2_MEMORY_USERPTR) {
6157     - if (get_user(p, &up32->m.userptr))
6158     - return -EFAULT;
6159     - up_pln = compat_ptr(p);
6160     - if (put_user((unsigned long)up_pln, &up->m.userptr))
6161     + switch (memory) {
6162     + case V4L2_MEMORY_MMAP:
6163     + case V4L2_MEMORY_OVERLAY:
6164     + if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
6165     + sizeof(up32->m.mem_offset)))
6166     return -EFAULT;
6167     - } else if (memory == V4L2_MEMORY_DMABUF) {
6168     - if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int)))
6169     + break;
6170     + case V4L2_MEMORY_USERPTR:
6171     + if (get_user(p, &up32->m.userptr) ||
6172     + put_user((unsigned long)compat_ptr(p), &up->m.userptr))
6173     return -EFAULT;
6174     - } else {
6175     - if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
6176     - sizeof(__u32)))
6177     + break;
6178     + case V4L2_MEMORY_DMABUF:
6179     + if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(up32->m.fd)))
6180     return -EFAULT;
6181     + break;
6182     }
6183    
6184     return 0;
6185     }
6186    
6187     -static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
6188     - enum v4l2_memory memory)
6189     +static int put_v4l2_plane32(struct v4l2_plane __user *up,
6190     + struct v4l2_plane32 __user *up32,
6191     + enum v4l2_memory memory)
6192     {
6193     + unsigned long p;
6194     +
6195     if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
6196     - copy_in_user(&up32->data_offset, &up->data_offset,
6197     - sizeof(__u32)))
6198     + copy_in_user(&up32->data_offset, &up->data_offset,
6199     + sizeof(up->data_offset)))
6200     return -EFAULT;
6201    
6202     - /* For MMAP, driver might've set up the offset, so copy it back.
6203     - * USERPTR stays the same (was userspace-provided), so no copying. */
6204     - if (memory == V4L2_MEMORY_MMAP)
6205     + switch (memory) {
6206     + case V4L2_MEMORY_MMAP:
6207     + case V4L2_MEMORY_OVERLAY:
6208     if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
6209     - sizeof(__u32)))
6210     + sizeof(up->m.mem_offset)))
6211     + return -EFAULT;
6212     + break;
6213     + case V4L2_MEMORY_USERPTR:
6214     + if (get_user(p, &up->m.userptr) ||
6215     + put_user((compat_ulong_t)ptr_to_compat((__force void *)p),
6216     + &up32->m.userptr))
6217     return -EFAULT;
6218     - /* For DMABUF, driver might've set up the fd, so copy it back. */
6219     - if (memory == V4L2_MEMORY_DMABUF)
6220     - if (copy_in_user(&up32->m.fd, &up->m.fd,
6221     - sizeof(int)))
6222     + break;
6223     + case V4L2_MEMORY_DMABUF:
6224     + if (copy_in_user(&up32->m.fd, &up->m.fd, sizeof(up->m.fd)))
6225     return -EFAULT;
6226     + break;
6227     + }
6228     +
6229     + return 0;
6230     +}
6231     +
6232     +static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *up, u32 *size)
6233     +{
6234     + u32 type;
6235     + u32 length;
6236     +
6237     + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6238     + get_user(type, &up->type) ||
6239     + get_user(length, &up->length))
6240     + return -EFAULT;
6241    
6242     + if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
6243     + if (length > VIDEO_MAX_PLANES)
6244     + return -EINVAL;
6245     +
6246     + /*
6247     + * We don't really care if userspace decides to kill itself
6248     + * by passing a very big length value
6249     + */
6250     + *size = length * sizeof(struct v4l2_plane);
6251     + } else {
6252     + *size = 0;
6253     + }
6254     return 0;
6255     }
6256    
6257     -static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
6258     +static int get_v4l2_buffer32(struct v4l2_buffer __user *kp,
6259     + struct v4l2_buffer32 __user *up,
6260     + void __user *aux_buf, u32 aux_space)
6261     {
6262     + u32 type;
6263     + u32 length;
6264     + enum v4l2_memory memory;
6265     struct v4l2_plane32 __user *uplane32;
6266     struct v4l2_plane __user *uplane;
6267     compat_caddr_t p;
6268     int ret;
6269    
6270     - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
6271     - get_user(kp->index, &up->index) ||
6272     - get_user(kp->type, &up->type) ||
6273     - get_user(kp->flags, &up->flags) ||
6274     - get_user(kp->memory, &up->memory) ||
6275     - get_user(kp->length, &up->length))
6276     - return -EFAULT;
6277     + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6278     + assign_in_user(&kp->index, &up->index) ||
6279     + get_user(type, &up->type) ||
6280     + put_user(type, &kp->type) ||
6281     + assign_in_user(&kp->flags, &up->flags) ||
6282     + get_user(memory, &up->memory) ||
6283     + put_user(memory, &kp->memory) ||
6284     + get_user(length, &up->length) ||
6285     + put_user(length, &kp->length))
6286     + return -EFAULT;
6287    
6288     - if (V4L2_TYPE_IS_OUTPUT(kp->type))
6289     - if (get_user(kp->bytesused, &up->bytesused) ||
6290     - get_user(kp->field, &up->field) ||
6291     - get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
6292     - get_user(kp->timestamp.tv_usec,
6293     - &up->timestamp.tv_usec))
6294     + if (V4L2_TYPE_IS_OUTPUT(type))
6295     + if (assign_in_user(&kp->bytesused, &up->bytesused) ||
6296     + assign_in_user(&kp->field, &up->field) ||
6297     + assign_in_user(&kp->timestamp.tv_sec,
6298     + &up->timestamp.tv_sec) ||
6299     + assign_in_user(&kp->timestamp.tv_usec,
6300     + &up->timestamp.tv_usec))
6301     return -EFAULT;
6302    
6303     - if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
6304     - unsigned int num_planes;
6305     + if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
6306     + u32 num_planes = length;
6307    
6308     - if (kp->length == 0) {
6309     - kp->m.planes = NULL;
6310     - /* num_planes == 0 is legal, e.g. when userspace doesn't
6311     - * need planes array on DQBUF*/
6312     - return 0;
6313     - } else if (kp->length > VIDEO_MAX_PLANES) {
6314     - return -EINVAL;
6315     + if (num_planes == 0) {
6316     + /*
6317     + * num_planes == 0 is legal, e.g. when userspace doesn't
6318     + * need planes array on DQBUF
6319     + */
6320     + return put_user(NULL, &kp->m.planes);
6321     }
6322     + if (num_planes > VIDEO_MAX_PLANES)
6323     + return -EINVAL;
6324    
6325     if (get_user(p, &up->m.planes))
6326     return -EFAULT;
6327    
6328     uplane32 = compat_ptr(p);
6329     if (!access_ok(VERIFY_READ, uplane32,
6330     - kp->length * sizeof(struct v4l2_plane32)))
6331     + num_planes * sizeof(*uplane32)))
6332     return -EFAULT;
6333    
6334     - /* We don't really care if userspace decides to kill itself
6335     - * by passing a very big num_planes value */
6336     - uplane = compat_alloc_user_space(kp->length *
6337     - sizeof(struct v4l2_plane));
6338     - kp->m.planes = (__force struct v4l2_plane *)uplane;
6339     + /*
6340     + * We don't really care if userspace decides to kill itself
6341     + * by passing a very big num_planes value
6342     + */
6343     + if (aux_space < num_planes * sizeof(*uplane))
6344     + return -EFAULT;
6345    
6346     - for (num_planes = 0; num_planes < kp->length; num_planes++) {
6347     - ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
6348     + uplane = aux_buf;
6349     + if (put_user((__force struct v4l2_plane *)uplane,
6350     + &kp->m.planes))
6351     + return -EFAULT;
6352     +
6353     + while (num_planes--) {
6354     + ret = get_v4l2_plane32(uplane, uplane32, memory);
6355     if (ret)
6356     return ret;
6357     - ++uplane;
6358     - ++uplane32;
6359     + uplane++;
6360     + uplane32++;
6361     }
6362     } else {
6363     - switch (kp->memory) {
6364     + switch (memory) {
6365     case V4L2_MEMORY_MMAP:
6366     - if (get_user(kp->m.offset, &up->m.offset))
6367     + case V4L2_MEMORY_OVERLAY:
6368     + if (assign_in_user(&kp->m.offset, &up->m.offset))
6369     return -EFAULT;
6370     break;
6371     - case V4L2_MEMORY_USERPTR:
6372     - {
6373     - compat_long_t tmp;
6374     + case V4L2_MEMORY_USERPTR: {
6375     + compat_ulong_t userptr;
6376    
6377     - if (get_user(tmp, &up->m.userptr))
6378     - return -EFAULT;
6379     -
6380     - kp->m.userptr = (unsigned long)compat_ptr(tmp);
6381     - }
6382     - break;
6383     - case V4L2_MEMORY_OVERLAY:
6384     - if (get_user(kp->m.offset, &up->m.offset))
6385     + if (get_user(userptr, &up->m.userptr) ||
6386     + put_user((unsigned long)compat_ptr(userptr),
6387     + &kp->m.userptr))
6388     return -EFAULT;
6389     break;
6390     + }
6391     case V4L2_MEMORY_DMABUF:
6392     - if (get_user(kp->m.fd, &up->m.fd))
6393     + if (assign_in_user(&kp->m.fd, &up->m.fd))
6394     return -EFAULT;
6395     break;
6396     }
6397     @@ -512,65 +578,70 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
6398     return 0;
6399     }
6400    
6401     -static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
6402     +static int put_v4l2_buffer32(struct v4l2_buffer __user *kp,
6403     + struct v4l2_buffer32 __user *up)
6404     {
6405     + u32 type;
6406     + u32 length;
6407     + enum v4l2_memory memory;
6408     struct v4l2_plane32 __user *uplane32;
6409     struct v4l2_plane __user *uplane;
6410     compat_caddr_t p;
6411     - int num_planes;
6412     int ret;
6413    
6414     - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) ||
6415     - put_user(kp->index, &up->index) ||
6416     - put_user(kp->type, &up->type) ||
6417     - put_user(kp->flags, &up->flags) ||
6418     - put_user(kp->memory, &up->memory))
6419     - return -EFAULT;
6420     + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
6421     + assign_in_user(&up->index, &kp->index) ||
6422     + get_user(type, &kp->type) ||
6423     + put_user(type, &up->type) ||
6424     + assign_in_user(&up->flags, &kp->flags) ||
6425     + get_user(memory, &kp->memory) ||
6426     + put_user(memory, &up->memory))
6427     + return -EFAULT;
6428    
6429     - if (put_user(kp->bytesused, &up->bytesused) ||
6430     - put_user(kp->field, &up->field) ||
6431     - put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
6432     - put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
6433     - copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
6434     - put_user(kp->sequence, &up->sequence) ||
6435     - put_user(kp->reserved2, &up->reserved2) ||
6436     - put_user(kp->reserved, &up->reserved) ||
6437     - put_user(kp->length, &up->length))
6438     - return -EFAULT;
6439     + if (assign_in_user(&up->bytesused, &kp->bytesused) ||
6440     + assign_in_user(&up->field, &kp->field) ||
6441     + assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) ||
6442     + assign_in_user(&up->timestamp.tv_usec, &kp->timestamp.tv_usec) ||
6443     + copy_in_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) ||
6444     + assign_in_user(&up->sequence, &kp->sequence) ||
6445     + assign_in_user(&up->reserved2, &kp->reserved2) ||
6446     + assign_in_user(&up->reserved, &kp->reserved) ||
6447     + get_user(length, &kp->length) ||
6448     + put_user(length, &up->length))
6449     + return -EFAULT;
6450     +
6451     + if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
6452     + u32 num_planes = length;
6453    
6454     - if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
6455     - num_planes = kp->length;
6456     if (num_planes == 0)
6457     return 0;
6458    
6459     - uplane = (__force struct v4l2_plane __user *)kp->m.planes;
6460     + if (get_user(uplane, ((__force struct v4l2_plane __user **)&kp->m.planes)))
6461     + return -EFAULT;
6462     if (get_user(p, &up->m.planes))
6463     return -EFAULT;
6464     uplane32 = compat_ptr(p);
6465    
6466     - while (--num_planes >= 0) {
6467     - ret = put_v4l2_plane32(uplane, uplane32, kp->memory);
6468     + while (num_planes--) {
6469     + ret = put_v4l2_plane32(uplane, uplane32, memory);
6470     if (ret)
6471     return ret;
6472     ++uplane;
6473     ++uplane32;
6474     }
6475     } else {
6476     - switch (kp->memory) {
6477     + switch (memory) {
6478     case V4L2_MEMORY_MMAP:
6479     - if (put_user(kp->m.offset, &up->m.offset))
6480     + case V4L2_MEMORY_OVERLAY:
6481     + if (assign_in_user(&up->m.offset, &kp->m.offset))
6482     return -EFAULT;
6483     break;
6484     case V4L2_MEMORY_USERPTR:
6485     - if (put_user(kp->m.userptr, &up->m.userptr))
6486     - return -EFAULT;
6487     - break;
6488     - case V4L2_MEMORY_OVERLAY:
6489     - if (put_user(kp->m.offset, &up->m.offset))
6490     + if (assign_in_user(&up->m.userptr, &kp->m.userptr))
6491     return -EFAULT;
6492     break;
6493     case V4L2_MEMORY_DMABUF:
6494     - if (put_user(kp->m.fd, &up->m.fd))
6495     + if (assign_in_user(&up->m.fd, &kp->m.fd))
6496     return -EFAULT;
6497     break;
6498     }
6499     @@ -595,30 +666,33 @@ struct v4l2_framebuffer32 {
6500     } fmt;
6501     };
6502    
6503     -static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
6504     +static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
6505     + struct v4l2_framebuffer32 __user *up)
6506     {
6507     - u32 tmp;
6508     -
6509     - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) ||
6510     - get_user(tmp, &up->base) ||
6511     - get_user(kp->capability, &up->capability) ||
6512     - get_user(kp->flags, &up->flags) ||
6513     - copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
6514     - return -EFAULT;
6515     - kp->base = (__force void *)compat_ptr(tmp);
6516     + compat_caddr_t tmp;
6517     +
6518     + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6519     + get_user(tmp, &up->base) ||
6520     + put_user((__force void *)compat_ptr(tmp), &kp->base) ||
6521     + assign_in_user(&kp->capability, &up->capability) ||
6522     + assign_in_user(&kp->flags, &up->flags) ||
6523     + copy_in_user(&kp->fmt, &up->fmt, sizeof(kp->fmt)))
6524     + return -EFAULT;
6525     return 0;
6526     }
6527    
6528     -static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
6529     +static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
6530     + struct v4l2_framebuffer32 __user *up)
6531     {
6532     - u32 tmp = (u32)((unsigned long)kp->base);
6533     -
6534     - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) ||
6535     - put_user(tmp, &up->base) ||
6536     - put_user(kp->capability, &up->capability) ||
6537     - put_user(kp->flags, &up->flags) ||
6538     - copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt)))
6539     - return -EFAULT;
6540     + void *base;
6541     +
6542     + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
6543     + get_user(base, &kp->base) ||
6544     + put_user(ptr_to_compat(base), &up->base) ||
6545     + assign_in_user(&up->capability, &kp->capability) ||
6546     + assign_in_user(&up->flags, &kp->flags) ||
6547     + copy_in_user(&up->fmt, &kp->fmt, sizeof(kp->fmt)))
6548     + return -EFAULT;
6549     return 0;
6550     }
6551    
6552     @@ -634,18 +708,22 @@ struct v4l2_input32 {
6553     __u32 reserved[3];
6554     };
6555    
6556     -/* The 64-bit v4l2_input struct has extra padding at the end of the struct.
6557     - Otherwise it is identical to the 32-bit version. */
6558     -static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
6559     +/*
6560     + * The 64-bit v4l2_input struct has extra padding at the end of the struct.
6561     + * Otherwise it is identical to the 32-bit version.
6562     + */
6563     +static inline int get_v4l2_input32(struct v4l2_input __user *kp,
6564     + struct v4l2_input32 __user *up)
6565     {
6566     - if (copy_from_user(kp, up, sizeof(struct v4l2_input32)))
6567     + if (copy_in_user(kp, up, sizeof(*up)))
6568     return -EFAULT;
6569     return 0;
6570     }
6571    
6572     -static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
6573     +static inline int put_v4l2_input32(struct v4l2_input __user *kp,
6574     + struct v4l2_input32 __user *up)
6575     {
6576     - if (copy_to_user(up, kp, sizeof(struct v4l2_input32)))
6577     + if (copy_in_user(up, kp, sizeof(*up)))
6578     return -EFAULT;
6579     return 0;
6580     }
6581     @@ -669,60 +747,95 @@ struct v4l2_ext_control32 {
6582     };
6583     } __attribute__ ((packed));
6584    
6585     -/* The following function really belong in v4l2-common, but that causes
6586     - a circular dependency between modules. We need to think about this, but
6587     - for now this will do. */
6588     -
6589     -/* Return non-zero if this control is a pointer type. Currently only
6590     - type STRING is a pointer type. */
6591     -static inline int ctrl_is_pointer(u32 id)
6592     +/* Return true if this control is a pointer type. */
6593     +static inline bool ctrl_is_pointer(struct file *file, u32 id)
6594     {
6595     - switch (id) {
6596     - case V4L2_CID_RDS_TX_PS_NAME:
6597     - case V4L2_CID_RDS_TX_RADIO_TEXT:
6598     - return 1;
6599     - default:
6600     - return 0;
6601     + struct video_device *vdev = video_devdata(file);
6602     + struct v4l2_fh *fh = NULL;
6603     + struct v4l2_ctrl_handler *hdl = NULL;
6604     + struct v4l2_query_ext_ctrl qec = { id };
6605     + const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
6606     +
6607     + if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
6608     + fh = file->private_data;
6609     +
6610     + if (fh && fh->ctrl_handler)
6611     + hdl = fh->ctrl_handler;
6612     + else if (vdev->ctrl_handler)
6613     + hdl = vdev->ctrl_handler;
6614     +
6615     + if (hdl) {
6616     + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id);
6617     +
6618     + return ctrl && ctrl->is_ptr;
6619     }
6620     +
6621     + if (!ops || !ops->vidioc_query_ext_ctrl)
6622     + return false;
6623     +
6624     + return !ops->vidioc_query_ext_ctrl(file, fh, &qec) &&
6625     + (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD);
6626     +}
6627     +
6628     +static int bufsize_v4l2_ext_controls(struct v4l2_ext_controls32 __user *up,
6629     + u32 *size)
6630     +{
6631     + u32 count;
6632     +
6633     + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6634     + get_user(count, &up->count))
6635     + return -EFAULT;
6636     + if (count > V4L2_CID_MAX_CTRLS)
6637     + return -EINVAL;
6638     + *size = count * sizeof(struct v4l2_ext_control);
6639     + return 0;
6640     }
6641    
6642     -static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
6643     +static int get_v4l2_ext_controls32(struct file *file,
6644     + struct v4l2_ext_controls __user *kp,
6645     + struct v4l2_ext_controls32 __user *up,
6646     + void __user *aux_buf, u32 aux_space)
6647     {
6648     struct v4l2_ext_control32 __user *ucontrols;
6649     struct v4l2_ext_control __user *kcontrols;
6650     - unsigned int n;
6651     + u32 count;
6652     + u32 n;
6653     compat_caddr_t p;
6654    
6655     - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
6656     - get_user(kp->which, &up->which) ||
6657     - get_user(kp->count, &up->count) ||
6658     - get_user(kp->error_idx, &up->error_idx) ||
6659     - copy_from_user(kp->reserved, up->reserved,
6660     - sizeof(kp->reserved)))
6661     - return -EFAULT;
6662     - if (kp->count == 0) {
6663     - kp->controls = NULL;
6664     - return 0;
6665     - } else if (kp->count > V4L2_CID_MAX_CTRLS) {
6666     + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6667     + assign_in_user(&kp->which, &up->which) ||
6668     + get_user(count, &up->count) ||
6669     + put_user(count, &kp->count) ||
6670     + assign_in_user(&kp->error_idx, &up->error_idx) ||
6671     + copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
6672     + return -EFAULT;
6673     +
6674     + if (count == 0)
6675     + return put_user(NULL, &kp->controls);
6676     + if (count > V4L2_CID_MAX_CTRLS)
6677     return -EINVAL;
6678     - }
6679     if (get_user(p, &up->controls))
6680     return -EFAULT;
6681     ucontrols = compat_ptr(p);
6682     - if (!access_ok(VERIFY_READ, ucontrols,
6683     - kp->count * sizeof(struct v4l2_ext_control32)))
6684     + if (!access_ok(VERIFY_READ, ucontrols, count * sizeof(*ucontrols)))
6685     return -EFAULT;
6686     - kcontrols = compat_alloc_user_space(kp->count *
6687     - sizeof(struct v4l2_ext_control));
6688     - kp->controls = (__force struct v4l2_ext_control *)kcontrols;
6689     - for (n = 0; n < kp->count; n++) {
6690     + if (aux_space < count * sizeof(*kcontrols))
6691     + return -EFAULT;
6692     + kcontrols = aux_buf;
6693     + if (put_user((__force struct v4l2_ext_control *)kcontrols,
6694     + &kp->controls))
6695     + return -EFAULT;
6696     +
6697     + for (n = 0; n < count; n++) {
6698     u32 id;
6699    
6700     if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
6701     return -EFAULT;
6702     +
6703     if (get_user(id, &kcontrols->id))
6704     return -EFAULT;
6705     - if (ctrl_is_pointer(id)) {
6706     +
6707     + if (ctrl_is_pointer(file, id)) {
6708     void __user *s;
6709    
6710     if (get_user(p, &ucontrols->string))
6711     @@ -737,43 +850,55 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
6712     return 0;
6713     }
6714    
6715     -static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
6716     +static int put_v4l2_ext_controls32(struct file *file,
6717     + struct v4l2_ext_controls __user *kp,
6718     + struct v4l2_ext_controls32 __user *up)
6719     {
6720     struct v4l2_ext_control32 __user *ucontrols;
6721     - struct v4l2_ext_control __user *kcontrols =
6722     - (__force struct v4l2_ext_control __user *)kp->controls;
6723     - int n = kp->count;
6724     + struct v4l2_ext_control __user *kcontrols;
6725     + u32 count;
6726     + u32 n;
6727     compat_caddr_t p;
6728    
6729     - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) ||
6730     - put_user(kp->which, &up->which) ||
6731     - put_user(kp->count, &up->count) ||
6732     - put_user(kp->error_idx, &up->error_idx) ||
6733     - copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
6734     - return -EFAULT;
6735     - if (!kp->count)
6736     - return 0;
6737     + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
6738     + assign_in_user(&up->which, &kp->which) ||
6739     + get_user(count, &kp->count) ||
6740     + put_user(count, &up->count) ||
6741     + assign_in_user(&up->error_idx, &kp->error_idx) ||
6742     + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)) ||
6743     + get_user(kcontrols, &kp->controls))
6744     + return -EFAULT;
6745    
6746     + if (!count)
6747     + return 0;
6748     if (get_user(p, &up->controls))
6749     return -EFAULT;
6750     ucontrols = compat_ptr(p);
6751     - if (!access_ok(VERIFY_WRITE, ucontrols,
6752     - n * sizeof(struct v4l2_ext_control32)))
6753     + if (!access_ok(VERIFY_WRITE, ucontrols, count * sizeof(*ucontrols)))
6754     return -EFAULT;
6755    
6756     - while (--n >= 0) {
6757     - unsigned size = sizeof(*ucontrols);
6758     + for (n = 0; n < count; n++) {
6759     + unsigned int size = sizeof(*ucontrols);
6760     u32 id;
6761    
6762     - if (get_user(id, &kcontrols->id))
6763     + if (get_user(id, &kcontrols->id) ||
6764     + put_user(id, &ucontrols->id) ||
6765     + assign_in_user(&ucontrols->size, &kcontrols->size) ||
6766     + copy_in_user(&ucontrols->reserved2, &kcontrols->reserved2,
6767     + sizeof(ucontrols->reserved2)))
6768     return -EFAULT;
6769     - /* Do not modify the pointer when copying a pointer control.
6770     - The contents of the pointer was changed, not the pointer
6771     - itself. */
6772     - if (ctrl_is_pointer(id))
6773     +
6774     + /*
6775     + * Do not modify the pointer when copying a pointer control.
6776     + * The contents of the pointer was changed, not the pointer
6777     + * itself.
6778     + */
6779     + if (ctrl_is_pointer(file, id))
6780     size -= sizeof(ucontrols->value64);
6781     +
6782     if (copy_in_user(ucontrols, kcontrols, size))
6783     return -EFAULT;
6784     +
6785     ucontrols++;
6786     kcontrols++;
6787     }
6788     @@ -793,18 +918,19 @@ struct v4l2_event32 {
6789     __u32 reserved[8];
6790     };
6791    
6792     -static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
6793     +static int put_v4l2_event32(struct v4l2_event __user *kp,
6794     + struct v4l2_event32 __user *up)
6795     {
6796     - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
6797     - put_user(kp->type, &up->type) ||
6798     - copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
6799     - put_user(kp->pending, &up->pending) ||
6800     - put_user(kp->sequence, &up->sequence) ||
6801     - put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
6802     - put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
6803     - put_user(kp->id, &up->id) ||
6804     - copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
6805     - return -EFAULT;
6806     + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
6807     + assign_in_user(&up->type, &kp->type) ||
6808     + copy_in_user(&up->u, &kp->u, sizeof(kp->u)) ||
6809     + assign_in_user(&up->pending, &kp->pending) ||
6810     + assign_in_user(&up->sequence, &kp->sequence) ||
6811     + assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) ||
6812     + assign_in_user(&up->timestamp.tv_nsec, &kp->timestamp.tv_nsec) ||
6813     + assign_in_user(&up->id, &kp->id) ||
6814     + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
6815     + return -EFAULT;
6816     return 0;
6817     }
6818    
6819     @@ -816,32 +942,35 @@ struct v4l2_edid32 {
6820     compat_caddr_t edid;
6821     };
6822    
6823     -static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
6824     +static int get_v4l2_edid32(struct v4l2_edid __user *kp,
6825     + struct v4l2_edid32 __user *up)
6826     {
6827     - u32 tmp;
6828     -
6829     - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) ||
6830     - get_user(kp->pad, &up->pad) ||
6831     - get_user(kp->start_block, &up->start_block) ||
6832     - get_user(kp->blocks, &up->blocks) ||
6833     - get_user(tmp, &up->edid) ||
6834     - copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
6835     - return -EFAULT;
6836     - kp->edid = (__force u8 *)compat_ptr(tmp);
6837     + compat_uptr_t tmp;
6838     +
6839     + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6840     + assign_in_user(&kp->pad, &up->pad) ||
6841     + assign_in_user(&kp->start_block, &up->start_block) ||
6842     + assign_in_user(&kp->blocks, &up->blocks) ||
6843     + get_user(tmp, &up->edid) ||
6844     + put_user(compat_ptr(tmp), &kp->edid) ||
6845     + copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
6846     + return -EFAULT;
6847     return 0;
6848     }
6849    
6850     -static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
6851     +static int put_v4l2_edid32(struct v4l2_edid __user *kp,
6852     + struct v4l2_edid32 __user *up)
6853     {
6854     - u32 tmp = (u32)((unsigned long)kp->edid);
6855     -
6856     - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) ||
6857     - put_user(kp->pad, &up->pad) ||
6858     - put_user(kp->start_block, &up->start_block) ||
6859     - put_user(kp->blocks, &up->blocks) ||
6860     - put_user(tmp, &up->edid) ||
6861     - copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
6862     - return -EFAULT;
6863     + void *edid;
6864     +
6865     + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
6866     + assign_in_user(&up->pad, &kp->pad) ||
6867     + assign_in_user(&up->start_block, &kp->start_block) ||
6868     + assign_in_user(&up->blocks, &kp->blocks) ||
6869     + get_user(edid, &kp->edid) ||
6870     + put_user(ptr_to_compat(edid), &up->edid) ||
6871     + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
6872     + return -EFAULT;
6873     return 0;
6874     }
6875    
6876     @@ -873,22 +1002,23 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
6877     #define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32)
6878     #define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32)
6879    
6880     +static int alloc_userspace(unsigned int size, u32 aux_space,
6881     + void __user **up_native)
6882     +{
6883     + *up_native = compat_alloc_user_space(size + aux_space);
6884     + if (!*up_native)
6885     + return -ENOMEM;
6886     + if (clear_user(*up_native, size))
6887     + return -EFAULT;
6888     + return 0;
6889     +}
6890     +
6891     static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
6892     {
6893     - union {
6894     - struct v4l2_format v2f;
6895     - struct v4l2_buffer v2b;
6896     - struct v4l2_framebuffer v2fb;
6897     - struct v4l2_input v2i;
6898     - struct v4l2_standard v2s;
6899     - struct v4l2_ext_controls v2ecs;
6900     - struct v4l2_event v2ev;
6901     - struct v4l2_create_buffers v2crt;
6902     - struct v4l2_edid v2edid;
6903     - unsigned long vx;
6904     - int vi;
6905     - } karg;
6906     void __user *up = compat_ptr(arg);
6907     + void __user *up_native = NULL;
6908     + void __user *aux_buf;
6909     + u32 aux_space;
6910     int compatible_arg = 1;
6911     long err = 0;
6912    
6913     @@ -927,30 +1057,52 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
6914     case VIDIOC_STREAMOFF:
6915     case VIDIOC_S_INPUT:
6916     case VIDIOC_S_OUTPUT:
6917     - err = get_user(karg.vi, (s32 __user *)up);
6918     + err = alloc_userspace(sizeof(unsigned int), 0, &up_native);
6919     + if (!err && assign_in_user((unsigned int __user *)up_native,
6920     + (compat_uint_t __user *)up))
6921     + err = -EFAULT;
6922     compatible_arg = 0;
6923     break;
6924    
6925     case VIDIOC_G_INPUT:
6926     case VIDIOC_G_OUTPUT:
6927     + err = alloc_userspace(sizeof(unsigned int), 0, &up_native);
6928     compatible_arg = 0;
6929     break;
6930    
6931     case VIDIOC_G_EDID:
6932     case VIDIOC_S_EDID:
6933     - err = get_v4l2_edid32(&karg.v2edid, up);
6934     + err = alloc_userspace(sizeof(struct v4l2_edid), 0, &up_native);
6935     + if (!err)
6936     + err = get_v4l2_edid32(up_native, up);
6937     compatible_arg = 0;
6938     break;
6939    
6940     case VIDIOC_G_FMT:
6941     case VIDIOC_S_FMT:
6942     case VIDIOC_TRY_FMT:
6943     - err = get_v4l2_format32(&karg.v2f, up);
6944     + err = bufsize_v4l2_format(up, &aux_space);
6945     + if (!err)
6946     + err = alloc_userspace(sizeof(struct v4l2_format),
6947     + aux_space, &up_native);
6948     + if (!err) {
6949     + aux_buf = up_native + sizeof(struct v4l2_format);
6950     + err = get_v4l2_format32(up_native, up,
6951     + aux_buf, aux_space);
6952     + }
6953     compatible_arg = 0;
6954     break;
6955    
6956     case VIDIOC_CREATE_BUFS:
6957     - err = get_v4l2_create32(&karg.v2crt, up);
6958     + err = bufsize_v4l2_create(up, &aux_space);
6959     + if (!err)
6960     + err = alloc_userspace(sizeof(struct v4l2_create_buffers),
6961     + aux_space, &up_native);
6962     + if (!err) {
6963     + aux_buf = up_native + sizeof(struct v4l2_create_buffers);
6964     + err = get_v4l2_create32(up_native, up,
6965     + aux_buf, aux_space);
6966     + }
6967     compatible_arg = 0;
6968     break;
6969    
6970     @@ -958,36 +1110,63 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
6971     case VIDIOC_QUERYBUF:
6972     case VIDIOC_QBUF:
6973     case VIDIOC_DQBUF:
6974     - err = get_v4l2_buffer32(&karg.v2b, up);
6975     + err = bufsize_v4l2_buffer(up, &aux_space);
6976     + if (!err)
6977     + err = alloc_userspace(sizeof(struct v4l2_buffer),
6978     + aux_space, &up_native);
6979     + if (!err) {
6980     + aux_buf = up_native + sizeof(struct v4l2_buffer);
6981     + err = get_v4l2_buffer32(up_native, up,
6982     + aux_buf, aux_space);
6983     + }
6984     compatible_arg = 0;
6985     break;
6986    
6987     case VIDIOC_S_FBUF:
6988     - err = get_v4l2_framebuffer32(&karg.v2fb, up);
6989     + err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
6990     + &up_native);
6991     + if (!err)
6992     + err = get_v4l2_framebuffer32(up_native, up);
6993     compatible_arg = 0;
6994     break;
6995    
6996     case VIDIOC_G_FBUF:
6997     + err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
6998     + &up_native);
6999     compatible_arg = 0;
7000     break;
7001    
7002     case VIDIOC_ENUMSTD:
7003     - err = get_v4l2_standard32(&karg.v2s, up);
7004     + err = alloc_userspace(sizeof(struct v4l2_standard), 0,
7005     + &up_native);
7006     + if (!err)
7007     + err = get_v4l2_standard32(up_native, up);
7008     compatible_arg = 0;
7009     break;
7010    
7011     case VIDIOC_ENUMINPUT:
7012     - err = get_v4l2_input32(&karg.v2i, up);
7013     + err = alloc_userspace(sizeof(struct v4l2_input), 0, &up_native);
7014     + if (!err)
7015     + err = get_v4l2_input32(up_native, up);
7016     compatible_arg = 0;
7017     break;
7018    
7019     case VIDIOC_G_EXT_CTRLS:
7020     case VIDIOC_S_EXT_CTRLS:
7021     case VIDIOC_TRY_EXT_CTRLS:
7022     - err = get_v4l2_ext_controls32(&karg.v2ecs, up);
7023     + err = bufsize_v4l2_ext_controls(up, &aux_space);
7024     + if (!err)
7025     + err = alloc_userspace(sizeof(struct v4l2_ext_controls),
7026     + aux_space, &up_native);
7027     + if (!err) {
7028     + aux_buf = up_native + sizeof(struct v4l2_ext_controls);
7029     + err = get_v4l2_ext_controls32(file, up_native, up,
7030     + aux_buf, aux_space);
7031     + }
7032     compatible_arg = 0;
7033     break;
7034     case VIDIOC_DQEVENT:
7035     + err = alloc_userspace(sizeof(struct v4l2_event), 0, &up_native);
7036     compatible_arg = 0;
7037     break;
7038     }
7039     @@ -996,26 +1175,26 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
7040    
7041     if (compatible_arg)
7042     err = native_ioctl(file, cmd, (unsigned long)up);
7043     - else {
7044     - mm_segment_t old_fs = get_fs();
7045     + else
7046     + err = native_ioctl(file, cmd, (unsigned long)up_native);
7047    
7048     - set_fs(KERNEL_DS);
7049     - err = native_ioctl(file, cmd, (unsigned long)&karg);
7050     - set_fs(old_fs);
7051     - }
7052     + if (err == -ENOTTY)
7053     + return err;
7054    
7055     - /* Special case: even after an error we need to put the
7056     - results back for these ioctls since the error_idx will
7057     - contain information on which control failed. */
7058     + /*
7059     + * Special case: even after an error we need to put the
7060     + * results back for these ioctls since the error_idx will
7061     + * contain information on which control failed.
7062     + */
7063     switch (cmd) {
7064     case VIDIOC_G_EXT_CTRLS:
7065     case VIDIOC_S_EXT_CTRLS:
7066     case VIDIOC_TRY_EXT_CTRLS:
7067     - if (put_v4l2_ext_controls32(&karg.v2ecs, up))
7068     + if (put_v4l2_ext_controls32(file, up_native, up))
7069     err = -EFAULT;
7070     break;
7071     case VIDIOC_S_EDID:
7072     - if (put_v4l2_edid32(&karg.v2edid, up))
7073     + if (put_v4l2_edid32(up_native, up))
7074     err = -EFAULT;
7075     break;
7076     }
7077     @@ -1027,43 +1206,46 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
7078     case VIDIOC_S_OUTPUT:
7079     case VIDIOC_G_INPUT:
7080     case VIDIOC_G_OUTPUT:
7081     - err = put_user(((s32)karg.vi), (s32 __user *)up);
7082     + if (assign_in_user((compat_uint_t __user *)up,
7083     + ((unsigned int __user *)up_native)))
7084     + err = -EFAULT;
7085     break;
7086    
7087     case VIDIOC_G_FBUF:
7088     - err = put_v4l2_framebuffer32(&karg.v2fb, up);
7089     + err = put_v4l2_framebuffer32(up_native, up);
7090     break;
7091    
7092     case VIDIOC_DQEVENT:
7093     - err = put_v4l2_event32(&karg.v2ev, up);
7094     + err = put_v4l2_event32(up_native, up);
7095     break;
7096    
7097     case VIDIOC_G_EDID:
7098     - err = put_v4l2_edid32(&karg.v2edid, up);
7099     + err = put_v4l2_edid32(up_native, up);
7100     break;
7101    
7102     case VIDIOC_G_FMT:
7103     case VIDIOC_S_FMT:
7104     case VIDIOC_TRY_FMT:
7105     - err = put_v4l2_format32(&karg.v2f, up);
7106     + err = put_v4l2_format32(up_native, up);
7107     break;
7108    
7109     case VIDIOC_CREATE_BUFS:
7110     - err = put_v4l2_create32(&karg.v2crt, up);
7111     + err = put_v4l2_create32(up_native, up);
7112     break;
7113    
7114     + case VIDIOC_PREPARE_BUF:
7115     case VIDIOC_QUERYBUF:
7116     case VIDIOC_QBUF:
7117     case VIDIOC_DQBUF:
7118     - err = put_v4l2_buffer32(&karg.v2b, up);
7119     + err = put_v4l2_buffer32(up_native, up);
7120     break;
7121    
7122     case VIDIOC_ENUMSTD:
7123     - err = put_v4l2_standard32(&karg.v2s, up);
7124     + err = put_v4l2_standard32(up_native, up);
7125     break;
7126    
7127     case VIDIOC_ENUMINPUT:
7128     - err = put_v4l2_input32(&karg.v2i, up);
7129     + err = put_v4l2_input32(up_native, up);
7130     break;
7131     }
7132     return err;
7133     diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
7134     index 79614992ee21..89e0878ce0a0 100644
7135     --- a/drivers/media/v4l2-core/v4l2-ioctl.c
7136     +++ b/drivers/media/v4l2-core/v4l2-ioctl.c
7137     @@ -1311,52 +1311,50 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
7138     struct file *file, void *fh, void *arg)
7139     {
7140     struct v4l2_fmtdesc *p = arg;
7141     - struct video_device *vfd = video_devdata(file);
7142     - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
7143     - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
7144     - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
7145     - bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
7146     - bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
7147     - int ret = -EINVAL;
7148     + int ret = check_fmt(file, p->type);
7149     +
7150     + if (ret)
7151     + return ret;
7152     + ret = -EINVAL;
7153    
7154     switch (p->type) {
7155     case V4L2_BUF_TYPE_VIDEO_CAPTURE:
7156     - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_enum_fmt_vid_cap))
7157     + if (unlikely(!ops->vidioc_enum_fmt_vid_cap))
7158     break;
7159     ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
7160     break;
7161     case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
7162     - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_cap_mplane))
7163     + if (unlikely(!ops->vidioc_enum_fmt_vid_cap_mplane))
7164     break;
7165     ret = ops->vidioc_enum_fmt_vid_cap_mplane(file, fh, arg);
7166     break;
7167     case V4L2_BUF_TYPE_VIDEO_OVERLAY:
7168     - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_overlay))
7169     + if (unlikely(!ops->vidioc_enum_fmt_vid_overlay))
7170     break;
7171     ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg);
7172     break;
7173     case V4L2_BUF_TYPE_VIDEO_OUTPUT:
7174     - if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out))
7175     + if (unlikely(!ops->vidioc_enum_fmt_vid_out))
7176     break;
7177     ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg);
7178     break;
7179     case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
7180     - if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out_mplane))
7181     + if (unlikely(!ops->vidioc_enum_fmt_vid_out_mplane))
7182     break;
7183     ret = ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg);
7184     break;
7185     case V4L2_BUF_TYPE_SDR_CAPTURE:
7186     - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_enum_fmt_sdr_cap))
7187     + if (unlikely(!ops->vidioc_enum_fmt_sdr_cap))
7188     break;
7189     ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg);
7190     break;
7191     case V4L2_BUF_TYPE_SDR_OUTPUT:
7192     - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_enum_fmt_sdr_out))
7193     + if (unlikely(!ops->vidioc_enum_fmt_sdr_out))
7194     break;
7195     ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg);
7196     break;
7197     case V4L2_BUF_TYPE_META_CAPTURE:
7198     - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_meta_cap))
7199     + if (unlikely(!ops->vidioc_enum_fmt_meta_cap))
7200     break;
7201     ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg);
7202     break;
7203     @@ -1370,13 +1368,10 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
7204     struct file *file, void *fh, void *arg)
7205     {
7206     struct v4l2_format *p = arg;
7207     - struct video_device *vfd = video_devdata(file);
7208     - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
7209     - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
7210     - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
7211     - bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
7212     - bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
7213     - int ret;
7214     + int ret = check_fmt(file, p->type);
7215     +
7216     + if (ret)
7217     + return ret;
7218    
7219     /*
7220     * fmt can't be cleared for these overlay types due to the 'clips'
7221     @@ -1404,7 +1399,7 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
7222    
7223     switch (p->type) {
7224     case V4L2_BUF_TYPE_VIDEO_CAPTURE:
7225     - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_g_fmt_vid_cap))
7226     + if (unlikely(!ops->vidioc_g_fmt_vid_cap))
7227     break;
7228     p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7229     ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
7230     @@ -1412,23 +1407,15 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
7231     p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7232     return ret;
7233     case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
7234     - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_cap_mplane))
7235     - break;
7236     return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
7237     case V4L2_BUF_TYPE_VIDEO_OVERLAY:
7238     - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_overlay))
7239     - break;
7240     return ops->vidioc_g_fmt_vid_overlay(file, fh, arg);
7241     case V4L2_BUF_TYPE_VBI_CAPTURE:
7242     - if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_vbi_cap))
7243     - break;
7244     return ops->vidioc_g_fmt_vbi_cap(file, fh, arg);
7245     case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
7246     - if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_cap))
7247     - break;
7248     return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg);
7249     case V4L2_BUF_TYPE_VIDEO_OUTPUT:
7250     - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out))
7251     + if (unlikely(!ops->vidioc_g_fmt_vid_out))
7252     break;
7253     p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7254     ret = ops->vidioc_g_fmt_vid_out(file, fh, arg);
7255     @@ -1436,32 +1423,18 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
7256     p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7257     return ret;
7258     case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
7259     - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_mplane))
7260     - break;
7261     return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg);
7262     case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
7263     - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_overlay))
7264     - break;
7265     return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg);
7266     case V4L2_BUF_TYPE_VBI_OUTPUT:
7267     - if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_vbi_out))
7268     - break;
7269     return ops->vidioc_g_fmt_vbi_out(file, fh, arg);
7270     case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
7271     - if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_out))
7272     - break;
7273     return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg);
7274     case V4L2_BUF_TYPE_SDR_CAPTURE:
7275     - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_g_fmt_sdr_cap))
7276     - break;
7277     return ops->vidioc_g_fmt_sdr_cap(file, fh, arg);
7278     case V4L2_BUF_TYPE_SDR_OUTPUT:
7279     - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_g_fmt_sdr_out))
7280     - break;
7281     return ops->vidioc_g_fmt_sdr_out(file, fh, arg);
7282     case V4L2_BUF_TYPE_META_CAPTURE:
7283     - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_meta_cap))
7284     - break;
7285     return ops->vidioc_g_fmt_meta_cap(file, fh, arg);
7286     }
7287     return -EINVAL;
7288     @@ -1487,12 +1460,10 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
7289     {
7290     struct v4l2_format *p = arg;
7291     struct video_device *vfd = video_devdata(file);
7292     - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
7293     - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
7294     - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
7295     - bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
7296     - bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
7297     - int ret;
7298     + int ret = check_fmt(file, p->type);
7299     +
7300     + if (ret)
7301     + return ret;
7302    
7303     ret = v4l_enable_media_source(vfd);
7304     if (ret)
7305     @@ -1501,37 +1472,37 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
7306    
7307     switch (p->type) {
7308     case V4L2_BUF_TYPE_VIDEO_CAPTURE:
7309     - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_s_fmt_vid_cap))
7310     + if (unlikely(!ops->vidioc_s_fmt_vid_cap))
7311     break;
7312     CLEAR_AFTER_FIELD(p, fmt.pix);
7313     ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg);
7314     /* just in case the driver zeroed it again */
7315     p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7316     - if (is_tch)
7317     + if (vfd->vfl_type == VFL_TYPE_TOUCH)
7318     v4l_pix_format_touch(&p->fmt.pix);
7319     return ret;
7320     case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
7321     - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap_mplane))
7322     + if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
7323     break;
7324     CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
7325     return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
7326     case V4L2_BUF_TYPE_VIDEO_OVERLAY:
7327     - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_overlay))
7328     + if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
7329     break;
7330     CLEAR_AFTER_FIELD(p, fmt.win);
7331     return ops->vidioc_s_fmt_vid_overlay(file, fh, arg);
7332     case V4L2_BUF_TYPE_VBI_CAPTURE:
7333     - if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_vbi_cap))
7334     + if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
7335     break;
7336     CLEAR_AFTER_FIELD(p, fmt.vbi);
7337     return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
7338     case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
7339     - if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_cap))
7340     + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
7341     break;
7342     CLEAR_AFTER_FIELD(p, fmt.sliced);
7343     return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
7344     case V4L2_BUF_TYPE_VIDEO_OUTPUT:
7345     - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out))
7346     + if (unlikely(!ops->vidioc_s_fmt_vid_out))
7347     break;
7348     CLEAR_AFTER_FIELD(p, fmt.pix);
7349     ret = ops->vidioc_s_fmt_vid_out(file, fh, arg);
7350     @@ -1539,37 +1510,37 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
7351     p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7352     return ret;
7353     case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
7354     - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_mplane))
7355     + if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
7356     break;
7357     CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
7358     return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
7359     case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
7360     - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_overlay))
7361     + if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
7362     break;
7363     CLEAR_AFTER_FIELD(p, fmt.win);
7364     return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg);
7365     case V4L2_BUF_TYPE_VBI_OUTPUT:
7366     - if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_vbi_out))
7367     + if (unlikely(!ops->vidioc_s_fmt_vbi_out))
7368     break;
7369     CLEAR_AFTER_FIELD(p, fmt.vbi);
7370     return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
7371     case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
7372     - if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_out))
7373     + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
7374     break;
7375     CLEAR_AFTER_FIELD(p, fmt.sliced);
7376     return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
7377     case V4L2_BUF_TYPE_SDR_CAPTURE:
7378     - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_s_fmt_sdr_cap))
7379     + if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
7380     break;
7381     CLEAR_AFTER_FIELD(p, fmt.sdr);
7382     return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
7383     case V4L2_BUF_TYPE_SDR_OUTPUT:
7384     - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_s_fmt_sdr_out))
7385     + if (unlikely(!ops->vidioc_s_fmt_sdr_out))
7386     break;
7387     CLEAR_AFTER_FIELD(p, fmt.sdr);
7388     return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
7389     case V4L2_BUF_TYPE_META_CAPTURE:
7390     - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_meta_cap))
7391     + if (unlikely(!ops->vidioc_s_fmt_meta_cap))
7392     break;
7393     CLEAR_AFTER_FIELD(p, fmt.meta);
7394     return ops->vidioc_s_fmt_meta_cap(file, fh, arg);
7395     @@ -1581,19 +1552,16 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
7396     struct file *file, void *fh, void *arg)
7397     {
7398     struct v4l2_format *p = arg;
7399     - struct video_device *vfd = video_devdata(file);
7400     - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
7401     - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
7402     - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
7403     - bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
7404     - bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
7405     - int ret;
7406     + int ret = check_fmt(file, p->type);
7407     +
7408     + if (ret)
7409     + return ret;
7410    
7411     v4l_sanitize_format(p);
7412    
7413     switch (p->type) {
7414     case V4L2_BUF_TYPE_VIDEO_CAPTURE:
7415     - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_try_fmt_vid_cap))
7416     + if (unlikely(!ops->vidioc_try_fmt_vid_cap))
7417     break;
7418     CLEAR_AFTER_FIELD(p, fmt.pix);
7419     ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg);
7420     @@ -1601,27 +1569,27 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
7421     p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7422     return ret;
7423     case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
7424     - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap_mplane))
7425     + if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
7426     break;
7427     CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
7428     return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
7429     case V4L2_BUF_TYPE_VIDEO_OVERLAY:
7430     - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_overlay))
7431     + if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
7432     break;
7433     CLEAR_AFTER_FIELD(p, fmt.win);
7434     return ops->vidioc_try_fmt_vid_overlay(file, fh, arg);
7435     case V4L2_BUF_TYPE_VBI_CAPTURE:
7436     - if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_vbi_cap))
7437     + if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
7438     break;
7439     CLEAR_AFTER_FIELD(p, fmt.vbi);
7440     return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
7441     case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
7442     - if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_cap))
7443     + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
7444     break;
7445     CLEAR_AFTER_FIELD(p, fmt.sliced);
7446     return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
7447     case V4L2_BUF_TYPE_VIDEO_OUTPUT:
7448     - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out))
7449     + if (unlikely(!ops->vidioc_try_fmt_vid_out))
7450     break;
7451     CLEAR_AFTER_FIELD(p, fmt.pix);
7452     ret = ops->vidioc_try_fmt_vid_out(file, fh, arg);
7453     @@ -1629,37 +1597,37 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
7454     p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7455     return ret;
7456     case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
7457     - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_mplane))
7458     + if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
7459     break;
7460     CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
7461     return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
7462     case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
7463     - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_overlay))
7464     + if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
7465     break;
7466     CLEAR_AFTER_FIELD(p, fmt.win);
7467     return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg);
7468     case V4L2_BUF_TYPE_VBI_OUTPUT:
7469     - if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_vbi_out))
7470     + if (unlikely(!ops->vidioc_try_fmt_vbi_out))
7471     break;
7472     CLEAR_AFTER_FIELD(p, fmt.vbi);
7473     return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
7474     case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
7475     - if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_out))
7476     + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
7477     break;
7478     CLEAR_AFTER_FIELD(p, fmt.sliced);
7479     return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
7480     case V4L2_BUF_TYPE_SDR_CAPTURE:
7481     - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_try_fmt_sdr_cap))
7482     + if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
7483     break;
7484     CLEAR_AFTER_FIELD(p, fmt.sdr);
7485     return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
7486     case V4L2_BUF_TYPE_SDR_OUTPUT:
7487     - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_try_fmt_sdr_out))
7488     + if (unlikely(!ops->vidioc_try_fmt_sdr_out))
7489     break;
7490     CLEAR_AFTER_FIELD(p, fmt.sdr);
7491     return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
7492     case V4L2_BUF_TYPE_META_CAPTURE:
7493     - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_meta_cap))
7494     + if (unlikely(!ops->vidioc_try_fmt_meta_cap))
7495     break;
7496     CLEAR_AFTER_FIELD(p, fmt.meta);
7497     return ops->vidioc_try_fmt_meta_cap(file, fh, arg);
7498     @@ -2927,8 +2895,11 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
7499    
7500     /* Handles IOCTL */
7501     err = func(file, cmd, parg);
7502     - if (err == -ENOIOCTLCMD)
7503     + if (err == -ENOTTY || err == -ENOIOCTLCMD) {
7504     err = -ENOTTY;
7505     + goto out;
7506     + }
7507     +
7508     if (err == 0) {
7509     if (cmd == VIDIOC_DQBUF)
7510     trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
7511     diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
7512     index dd56a671ea42..2a978d9832a7 100644
7513     --- a/drivers/mtd/nand/brcmnand/brcmnand.c
7514     +++ b/drivers/mtd/nand/brcmnand/brcmnand.c
7515     @@ -2193,16 +2193,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
7516     if (ctrl->nand_version >= 0x0702)
7517     tmp |= ACC_CONTROL_RD_ERASED;
7518     tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
7519     - if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
7520     - /*
7521     - * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
7522     - * errors
7523     - */
7524     - if (has_flash_dma(ctrl))
7525     - tmp &= ~ACC_CONTROL_PREFETCH;
7526     - else
7527     - tmp |= ACC_CONTROL_PREFETCH;
7528     - }
7529     + if (ctrl->features & BRCMNAND_HAS_PREFETCH)
7530     + tmp &= ~ACC_CONTROL_PREFETCH;
7531     +
7532     nand_writereg(ctrl, offs, tmp);
7533    
7534     return 0;
7535     diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
7536     index 6135d007a068..9c702b46c6ee 100644
7537     --- a/drivers/mtd/nand/nand_base.c
7538     +++ b/drivers/mtd/nand/nand_base.c
7539     @@ -2199,6 +2199,7 @@ EXPORT_SYMBOL(nand_write_oob_syndrome);
7540     static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
7541     struct mtd_oob_ops *ops)
7542     {
7543     + unsigned int max_bitflips = 0;
7544     int page, realpage, chipnr;
7545     struct nand_chip *chip = mtd_to_nand(mtd);
7546     struct mtd_ecc_stats stats;
7547     @@ -2256,6 +2257,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
7548     nand_wait_ready(mtd);
7549     }
7550    
7551     + max_bitflips = max_t(unsigned int, max_bitflips, ret);
7552     +
7553     readlen -= len;
7554     if (!readlen)
7555     break;
7556     @@ -2281,7 +2284,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
7557     if (mtd->ecc_stats.failed - stats.failed)
7558     return -EBADMSG;
7559    
7560     - return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
7561     + return max_bitflips;
7562     }
7563    
7564     /**
7565     diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
7566     index 82244be3e766..958974821582 100644
7567     --- a/drivers/mtd/nand/sunxi_nand.c
7568     +++ b/drivers/mtd/nand/sunxi_nand.c
7569     @@ -1853,8 +1853,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
7570    
7571     /* Add ECC info retrieval from DT */
7572     for (i = 0; i < ARRAY_SIZE(strengths); i++) {
7573     - if (ecc->strength <= strengths[i])
7574     + if (ecc->strength <= strengths[i]) {
7575     + /*
7576     + * Update ecc->strength value with the actual strength
7577     + * that will be used by the ECC engine.
7578     + */
7579     + ecc->strength = strengths[i];
7580     break;
7581     + }
7582     }
7583    
7584     if (i >= ARRAY_SIZE(strengths)) {
7585     diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
7586     index b210fdb31c98..b1fc28f63882 100644
7587     --- a/drivers/mtd/ubi/block.c
7588     +++ b/drivers/mtd/ubi/block.c
7589     @@ -99,6 +99,8 @@ struct ubiblock {
7590    
7591     /* Linked list of all ubiblock instances */
7592     static LIST_HEAD(ubiblock_devices);
7593     +static DEFINE_IDR(ubiblock_minor_idr);
7594     +/* Protects ubiblock_devices and ubiblock_minor_idr */
7595     static DEFINE_MUTEX(devices_mutex);
7596     static int ubiblock_major;
7597    
7598     @@ -351,8 +353,6 @@ static const struct blk_mq_ops ubiblock_mq_ops = {
7599     .init_request = ubiblock_init_request,
7600     };
7601    
7602     -static DEFINE_IDR(ubiblock_minor_idr);
7603     -
7604     int ubiblock_create(struct ubi_volume_info *vi)
7605     {
7606     struct ubiblock *dev;
7607     @@ -365,14 +365,15 @@ int ubiblock_create(struct ubi_volume_info *vi)
7608     /* Check that the volume isn't already handled */
7609     mutex_lock(&devices_mutex);
7610     if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
7611     - mutex_unlock(&devices_mutex);
7612     - return -EEXIST;
7613     + ret = -EEXIST;
7614     + goto out_unlock;
7615     }
7616     - mutex_unlock(&devices_mutex);
7617    
7618     dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
7619     - if (!dev)
7620     - return -ENOMEM;
7621     + if (!dev) {
7622     + ret = -ENOMEM;
7623     + goto out_unlock;
7624     + }
7625    
7626     mutex_init(&dev->dev_mutex);
7627    
7628     @@ -437,14 +438,13 @@ int ubiblock_create(struct ubi_volume_info *vi)
7629     goto out_free_queue;
7630     }
7631    
7632     - mutex_lock(&devices_mutex);
7633     list_add_tail(&dev->list, &ubiblock_devices);
7634     - mutex_unlock(&devices_mutex);
7635    
7636     /* Must be the last step: anyone can call file ops from now on */
7637     add_disk(dev->gd);
7638     dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
7639     dev->ubi_num, dev->vol_id, vi->name);
7640     + mutex_unlock(&devices_mutex);
7641     return 0;
7642    
7643     out_free_queue:
7644     @@ -457,6 +457,8 @@ int ubiblock_create(struct ubi_volume_info *vi)
7645     put_disk(dev->gd);
7646     out_free_dev:
7647     kfree(dev);
7648     +out_unlock:
7649     + mutex_unlock(&devices_mutex);
7650    
7651     return ret;
7652     }
7653     @@ -478,30 +480,36 @@ static void ubiblock_cleanup(struct ubiblock *dev)
7654     int ubiblock_remove(struct ubi_volume_info *vi)
7655     {
7656     struct ubiblock *dev;
7657     + int ret;
7658    
7659     mutex_lock(&devices_mutex);
7660     dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
7661     if (!dev) {
7662     - mutex_unlock(&devices_mutex);
7663     - return -ENODEV;
7664     + ret = -ENODEV;
7665     + goto out_unlock;
7666     }
7667    
7668     /* Found a device, let's lock it so we can check if it's busy */
7669     mutex_lock(&dev->dev_mutex);
7670     if (dev->refcnt > 0) {
7671     - mutex_unlock(&dev->dev_mutex);
7672     - mutex_unlock(&devices_mutex);
7673     - return -EBUSY;
7674     + ret = -EBUSY;
7675     + goto out_unlock_dev;
7676     }
7677    
7678     /* Remove from device list */
7679     list_del(&dev->list);
7680     - mutex_unlock(&devices_mutex);
7681     -
7682     ubiblock_cleanup(dev);
7683     mutex_unlock(&dev->dev_mutex);
7684     + mutex_unlock(&devices_mutex);
7685     +
7686     kfree(dev);
7687     return 0;
7688     +
7689     +out_unlock_dev:
7690     + mutex_unlock(&dev->dev_mutex);
7691     +out_unlock:
7692     + mutex_unlock(&devices_mutex);
7693     + return ret;
7694     }
7695    
7696     static int ubiblock_resize(struct ubi_volume_info *vi)
7697     @@ -630,6 +638,7 @@ static void ubiblock_remove_all(void)
7698     struct ubiblock *next;
7699     struct ubiblock *dev;
7700    
7701     + mutex_lock(&devices_mutex);
7702     list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
7703     /* The module is being forcefully removed */
7704     WARN_ON(dev->desc);
7705     @@ -638,6 +647,7 @@ static void ubiblock_remove_all(void)
7706     ubiblock_cleanup(dev);
7707     kfree(dev);
7708     }
7709     + mutex_unlock(&devices_mutex);
7710     }
7711    
7712     int __init ubiblock_init(void)
7713     diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
7714     index 85237cf661f9..3fd8d7ff7a02 100644
7715     --- a/drivers/mtd/ubi/vmt.c
7716     +++ b/drivers/mtd/ubi/vmt.c
7717     @@ -270,6 +270,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
7718     vol->last_eb_bytes = vol->usable_leb_size;
7719     }
7720    
7721     + /* Make volume "available" before it becomes accessible via sysfs */
7722     + spin_lock(&ubi->volumes_lock);
7723     + ubi->volumes[vol_id] = vol;
7724     + ubi->vol_count += 1;
7725     + spin_unlock(&ubi->volumes_lock);
7726     +
7727     /* Register character device for the volume */
7728     cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
7729     vol->cdev.owner = THIS_MODULE;
7730     @@ -298,11 +304,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
7731     if (err)
7732     goto out_sysfs;
7733    
7734     - spin_lock(&ubi->volumes_lock);
7735     - ubi->volumes[vol_id] = vol;
7736     - ubi->vol_count += 1;
7737     - spin_unlock(&ubi->volumes_lock);
7738     -
7739     ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
7740     self_check_volumes(ubi);
7741     return err;
7742     @@ -315,6 +316,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
7743     */
7744     cdev_device_del(&vol->cdev, &vol->dev);
7745     out_mapping:
7746     + spin_lock(&ubi->volumes_lock);
7747     + ubi->volumes[vol_id] = NULL;
7748     + ubi->vol_count -= 1;
7749     + spin_unlock(&ubi->volumes_lock);
7750     ubi_eba_destroy_table(eba_tbl);
7751     out_acc:
7752     spin_lock(&ubi->volumes_lock);
7753     diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
7754     index b5b8cd6f481c..668b46202507 100644
7755     --- a/drivers/mtd/ubi/wl.c
7756     +++ b/drivers/mtd/ubi/wl.c
7757     @@ -1528,6 +1528,46 @@ static void shutdown_work(struct ubi_device *ubi)
7758     }
7759     }
7760    
7761     +/**
7762     + * erase_aeb - erase a PEB given in UBI attach info PEB
7763     + * @ubi: UBI device description object
7764     + * @aeb: UBI attach info PEB
7765     + * @sync: If true, erase synchronously. Otherwise schedule for erasure
7766     + */
7767     +static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
7768     +{
7769     + struct ubi_wl_entry *e;
7770     + int err;
7771     +
7772     + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
7773     + if (!e)
7774     + return -ENOMEM;
7775     +
7776     + e->pnum = aeb->pnum;
7777     + e->ec = aeb->ec;
7778     + ubi->lookuptbl[e->pnum] = e;
7779     +
7780     + if (sync) {
7781     + err = sync_erase(ubi, e, false);
7782     + if (err)
7783     + goto out_free;
7784     +
7785     + wl_tree_add(e, &ubi->free);
7786     + ubi->free_count++;
7787     + } else {
7788     + err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
7789     + if (err)
7790     + goto out_free;
7791     + }
7792     +
7793     + return 0;
7794     +
7795     +out_free:
7796     + wl_entry_destroy(ubi, e);
7797     +
7798     + return err;
7799     +}
7800     +
7801     /**
7802     * ubi_wl_init - initialize the WL sub-system using attaching information.
7803     * @ubi: UBI device description object
7804     @@ -1566,18 +1606,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
7805     list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
7806     cond_resched();
7807    
7808     - e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
7809     - if (!e)
7810     + err = erase_aeb(ubi, aeb, false);
7811     + if (err)
7812     goto out_free;
7813    
7814     - e->pnum = aeb->pnum;
7815     - e->ec = aeb->ec;
7816     - ubi->lookuptbl[e->pnum] = e;
7817     - if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
7818     - wl_entry_destroy(ubi, e);
7819     - goto out_free;
7820     - }
7821     -
7822     found_pebs++;
7823     }
7824    
7825     @@ -1635,6 +1667,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
7826     ubi_assert(!ubi->lookuptbl[e->pnum]);
7827     ubi->lookuptbl[e->pnum] = e;
7828     } else {
7829     + bool sync = false;
7830     +
7831     /*
7832     * Usually old Fastmap PEBs are scheduled for erasure
7833     * and we don't have to care about them but if we face
7834     @@ -1644,18 +1678,21 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
7835     if (ubi->lookuptbl[aeb->pnum])
7836     continue;
7837    
7838     - e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
7839     - if (!e)
7840     - goto out_free;
7841     + /*
7842     + * The fastmap update code might not find a free PEB for
7843     + * writing the fastmap anchor to and then reuses the
7844     + * current fastmap anchor PEB. When this PEB gets erased
7845     + * and a power cut happens before it is written again we
7846     + * must make sure that the fastmap attach code doesn't
7847     + * find any outdated fastmap anchors, hence we erase the
7848     + * outdated fastmap anchor PEBs synchronously here.
7849     + */
7850     + if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
7851     + sync = true;
7852    
7853     - e->pnum = aeb->pnum;
7854     - e->ec = aeb->ec;
7855     - ubi_assert(!ubi->lookuptbl[e->pnum]);
7856     - ubi->lookuptbl[e->pnum] = e;
7857     - if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
7858     - wl_entry_destroy(ubi, e);
7859     + err = erase_aeb(ubi, aeb, sync);
7860     + if (err)
7861     goto out_free;
7862     - }
7863     }
7864    
7865     found_pebs++;
7866     diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
7867     index 8ce262fc2561..51b40aecb776 100644
7868     --- a/drivers/perf/arm_spe_pmu.c
7869     +++ b/drivers/perf/arm_spe_pmu.c
7870     @@ -1164,6 +1164,15 @@ static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev)
7871     struct arm_spe_pmu *spe_pmu;
7872     struct device *dev = &pdev->dev;
7873    
7874     + /*
7875     + * If kernelspace is unmapped when running at EL0, then the SPE
7876     + * buffer will fault and prematurely terminate the AUX session.
7877     + */
7878     + if (arm64_kernel_unmapped_at_el0()) {
7879     + dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
7880     + return -EPERM;
7881     + }
7882     +
7883     spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
7884     if (!spe_pmu) {
7885     dev_err(dev, "failed to allocate spe_pmu\n");
7886     diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
7887     index 12a1af45acb9..32209f37b2be 100644
7888     --- a/drivers/pinctrl/intel/pinctrl-intel.c
7889     +++ b/drivers/pinctrl/intel/pinctrl-intel.c
7890     @@ -425,6 +425,18 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
7891     writel(value, padcfg0);
7892     }
7893    
7894     +static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
7895     +{
7896     + u32 value;
7897     +
7898     + /* Put the pad into GPIO mode */
7899     + value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
7900     + /* Disable SCI/SMI/NMI generation */
7901     + value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
7902     + value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
7903     + writel(value, padcfg0);
7904     +}
7905     +
7906     static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
7907     struct pinctrl_gpio_range *range,
7908     unsigned pin)
7909     @@ -432,7 +444,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
7910     struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
7911     void __iomem *padcfg0;
7912     unsigned long flags;
7913     - u32 value;
7914    
7915     raw_spin_lock_irqsave(&pctrl->lock, flags);
7916    
7917     @@ -442,13 +453,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
7918     }
7919    
7920     padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
7921     - /* Put the pad into GPIO mode */
7922     - value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
7923     - /* Disable SCI/SMI/NMI generation */
7924     - value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
7925     - value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
7926     - writel(value, padcfg0);
7927     -
7928     + intel_gpio_set_gpio_mode(padcfg0);
7929     /* Disable TX buffer and enable RX (this will be input) */
7930     __intel_gpio_set_direction(padcfg0, true);
7931    
7932     @@ -935,6 +940,8 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
7933    
7934     raw_spin_lock_irqsave(&pctrl->lock, flags);
7935    
7936     + intel_gpio_set_gpio_mode(reg);
7937     +
7938     value = readl(reg);
7939    
7940     value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
7941     diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
7942     index 4a6ea159c65d..c490899b77e5 100644
7943     --- a/drivers/pinctrl/pinctrl-mcp23s08.c
7944     +++ b/drivers/pinctrl/pinctrl-mcp23s08.c
7945     @@ -896,16 +896,16 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
7946     goto fail;
7947     }
7948    
7949     - ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
7950     - if (ret < 0)
7951     - goto fail;
7952     -
7953     if (mcp->irq && mcp->irq_controller) {
7954     ret = mcp23s08_irq_setup(mcp);
7955     if (ret)
7956     goto fail;
7957     }
7958    
7959     + ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
7960     + if (ret < 0)
7961     + goto fail;
7962     +
7963     mcp->pinctrl_desc.name = "mcp23xxx-pinctrl";
7964     mcp->pinctrl_desc.pctlops = &mcp_pinctrl_ops;
7965     mcp->pinctrl_desc.confops = &mcp_pinconf_ops;
7966     diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
7967     index fb242c542dc9..cbf58a10113d 100644
7968     --- a/drivers/pinctrl/pinctrl-sx150x.c
7969     +++ b/drivers/pinctrl/pinctrl-sx150x.c
7970     @@ -1144,6 +1144,27 @@ static int sx150x_probe(struct i2c_client *client,
7971     if (ret)
7972     return ret;
7973    
7974     + /* Pinctrl_desc */
7975     + pctl->pinctrl_desc.name = "sx150x-pinctrl";
7976     + pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops;
7977     + pctl->pinctrl_desc.confops = &sx150x_pinconf_ops;
7978     + pctl->pinctrl_desc.pins = pctl->data->pins;
7979     + pctl->pinctrl_desc.npins = pctl->data->npins;
7980     + pctl->pinctrl_desc.owner = THIS_MODULE;
7981     +
7982     + ret = devm_pinctrl_register_and_init(dev, &pctl->pinctrl_desc,
7983     + pctl, &pctl->pctldev);
7984     + if (ret) {
7985     + dev_err(dev, "Failed to register pinctrl device\n");
7986     + return ret;
7987     + }
7988     +
7989     + ret = pinctrl_enable(pctl->pctldev);
7990     + if (ret) {
7991     + dev_err(dev, "Failed to enable pinctrl device\n");
7992     + return ret;
7993     + }
7994     +
7995     /* Register GPIO controller */
7996     pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
7997     pctl->gpio.base = -1;
7998     @@ -1172,6 +1193,11 @@ static int sx150x_probe(struct i2c_client *client,
7999     if (ret)
8000     return ret;
8001    
8002     + ret = gpiochip_add_pin_range(&pctl->gpio, dev_name(dev),
8003     + 0, 0, pctl->data->npins);
8004     + if (ret)
8005     + return ret;
8006     +
8007     /* Add Interrupt support if an irq is specified */
8008     if (client->irq > 0) {
8009     pctl->irq_chip.name = devm_kstrdup(dev, client->name,
8010     @@ -1217,20 +1243,6 @@ static int sx150x_probe(struct i2c_client *client,
8011     client->irq);
8012     }
8013    
8014     - /* Pinctrl_desc */
8015     - pctl->pinctrl_desc.name = "sx150x-pinctrl";
8016     - pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops;
8017     - pctl->pinctrl_desc.confops = &sx150x_pinconf_ops;
8018     - pctl->pinctrl_desc.pins = pctl->data->pins;
8019     - pctl->pinctrl_desc.npins = pctl->data->npins;
8020     - pctl->pinctrl_desc.owner = THIS_MODULE;
8021     -
8022     - pctl->pctldev = pinctrl_register(&pctl->pinctrl_desc, dev, pctl);
8023     - if (IS_ERR(pctl->pctldev)) {
8024     - dev_err(dev, "Failed to register pinctrl device\n");
8025     - return PTR_ERR(pctl->pctldev);
8026     - }
8027     -
8028     return 0;
8029     }
8030    
8031     diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
8032     index 38b3a9c84fd1..48d366304582 100644
8033     --- a/drivers/scsi/cxlflash/main.c
8034     +++ b/drivers/scsi/cxlflash/main.c
8035     @@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
8036     cmd->parent = afu;
8037     cmd->hwq_index = hwq_index;
8038    
8039     + cmd->sa.ioasc = 0;
8040     cmd->rcb.ctx_id = hwq->ctx_hndl;
8041     cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
8042     cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
8043     diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
8044     index fe3a0da3ec97..57bf43e34863 100644
8045     --- a/drivers/scsi/hosts.c
8046     +++ b/drivers/scsi/hosts.c
8047     @@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev)
8048    
8049     scsi_proc_hostdir_rm(shost->hostt);
8050    
8051     + /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
8052     + rcu_barrier();
8053     +
8054     if (shost->tmf_work_q)
8055     destroy_workqueue(shost->tmf_work_q);
8056     if (shost->ehandler)
8057     @@ -325,6 +328,8 @@ static void scsi_host_dev_release(struct device *dev)
8058     if (shost->work_q)
8059     destroy_workqueue(shost->work_q);
8060    
8061     + destroy_rcu_head(&shost->rcu);
8062     +
8063     if (shost->shost_state == SHOST_CREATED) {
8064     /*
8065     * Free the shost_dev device name here if scsi_host_alloc()
8066     @@ -399,6 +404,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
8067     INIT_LIST_HEAD(&shost->starved_list);
8068     init_waitqueue_head(&shost->host_wait);
8069     mutex_init(&shost->scan_mutex);
8070     + init_rcu_head(&shost->rcu);
8071    
8072     index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
8073     if (index < 0)
8074     diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
8075     index 2b7ea7e53e12..a28b2994b009 100644
8076     --- a/drivers/scsi/lpfc/lpfc_init.c
8077     +++ b/drivers/scsi/lpfc/lpfc_init.c
8078     @@ -9421,44 +9421,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
8079     lpfc_sli4_bar0_register_memmap(phba, if_type);
8080     }
8081    
8082     - if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
8083     - (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
8084     - /*
8085     - * Map SLI4 if type 0 HBA Control Register base to a kernel
8086     - * virtual address and setup the registers.
8087     - */
8088     - phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
8089     - bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
8090     - phba->sli4_hba.ctrl_regs_memmap_p =
8091     - ioremap(phba->pci_bar1_map, bar1map_len);
8092     - if (!phba->sli4_hba.ctrl_regs_memmap_p) {
8093     - dev_printk(KERN_ERR, &pdev->dev,
8094     - "ioremap failed for SLI4 HBA control registers.\n");
8095     + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
8096     + if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
8097     + /*
8098     + * Map SLI4 if type 0 HBA Control Register base to a
8099     + * kernel virtual address and setup the registers.
8100     + */
8101     + phba->pci_bar1_map = pci_resource_start(pdev,
8102     + PCI_64BIT_BAR2);
8103     + bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
8104     + phba->sli4_hba.ctrl_regs_memmap_p =
8105     + ioremap(phba->pci_bar1_map,
8106     + bar1map_len);
8107     + if (!phba->sli4_hba.ctrl_regs_memmap_p) {
8108     + dev_err(&pdev->dev,
8109     + "ioremap failed for SLI4 HBA "
8110     + "control registers.\n");
8111     + error = -ENOMEM;
8112     + goto out_iounmap_conf;
8113     + }
8114     + phba->pci_bar2_memmap_p =
8115     + phba->sli4_hba.ctrl_regs_memmap_p;
8116     + lpfc_sli4_bar1_register_memmap(phba);
8117     + } else {
8118     + error = -ENOMEM;
8119     goto out_iounmap_conf;
8120     }
8121     - phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
8122     - lpfc_sli4_bar1_register_memmap(phba);
8123     }
8124    
8125     - if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
8126     - (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
8127     - /*
8128     - * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
8129     - * virtual address and setup the registers.
8130     - */
8131     - phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
8132     - bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
8133     - phba->sli4_hba.drbl_regs_memmap_p =
8134     - ioremap(phba->pci_bar2_map, bar2map_len);
8135     - if (!phba->sli4_hba.drbl_regs_memmap_p) {
8136     - dev_printk(KERN_ERR, &pdev->dev,
8137     - "ioremap failed for SLI4 HBA doorbell registers.\n");
8138     - goto out_iounmap_ctrl;
8139     - }
8140     - phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
8141     - error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
8142     - if (error)
8143     + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
8144     + if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
8145     + /*
8146     + * Map SLI4 if type 0 HBA Doorbell Register base to
8147     + * a kernel virtual address and setup the registers.
8148     + */
8149     + phba->pci_bar2_map = pci_resource_start(pdev,
8150     + PCI_64BIT_BAR4);
8151     + bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
8152     + phba->sli4_hba.drbl_regs_memmap_p =
8153     + ioremap(phba->pci_bar2_map,
8154     + bar2map_len);
8155     + if (!phba->sli4_hba.drbl_regs_memmap_p) {
8156     + dev_err(&pdev->dev,
8157     + "ioremap failed for SLI4 HBA"
8158     + " doorbell registers.\n");
8159     + error = -ENOMEM;
8160     + goto out_iounmap_ctrl;
8161     + }
8162     + phba->pci_bar4_memmap_p =
8163     + phba->sli4_hba.drbl_regs_memmap_p;
8164     + error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
8165     + if (error)
8166     + goto out_iounmap_all;
8167     + } else {
8168     + error = -ENOMEM;
8169     goto out_iounmap_all;
8170     + }
8171     }
8172    
8173     return 0;
8174     diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
8175     index 62b56de38ae8..3737c6d3b064 100644
8176     --- a/drivers/scsi/scsi_error.c
8177     +++ b/drivers/scsi/scsi_error.c
8178     @@ -220,6 +220,17 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
8179     }
8180     }
8181    
8182     +static void scsi_eh_inc_host_failed(struct rcu_head *head)
8183     +{
8184     + struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
8185     + unsigned long flags;
8186     +
8187     + spin_lock_irqsave(shost->host_lock, flags);
8188     + shost->host_failed++;
8189     + scsi_eh_wakeup(shost);
8190     + spin_unlock_irqrestore(shost->host_lock, flags);
8191     +}
8192     +
8193     /**
8194     * scsi_eh_scmd_add - add scsi cmd to error handling.
8195     * @scmd: scmd to run eh on.
8196     @@ -242,9 +253,12 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
8197    
8198     scsi_eh_reset(scmd);
8199     list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
8200     - shost->host_failed++;
8201     - scsi_eh_wakeup(shost);
8202     spin_unlock_irqrestore(shost->host_lock, flags);
8203     + /*
8204     + * Ensure that all tasks observe the host state change before the
8205     + * host_failed change.
8206     + */
8207     + call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
8208     }
8209    
8210     /**
8211     diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
8212     index d9ca1dfab154..83856ee14851 100644
8213     --- a/drivers/scsi/scsi_lib.c
8214     +++ b/drivers/scsi/scsi_lib.c
8215     @@ -318,22 +318,39 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
8216     cmd->cmd_len = scsi_command_size(cmd->cmnd);
8217     }
8218    
8219     -void scsi_device_unbusy(struct scsi_device *sdev)
8220     +/*
8221     + * Decrement the host_busy counter and wake up the error handler if necessary.
8222     + * Avoid as follows that the error handler is not woken up if shost->host_busy
8223     + * == shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
8224     + * with an RCU read lock in this function to ensure that this function in its
8225     + * entirety either finishes before scsi_eh_scmd_add() increases the
8226     + * host_failed counter or that it notices the shost state change made by
8227     + * scsi_eh_scmd_add().
8228     + */
8229     +static void scsi_dec_host_busy(struct Scsi_Host *shost)
8230     {
8231     - struct Scsi_Host *shost = sdev->host;
8232     - struct scsi_target *starget = scsi_target(sdev);
8233     unsigned long flags;
8234    
8235     + rcu_read_lock();
8236     atomic_dec(&shost->host_busy);
8237     - if (starget->can_queue > 0)
8238     - atomic_dec(&starget->target_busy);
8239     -
8240     - if (unlikely(scsi_host_in_recovery(shost) &&
8241     - (shost->host_failed || shost->host_eh_scheduled))) {
8242     + if (unlikely(scsi_host_in_recovery(shost))) {
8243     spin_lock_irqsave(shost->host_lock, flags);
8244     - scsi_eh_wakeup(shost);
8245     + if (shost->host_failed || shost->host_eh_scheduled)
8246     + scsi_eh_wakeup(shost);
8247     spin_unlock_irqrestore(shost->host_lock, flags);
8248     }
8249     + rcu_read_unlock();
8250     +}
8251     +
8252     +void scsi_device_unbusy(struct scsi_device *sdev)
8253     +{
8254     + struct Scsi_Host *shost = sdev->host;
8255     + struct scsi_target *starget = scsi_target(sdev);
8256     +
8257     + scsi_dec_host_busy(shost);
8258     +
8259     + if (starget->can_queue > 0)
8260     + atomic_dec(&starget->target_busy);
8261    
8262     atomic_dec(&sdev->device_busy);
8263     }
8264     @@ -1532,7 +1549,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
8265     list_add_tail(&sdev->starved_entry, &shost->starved_list);
8266     spin_unlock_irq(shost->host_lock);
8267     out_dec:
8268     - atomic_dec(&shost->host_busy);
8269     + scsi_dec_host_busy(shost);
8270     return 0;
8271     }
8272    
8273     @@ -2020,7 +2037,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
8274     return BLK_STS_OK;
8275    
8276     out_dec_host_busy:
8277     - atomic_dec(&shost->host_busy);
8278     + scsi_dec_host_busy(shost);
8279     out_dec_target_busy:
8280     if (scsi_target(sdev)->can_queue > 0)
8281     atomic_dec(&scsi_target(sdev)->target_busy);
8282     diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
8283     index 71c73766ee22..65af12c3bdb2 100644
8284     --- a/drivers/ssb/Kconfig
8285     +++ b/drivers/ssb/Kconfig
8286     @@ -32,7 +32,7 @@ config SSB_BLOCKIO
8287    
8288     config SSB_PCIHOST_POSSIBLE
8289     bool
8290     - depends on SSB && (PCI = y || PCI = SSB) && PCI_DRIVERS_LEGACY
8291     + depends on SSB && (PCI = y || PCI = SSB) && (PCI_DRIVERS_LEGACY || !MIPS)
8292     default y
8293    
8294     config SSB_PCIHOST
8295     diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
8296     index 2e5d311d2438..db81ed527452 100644
8297     --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
8298     +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
8299     @@ -120,6 +120,7 @@ static struct shash_alg alg = {
8300     .cra_name = "adler32",
8301     .cra_driver_name = "adler32-zlib",
8302     .cra_priority = 100,
8303     + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
8304     .cra_blocksize = CHKSUM_BLOCK_SIZE,
8305     .cra_ctxsize = sizeof(u32),
8306     .cra_module = THIS_MODULE,
8307     diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
8308     index ca200d1f310a..5a606a4aee6a 100644
8309     --- a/drivers/watchdog/Kconfig
8310     +++ b/drivers/watchdog/Kconfig
8311     @@ -1451,7 +1451,7 @@ config RC32434_WDT
8312    
8313     config INDYDOG
8314     tristate "Indy/I2 Hardware Watchdog"
8315     - depends on SGI_HAS_INDYDOG || (MIPS && COMPILE_TEST)
8316     + depends on SGI_HAS_INDYDOG
8317     help
8318     Hardware driver for the Indy's/I2's watchdog. This is a
8319     watchdog timer that will reboot the machine after a 60 second
8320     diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
8321     index cb66c2f99ff1..7a6279daa8b9 100644
8322     --- a/drivers/watchdog/gpio_wdt.c
8323     +++ b/drivers/watchdog/gpio_wdt.c
8324     @@ -80,7 +80,8 @@ static int gpio_wdt_stop(struct watchdog_device *wdd)
8325    
8326     if (!priv->always_running) {
8327     gpio_wdt_disable(priv);
8328     - clear_bit(WDOG_HW_RUNNING, &wdd->status);
8329     + } else {
8330     + set_bit(WDOG_HW_RUNNING, &wdd->status);
8331     }
8332    
8333     return 0;
8334     diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
8335     index 4874b0f18650..518dfa1047cb 100644
8336     --- a/drivers/watchdog/imx2_wdt.c
8337     +++ b/drivers/watchdog/imx2_wdt.c
8338     @@ -169,15 +169,21 @@ static int imx2_wdt_ping(struct watchdog_device *wdog)
8339     return 0;
8340     }
8341    
8342     -static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
8343     - unsigned int new_timeout)
8344     +static void __imx2_wdt_set_timeout(struct watchdog_device *wdog,
8345     + unsigned int new_timeout)
8346     {
8347     struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
8348    
8349     - wdog->timeout = new_timeout;
8350     -
8351     regmap_update_bits(wdev->regmap, IMX2_WDT_WCR, IMX2_WDT_WCR_WT,
8352     WDOG_SEC_TO_COUNT(new_timeout));
8353     +}
8354     +
8355     +static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
8356     + unsigned int new_timeout)
8357     +{
8358     + __imx2_wdt_set_timeout(wdog, new_timeout);
8359     +
8360     + wdog->timeout = new_timeout;
8361     return 0;
8362     }
8363    
8364     @@ -371,7 +377,11 @@ static int imx2_wdt_suspend(struct device *dev)
8365    
8366     /* The watchdog IP block is running */
8367     if (imx2_wdt_is_running(wdev)) {
8368     - imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
8369     + /*
8370     + * Don't update wdog->timeout, we'll restore the current value
8371     + * during resume.
8372     + */
8373     + __imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
8374     imx2_wdt_ping(wdog);
8375     }
8376    
8377     diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
8378     index a537368ba0db..fd9f28b8a933 100644
8379     --- a/fs/afs/addr_list.c
8380     +++ b/fs/afs/addr_list.c
8381     @@ -332,11 +332,18 @@ bool afs_iterate_addresses(struct afs_addr_cursor *ac)
8382     */
8383     int afs_end_cursor(struct afs_addr_cursor *ac)
8384     {
8385     - if (ac->responded && ac->index != ac->start)
8386     - WRITE_ONCE(ac->alist->index, ac->index);
8387     + struct afs_addr_list *alist;
8388     +
8389     + alist = ac->alist;
8390     + if (alist) {
8391     + if (ac->responded && ac->index != ac->start)
8392     + WRITE_ONCE(alist->index, ac->index);
8393     + afs_put_addrlist(alist);
8394     + }
8395    
8396     - afs_put_addrlist(ac->alist);
8397     + ac->addr = NULL;
8398     ac->alist = NULL;
8399     + ac->begun = false;
8400     return ac->error;
8401     }
8402    
8403     diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
8404     index d04511fb3879..892a4904fd77 100644
8405     --- a/fs/afs/rotate.c
8406     +++ b/fs/afs/rotate.c
8407     @@ -334,6 +334,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
8408    
8409     next_server:
8410     _debug("next");
8411     + afs_end_cursor(&fc->ac);
8412     afs_put_cb_interest(afs_v2net(vnode), fc->cbi);
8413     fc->cbi = NULL;
8414     fc->index++;
8415     @@ -383,6 +384,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
8416     afs_get_addrlist(alist);
8417     read_unlock(&server->fs_lock);
8418    
8419     + memset(&fc->ac, 0, sizeof(fc->ac));
8420    
8421     /* Probe the current fileserver if we haven't done so yet. */
8422     if (!test_bit(AFS_SERVER_FL_PROBED, &server->flags)) {
8423     @@ -397,11 +399,8 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
8424     else
8425     afs_put_addrlist(alist);
8426    
8427     - fc->ac.addr = NULL;
8428     fc->ac.start = READ_ONCE(alist->index);
8429     fc->ac.index = fc->ac.start;
8430     - fc->ac.error = 0;
8431     - fc->ac.begun = false;
8432     goto iterate_address;
8433    
8434     iterate_address:
8435     @@ -410,16 +409,15 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
8436     /* Iterate over the current server's address list to try and find an
8437     * address on which it will respond to us.
8438     */
8439     - if (afs_iterate_addresses(&fc->ac)) {
8440     - _leave(" = t");
8441     - return true;
8442     - }
8443     + if (!afs_iterate_addresses(&fc->ac))
8444     + goto next_server;
8445    
8446     - afs_end_cursor(&fc->ac);
8447     - goto next_server;
8448     + _leave(" = t");
8449     + return true;
8450    
8451     failed:
8452     fc->flags |= AFS_FS_CURSOR_STOP;
8453     + afs_end_cursor(&fc->ac);
8454     _leave(" = f [failed %d]", fc->ac.error);
8455     return false;
8456     }
8457     @@ -458,12 +456,10 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
8458     return false;
8459     }
8460    
8461     + memset(&fc->ac, 0, sizeof(fc->ac));
8462     fc->ac.alist = alist;
8463     - fc->ac.addr = NULL;
8464     fc->ac.start = READ_ONCE(alist->index);
8465     fc->ac.index = fc->ac.start;
8466     - fc->ac.error = 0;
8467     - fc->ac.begun = false;
8468     goto iterate_address;
8469    
8470     case 0:
8471     diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
8472     index 0ab3f8457839..0f8dc4c8f07c 100644
8473     --- a/fs/afs/server_list.c
8474     +++ b/fs/afs/server_list.c
8475     @@ -58,7 +58,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
8476     server = afs_lookup_server(cell, key, &vldb->fs_server[i]);
8477     if (IS_ERR(server)) {
8478     ret = PTR_ERR(server);
8479     - if (ret == -ENOENT)
8480     + if (ret == -ENOENT ||
8481     + ret == -ENOMEDIUM)
8482     continue;
8483     goto error_2;
8484     }
8485     diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
8486     index e372f89fd36a..5d8562f1ad4a 100644
8487     --- a/fs/afs/vlclient.c
8488     +++ b/fs/afs/vlclient.c
8489     @@ -23,7 +23,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
8490     struct afs_uvldbentry__xdr *uvldb;
8491     struct afs_vldb_entry *entry;
8492     bool new_only = false;
8493     - u32 tmp;
8494     + u32 tmp, nr_servers;
8495     int i, ret;
8496    
8497     _enter("");
8498     @@ -36,6 +36,10 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
8499     uvldb = call->buffer;
8500     entry = call->reply[0];
8501    
8502     + nr_servers = ntohl(uvldb->nServers);
8503     + if (nr_servers > AFS_NMAXNSERVERS)
8504     + nr_servers = AFS_NMAXNSERVERS;
8505     +
8506     for (i = 0; i < ARRAY_SIZE(uvldb->name) - 1; i++)
8507     entry->name[i] = (u8)ntohl(uvldb->name[i]);
8508     entry->name[i] = 0;
8509     @@ -44,14 +48,14 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
8510     /* If there is a new replication site that we can use, ignore all the
8511     * sites that aren't marked as new.
8512     */
8513     - for (i = 0; i < AFS_NMAXNSERVERS; i++) {
8514     + for (i = 0; i < nr_servers; i++) {
8515     tmp = ntohl(uvldb->serverFlags[i]);
8516     if (!(tmp & AFS_VLSF_DONTUSE) &&
8517     (tmp & AFS_VLSF_NEWREPSITE))
8518     new_only = true;
8519     }
8520    
8521     - for (i = 0; i < AFS_NMAXNSERVERS; i++) {
8522     + for (i = 0; i < nr_servers; i++) {
8523     struct afs_uuid__xdr *xdr;
8524     struct afs_uuid *uuid;
8525     int j;
8526     diff --git a/fs/afs/volume.c b/fs/afs/volume.c
8527     index 684c48293353..b517a588781f 100644
8528     --- a/fs/afs/volume.c
8529     +++ b/fs/afs/volume.c
8530     @@ -26,9 +26,8 @@ static struct afs_volume *afs_alloc_volume(struct afs_mount_params *params,
8531     unsigned long type_mask)
8532     {
8533     struct afs_server_list *slist;
8534     - struct afs_server *server;
8535     struct afs_volume *volume;
8536     - int ret = -ENOMEM, nr_servers = 0, i, j;
8537     + int ret = -ENOMEM, nr_servers = 0, i;
8538    
8539     for (i = 0; i < vldb->nr_servers; i++)
8540     if (vldb->fs_mask[i] & type_mask)
8541     @@ -58,50 +57,10 @@ static struct afs_volume *afs_alloc_volume(struct afs_mount_params *params,
8542    
8543     refcount_set(&slist->usage, 1);
8544     volume->servers = slist;
8545     -
8546     - /* Make sure a records exists for each server this volume occupies. */
8547     - for (i = 0; i < nr_servers; i++) {
8548     - if (!(vldb->fs_mask[i] & type_mask))
8549     - continue;
8550     -
8551     - server = afs_lookup_server(params->cell, params->key,
8552     - &vldb->fs_server[i]);
8553     - if (IS_ERR(server)) {
8554     - ret = PTR_ERR(server);
8555     - if (ret == -ENOENT)
8556     - continue;
8557     - goto error_2;
8558     - }
8559     -
8560     - /* Insertion-sort by server pointer */
8561     - for (j = 0; j < slist->nr_servers; j++)
8562     - if (slist->servers[j].server >= server)
8563     - break;
8564     - if (j < slist->nr_servers) {
8565     - if (slist->servers[j].server == server) {
8566     - afs_put_server(params->net, server);
8567     - continue;
8568     - }
8569     -
8570     - memmove(slist->servers + j + 1,
8571     - slist->servers + j,
8572     - (slist->nr_servers - j) * sizeof(struct afs_server_entry));
8573     - }
8574     -
8575     - slist->servers[j].server = server;
8576     - slist->nr_servers++;
8577     - }
8578     -
8579     - if (slist->nr_servers == 0) {
8580     - ret = -EDESTADDRREQ;
8581     - goto error_2;
8582     - }
8583     -
8584     return volume;
8585    
8586     -error_2:
8587     - afs_put_serverlist(params->net, slist);
8588     error_1:
8589     + afs_put_cell(params->net, volume->cell);
8590     kfree(volume);
8591     error_0:
8592     return ERR_PTR(ret);
8593     @@ -327,7 +286,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
8594    
8595     /* See if the volume's server list got updated. */
8596     new = afs_alloc_server_list(volume->cell, key,
8597     - vldb, (1 << volume->type));
8598     + vldb, (1 << volume->type));
8599     if (IS_ERR(new)) {
8600     ret = PTR_ERR(new);
8601     goto error_vldb;
8602     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
8603     index e1a7f3cb5be9..0f57602092cf 100644
8604     --- a/fs/btrfs/inode.c
8605     +++ b/fs/btrfs/inode.c
8606     @@ -2098,8 +2098,15 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
8607     goto out;
8608     }
8609    
8610     - btrfs_set_extent_delalloc(inode, page_start, page_end, 0, &cached_state,
8611     - 0);
8612     + ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
8613     + &cached_state, 0);
8614     + if (ret) {
8615     + mapping_set_error(page->mapping, ret);
8616     + end_extent_writepage(page, ret, page_start, page_end);
8617     + ClearPageChecked(page);
8618     + goto out;
8619     + }
8620     +
8621     ClearPageChecked(page);
8622     set_page_dirty(page);
8623     btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8624     diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
8625     index a7f79254ecca..8903c4fbf7e6 100644
8626     --- a/fs/btrfs/raid56.c
8627     +++ b/fs/btrfs/raid56.c
8628     @@ -1435,14 +1435,13 @@ static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
8629     */
8630     static void set_bio_pages_uptodate(struct bio *bio)
8631     {
8632     - struct bio_vec bvec;
8633     - struct bvec_iter iter;
8634     + struct bio_vec *bvec;
8635     + int i;
8636    
8637     - if (bio_flagged(bio, BIO_CLONED))
8638     - bio->bi_iter = btrfs_io_bio(bio)->iter;
8639     + ASSERT(!bio_flagged(bio, BIO_CLONED));
8640    
8641     - bio_for_each_segment(bvec, bio, iter)
8642     - SetPageUptodate(bvec.bv_page);
8643     + bio_for_each_segment_all(bvec, bio, i)
8644     + SetPageUptodate(bvec->bv_page);
8645     }
8646    
8647     /*
8648     diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
8649     index 68abbb0db608..f2b0a7f124da 100644
8650     --- a/fs/cifs/cifsencrypt.c
8651     +++ b/fs/cifs/cifsencrypt.c
8652     @@ -325,9 +325,8 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
8653     {
8654     int i;
8655     int rc;
8656     - char password_with_pad[CIFS_ENCPWD_SIZE];
8657     + char password_with_pad[CIFS_ENCPWD_SIZE] = {0};
8658    
8659     - memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
8660     if (password)
8661     strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE);
8662    
8663     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
8664     index 0bfc2280436d..f7db2fedfa8c 100644
8665     --- a/fs/cifs/connect.c
8666     +++ b/fs/cifs/connect.c
8667     @@ -1707,7 +1707,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
8668     tmp_end++;
8669     if (!(tmp_end < end && tmp_end[1] == delim)) {
8670     /* No it is not. Set the password to NULL */
8671     - kfree(vol->password);
8672     + kzfree(vol->password);
8673     vol->password = NULL;
8674     break;
8675     }
8676     @@ -1745,7 +1745,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
8677     options = end;
8678     }
8679    
8680     - kfree(vol->password);
8681     + kzfree(vol->password);
8682     /* Now build new password string */
8683     temp_len = strlen(value);
8684     vol->password = kzalloc(temp_len+1, GFP_KERNEL);
8685     @@ -4235,7 +4235,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
8686     reset_cifs_unix_caps(0, tcon, NULL, vol_info);
8687     out:
8688     kfree(vol_info->username);
8689     - kfree(vol_info->password);
8690     + kzfree(vol_info->password);
8691     kfree(vol_info);
8692    
8693     return tcon;
8694     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
8695     index df9f682708c6..3a85df2a9baf 100644
8696     --- a/fs/cifs/file.c
8697     +++ b/fs/cifs/file.c
8698     @@ -3471,20 +3471,18 @@ static const struct vm_operations_struct cifs_file_vm_ops = {
8699    
8700     int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
8701     {
8702     - int rc, xid;
8703     + int xid, rc = 0;
8704     struct inode *inode = file_inode(file);
8705    
8706     xid = get_xid();
8707    
8708     - if (!CIFS_CACHE_READ(CIFS_I(inode))) {
8709     + if (!CIFS_CACHE_READ(CIFS_I(inode)))
8710     rc = cifs_zap_mapping(inode);
8711     - if (rc)
8712     - return rc;
8713     - }
8714     -
8715     - rc = generic_file_mmap(file, vma);
8716     - if (rc == 0)
8717     + if (!rc)
8718     + rc = generic_file_mmap(file, vma);
8719     + if (!rc)
8720     vma->vm_ops = &cifs_file_vm_ops;
8721     +
8722     free_xid(xid);
8723     return rc;
8724     }
8725     @@ -3494,16 +3492,16 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
8726     int rc, xid;
8727    
8728     xid = get_xid();
8729     +
8730     rc = cifs_revalidate_file(file);
8731     - if (rc) {
8732     + if (rc)
8733     cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
8734     rc);
8735     - free_xid(xid);
8736     - return rc;
8737     - }
8738     - rc = generic_file_mmap(file, vma);
8739     - if (rc == 0)
8740     + if (!rc)
8741     + rc = generic_file_mmap(file, vma);
8742     + if (!rc)
8743     vma->vm_ops = &cifs_file_vm_ops;
8744     +
8745     free_xid(xid);
8746     return rc;
8747     }
8748     diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
8749     index eea93ac15ef0..a0dbced4a45c 100644
8750     --- a/fs/cifs/misc.c
8751     +++ b/fs/cifs/misc.c
8752     @@ -98,14 +98,11 @@ sesInfoFree(struct cifs_ses *buf_to_free)
8753     kfree(buf_to_free->serverOS);
8754     kfree(buf_to_free->serverDomain);
8755     kfree(buf_to_free->serverNOS);
8756     - if (buf_to_free->password) {
8757     - memset(buf_to_free->password, 0, strlen(buf_to_free->password));
8758     - kfree(buf_to_free->password);
8759     - }
8760     + kzfree(buf_to_free->password);
8761     kfree(buf_to_free->user_name);
8762     kfree(buf_to_free->domainName);
8763     - kfree(buf_to_free->auth_key.response);
8764     - kfree(buf_to_free);
8765     + kzfree(buf_to_free->auth_key.response);
8766     + kzfree(buf_to_free);
8767     }
8768    
8769     struct cifs_tcon *
8770     @@ -136,10 +133,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
8771     }
8772     atomic_dec(&tconInfoAllocCount);
8773     kfree(buf_to_free->nativeFileSystem);
8774     - if (buf_to_free->password) {
8775     - memset(buf_to_free->password, 0, strlen(buf_to_free->password));
8776     - kfree(buf_to_free->password);
8777     - }
8778     + kzfree(buf_to_free->password);
8779     kfree(buf_to_free);
8780     }
8781    
8782     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
8783     index 01346b8b6edb..66af1f8a13cc 100644
8784     --- a/fs/cifs/smb2pdu.c
8785     +++ b/fs/cifs/smb2pdu.c
8786     @@ -733,8 +733,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
8787     }
8788    
8789     /* check validate negotiate info response matches what we got earlier */
8790     - if (pneg_rsp->Dialect !=
8791     - cpu_to_le16(tcon->ses->server->vals->protocol_id))
8792     + if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect))
8793     goto vneg_out;
8794    
8795     if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
8796     diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
8797     index 7eae33ffa3fc..e31d6ed3ec32 100644
8798     --- a/fs/devpts/inode.c
8799     +++ b/fs/devpts/inode.c
8800     @@ -168,11 +168,11 @@ struct vfsmount *devpts_mntget(struct file *filp, struct pts_fs_info *fsi)
8801     dput(path.dentry);
8802     if (err) {
8803     mntput(path.mnt);
8804     - path.mnt = ERR_PTR(err);
8805     + return ERR_PTR(err);
8806     }
8807     if (DEVPTS_SB(path.mnt->mnt_sb) != fsi) {
8808     mntput(path.mnt);
8809     - path.mnt = ERR_PTR(-ENODEV);
8810     + return ERR_PTR(-ENODEV);
8811     }
8812     return path.mnt;
8813     }
8814     diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
8815     index 9698e51656b1..d8f49c412f50 100644
8816     --- a/fs/kernfs/file.c
8817     +++ b/fs/kernfs/file.c
8818     @@ -275,7 +275,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
8819     {
8820     struct kernfs_open_file *of = kernfs_of(file);
8821     const struct kernfs_ops *ops;
8822     - size_t len;
8823     + ssize_t len;
8824     char *buf;
8825    
8826     if (of->atomic_write_len) {
8827     diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
8828     index d2972d537469..8c10b0562e75 100644
8829     --- a/fs/nfs/direct.c
8830     +++ b/fs/nfs/direct.c
8831     @@ -775,10 +775,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
8832    
8833     spin_lock(&dreq->lock);
8834    
8835     - if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
8836     - dreq->flags = 0;
8837     + if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
8838     dreq->error = hdr->error;
8839     - }
8840     if (dreq->error == 0) {
8841     nfs_direct_good_bytes(dreq, hdr);
8842     if (nfs_write_need_commit(hdr)) {
8843     diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
8844     index 4e54d8b5413a..d175724ff566 100644
8845     --- a/fs/nfs/filelayout/filelayout.c
8846     +++ b/fs/nfs/filelayout/filelayout.c
8847     @@ -895,9 +895,7 @@ fl_pnfs_update_layout(struct inode *ino,
8848    
8849     lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
8850     gfp_flags);
8851     - if (!lseg)
8852     - lseg = ERR_PTR(-ENOMEM);
8853     - if (IS_ERR(lseg))
8854     + if (IS_ERR_OR_NULL(lseg))
8855     goto out;
8856    
8857     lo = NFS_I(ino)->layout;
8858     diff --git a/fs/nfs/io.c b/fs/nfs/io.c
8859     index 20fef85d2bb1..9034b4926909 100644
8860     --- a/fs/nfs/io.c
8861     +++ b/fs/nfs/io.c
8862     @@ -99,7 +99,7 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
8863     {
8864     if (!test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
8865     set_bit(NFS_INO_ODIRECT, &nfsi->flags);
8866     - nfs_wb_all(inode);
8867     + nfs_sync_mapping(inode->i_mapping);
8868     }
8869     }
8870    
8871     diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
8872     index 30426c1a1bbd..22dc30a679a0 100644
8873     --- a/fs/nfs/nfs4idmap.c
8874     +++ b/fs/nfs/nfs4idmap.c
8875     @@ -568,9 +568,13 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
8876     struct idmap_msg *im;
8877     struct idmap *idmap = (struct idmap *)aux;
8878     struct key *key = cons->key;
8879     - int ret = -ENOMEM;
8880     + int ret = -ENOKEY;
8881     +
8882     + if (!aux)
8883     + goto out1;
8884    
8885     /* msg and im are freed in idmap_pipe_destroy_msg */
8886     + ret = -ENOMEM;
8887     data = kzalloc(sizeof(*data), GFP_KERNEL);
8888     if (!data)
8889     goto out1;
8890     diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
8891     index 77c6729e57f0..65c9c4175145 100644
8892     --- a/fs/nfs/nfs4xdr.c
8893     +++ b/fs/nfs/nfs4xdr.c
8894     @@ -7678,6 +7678,22 @@ nfs4_stat_to_errno(int stat)
8895     .p_name = #proc, \
8896     }
8897    
8898     +#if defined(CONFIG_NFS_V4_1)
8899     +#define PROC41(proc, argtype, restype) \
8900     + PROC(proc, argtype, restype)
8901     +#else
8902     +#define PROC41(proc, argtype, restype) \
8903     + STUB(proc)
8904     +#endif
8905     +
8906     +#if defined(CONFIG_NFS_V4_2)
8907     +#define PROC42(proc, argtype, restype) \
8908     + PROC(proc, argtype, restype)
8909     +#else
8910     +#define PROC42(proc, argtype, restype) \
8911     + STUB(proc)
8912     +#endif
8913     +
8914     const struct rpc_procinfo nfs4_procedures[] = {
8915     PROC(READ, enc_read, dec_read),
8916     PROC(WRITE, enc_write, dec_write),
8917     @@ -7698,7 +7714,6 @@ const struct rpc_procinfo nfs4_procedures[] = {
8918     PROC(ACCESS, enc_access, dec_access),
8919     PROC(GETATTR, enc_getattr, dec_getattr),
8920     PROC(LOOKUP, enc_lookup, dec_lookup),
8921     - PROC(LOOKUPP, enc_lookupp, dec_lookupp),
8922     PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root),
8923     PROC(REMOVE, enc_remove, dec_remove),
8924     PROC(RENAME, enc_rename, dec_rename),
8925     @@ -7717,33 +7732,30 @@ const struct rpc_procinfo nfs4_procedures[] = {
8926     PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
8927     PROC(SECINFO, enc_secinfo, dec_secinfo),
8928     PROC(FSID_PRESENT, enc_fsid_present, dec_fsid_present),
8929     -#if defined(CONFIG_NFS_V4_1)
8930     - PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
8931     - PROC(CREATE_SESSION, enc_create_session, dec_create_session),
8932     - PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
8933     - PROC(SEQUENCE, enc_sequence, dec_sequence),
8934     - PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
8935     - PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
8936     - PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
8937     - PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
8938     - PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
8939     - PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn),
8940     - PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
8941     - PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
8942     - PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
8943     + PROC41(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
8944     + PROC41(CREATE_SESSION, enc_create_session, dec_create_session),
8945     + PROC41(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
8946     + PROC41(SEQUENCE, enc_sequence, dec_sequence),
8947     + PROC41(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
8948     + PROC41(RECLAIM_COMPLETE,enc_reclaim_complete, dec_reclaim_complete),
8949     + PROC41(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
8950     + PROC41(LAYOUTGET, enc_layoutget, dec_layoutget),
8951     + PROC41(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
8952     + PROC41(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn),
8953     + PROC41(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
8954     + PROC41(TEST_STATEID, enc_test_stateid, dec_test_stateid),
8955     + PROC41(FREE_STATEID, enc_free_stateid, dec_free_stateid),
8956     STUB(GETDEVICELIST),
8957     - PROC(BIND_CONN_TO_SESSION,
8958     + PROC41(BIND_CONN_TO_SESSION,
8959     enc_bind_conn_to_session, dec_bind_conn_to_session),
8960     - PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
8961     -#endif /* CONFIG_NFS_V4_1 */
8962     -#ifdef CONFIG_NFS_V4_2
8963     - PROC(SEEK, enc_seek, dec_seek),
8964     - PROC(ALLOCATE, enc_allocate, dec_allocate),
8965     - PROC(DEALLOCATE, enc_deallocate, dec_deallocate),
8966     - PROC(LAYOUTSTATS, enc_layoutstats, dec_layoutstats),
8967     - PROC(CLONE, enc_clone, dec_clone),
8968     - PROC(COPY, enc_copy, dec_copy),
8969     -#endif /* CONFIG_NFS_V4_2 */
8970     + PROC41(DESTROY_CLIENTID,enc_destroy_clientid, dec_destroy_clientid),
8971     + PROC42(SEEK, enc_seek, dec_seek),
8972     + PROC42(ALLOCATE, enc_allocate, dec_allocate),
8973     + PROC42(DEALLOCATE, enc_deallocate, dec_deallocate),
8974     + PROC42(LAYOUTSTATS, enc_layoutstats, dec_layoutstats),
8975     + PROC42(CLONE, enc_clone, dec_clone),
8976     + PROC42(COPY, enc_copy, dec_copy),
8977     + PROC(LOOKUPP, enc_lookupp, dec_lookupp),
8978     };
8979    
8980     static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)];
8981     diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
8982     index d602fe9e1ac8..eb098ccfefd5 100644
8983     --- a/fs/nfs/pnfs.c
8984     +++ b/fs/nfs/pnfs.c
8985     @@ -2255,7 +2255,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
8986     nfs_pageio_reset_write_mds(desc);
8987     mirror->pg_recoalesce = 1;
8988     }
8989     - hdr->release(hdr);
8990     + hdr->completion_ops->completion(hdr);
8991     }
8992    
8993     static enum pnfs_try_status
8994     @@ -2378,7 +2378,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
8995     nfs_pageio_reset_read_mds(desc);
8996     mirror->pg_recoalesce = 1;
8997     }
8998     - hdr->release(hdr);
8999     + hdr->completion_ops->completion(hdr);
9000     }
9001    
9002     /*
9003     diff --git a/fs/nfs/write.c b/fs/nfs/write.c
9004     index 4a379d7918f2..cf61108f8f8d 100644
9005     --- a/fs/nfs/write.c
9006     +++ b/fs/nfs/write.c
9007     @@ -1837,6 +1837,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
9008     set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
9009     next:
9010     nfs_unlock_and_release_request(req);
9011     + /* Latency breaker */
9012     + cond_resched();
9013     }
9014     nfss = NFS_SERVER(data->inode);
9015     if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
9016     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
9017     index b29b5a185a2c..5a75135f5f53 100644
9018     --- a/fs/nfsd/nfs4state.c
9019     +++ b/fs/nfsd/nfs4state.c
9020     @@ -3590,6 +3590,7 @@ nfsd4_verify_open_stid(struct nfs4_stid *s)
9021     switch (s->sc_type) {
9022     default:
9023     break;
9024     + case 0:
9025     case NFS4_CLOSED_STID:
9026     case NFS4_CLOSED_DELEG_STID:
9027     ret = nfserr_bad_stateid;
9028     diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
9029     index 00b6b294272a..94d2f8a8b779 100644
9030     --- a/fs/overlayfs/inode.c
9031     +++ b/fs/overlayfs/inode.c
9032     @@ -606,6 +606,16 @@ static int ovl_inode_set(struct inode *inode, void *data)
9033     static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
9034     struct dentry *upperdentry)
9035     {
9036     + if (S_ISDIR(inode->i_mode)) {
9037     + /* Real lower dir moved to upper layer under us? */
9038     + if (!lowerdentry && ovl_inode_lower(inode))
9039     + return false;
9040     +
9041     + /* Lookup of an uncovered redirect origin? */
9042     + if (!upperdentry && ovl_inode_upper(inode))
9043     + return false;
9044     + }
9045     +
9046     /*
9047     * Allow non-NULL lower inode in ovl_inode even if lowerdentry is NULL.
9048     * This happens when finding a copied up overlay inode for a renamed
9049     @@ -633,6 +643,8 @@ struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
9050     struct inode *inode;
9051     /* Already indexed or could be indexed on copy up? */
9052     bool indexed = (index || (ovl_indexdir(dentry->d_sb) && !upperdentry));
9053     + struct dentry *origin = indexed ? lowerdentry : NULL;
9054     + bool is_dir;
9055    
9056     if (WARN_ON(upperdentry && indexed && !lowerdentry))
9057     return ERR_PTR(-EIO);
9058     @@ -641,15 +653,19 @@ struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
9059     realinode = d_inode(lowerdentry);
9060    
9061     /*
9062     - * Copy up origin (lower) may exist for non-indexed upper, but we must
9063     - * not use lower as hash key in that case.
9064     - * Hash inodes that are or could be indexed by origin inode and
9065     - * non-indexed upper inodes that could be hard linked by upper inode.
9066     + * Copy up origin (lower) may exist for non-indexed non-dir upper, but
9067     + * we must not use lower as hash key in that case.
9068     + * Hash non-dir that is or could be indexed by origin inode.
9069     + * Hash dir that is or could be merged by origin inode.
9070     + * Hash pure upper and non-indexed non-dir by upper inode.
9071     */
9072     - if (!S_ISDIR(realinode->i_mode) && (upperdentry || indexed)) {
9073     - struct inode *key = d_inode(indexed ? lowerdentry :
9074     - upperdentry);
9075     - unsigned int nlink;
9076     + is_dir = S_ISDIR(realinode->i_mode);
9077     + if (is_dir)
9078     + origin = lowerdentry;
9079     +
9080     + if (upperdentry || origin) {
9081     + struct inode *key = d_inode(origin ?: upperdentry);
9082     + unsigned int nlink = is_dir ? 1 : realinode->i_nlink;
9083    
9084     inode = iget5_locked(dentry->d_sb, (unsigned long) key,
9085     ovl_inode_test, ovl_inode_set, key);
9086     @@ -670,8 +686,9 @@ struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
9087     goto out;
9088     }
9089    
9090     - nlink = ovl_get_nlink(lowerdentry, upperdentry,
9091     - realinode->i_nlink);
9092     + /* Recalculate nlink for non-dir due to indexing */
9093     + if (!is_dir)
9094     + nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink);
9095     set_nlink(inode, nlink);
9096     } else {
9097     inode = new_inode(dentry->d_sb);
9098     @@ -685,7 +702,7 @@ struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
9099     ovl_set_flag(OVL_IMPURE, inode);
9100    
9101     /* Check for non-merge dir that may have whiteouts */
9102     - if (S_ISDIR(realinode->i_mode)) {
9103     + if (is_dir) {
9104     struct ovl_entry *oe = dentry->d_fsdata;
9105    
9106     if (((upperdentry && lowerdentry) || oe->numlower > 1) ||
9107     diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
9108     index 8c98578d27a1..e258c234f357 100644
9109     --- a/fs/overlayfs/readdir.c
9110     +++ b/fs/overlayfs/readdir.c
9111     @@ -593,8 +593,15 @@ static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path)
9112     return ERR_PTR(res);
9113     }
9114     if (list_empty(&cache->entries)) {
9115     - /* Good oportunity to get rid of an unnecessary "impure" flag */
9116     - ovl_do_removexattr(ovl_dentry_upper(dentry), OVL_XATTR_IMPURE);
9117     + /*
9118     + * A good opportunity to get rid of an unneeded "impure" flag.
9119     + * Removing the "impure" xattr is best effort.
9120     + */
9121     + if (!ovl_want_write(dentry)) {
9122     + ovl_do_removexattr(ovl_dentry_upper(dentry),
9123     + OVL_XATTR_IMPURE);
9124     + ovl_drop_write(dentry);
9125     + }
9126     ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
9127     kfree(cache);
9128     return NULL;
9129     @@ -769,10 +776,14 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
9130     struct dentry *dentry = file->f_path.dentry;
9131     struct file *realfile = od->realfile;
9132    
9133     + /* Nothing to sync for lower */
9134     + if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
9135     + return 0;
9136     +
9137     /*
9138     * Need to check if we started out being a lower dir, but got copied up
9139     */
9140     - if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
9141     + if (!od->is_upper) {
9142     struct inode *inode = file_inode(file);
9143    
9144     realfile = READ_ONCE(od->upperfile);
9145     diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
9146     index 76440feb79f6..e3d5fb651f9a 100644
9147     --- a/fs/overlayfs/super.c
9148     +++ b/fs/overlayfs/super.c
9149     @@ -211,6 +211,7 @@ static void ovl_destroy_inode(struct inode *inode)
9150     struct ovl_inode *oi = OVL_I(inode);
9151    
9152     dput(oi->__upperdentry);
9153     + iput(oi->lower);
9154     kfree(oi->redirect);
9155     ovl_dir_cache_free(inode);
9156     mutex_destroy(&oi->lock);
9157     @@ -520,10 +521,6 @@ static struct dentry *ovl_workdir_create(struct ovl_fs *ofs,
9158     bool retried = false;
9159     bool locked = false;
9160    
9161     - err = mnt_want_write(mnt);
9162     - if (err)
9163     - goto out_err;
9164     -
9165     inode_lock_nested(dir, I_MUTEX_PARENT);
9166     locked = true;
9167    
9168     @@ -588,7 +585,6 @@ static struct dentry *ovl_workdir_create(struct ovl_fs *ofs,
9169     goto out_err;
9170     }
9171     out_unlock:
9172     - mnt_drop_write(mnt);
9173     if (locked)
9174     inode_unlock(dir);
9175    
9176     @@ -703,7 +699,8 @@ static int ovl_lower_dir(const char *name, struct path *path,
9177     * The inodes index feature needs to encode and decode file
9178     * handles, so it requires that all layers support them.
9179     */
9180     - if (ofs->config.index && !ovl_can_decode_fh(path->dentry->d_sb)) {
9181     + if (ofs->config.index && ofs->config.upperdir &&
9182     + !ovl_can_decode_fh(path->dentry->d_sb)) {
9183     ofs->config.index = false;
9184     pr_warn("overlayfs: fs on '%s' does not support file handles, falling back to index=off.\n", name);
9185     }
9186     @@ -929,12 +926,17 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
9187    
9188     static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
9189     {
9190     + struct vfsmount *mnt = ofs->upper_mnt;
9191     struct dentry *temp;
9192     int err;
9193    
9194     + err = mnt_want_write(mnt);
9195     + if (err)
9196     + return err;
9197     +
9198     ofs->workdir = ovl_workdir_create(ofs, OVL_WORKDIR_NAME, false);
9199     if (!ofs->workdir)
9200     - return 0;
9201     + goto out;
9202    
9203     /*
9204     * Upper should support d_type, else whiteouts are visible. Given
9205     @@ -944,7 +946,7 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
9206     */
9207     err = ovl_check_d_type_supported(workpath);
9208     if (err < 0)
9209     - return err;
9210     + goto out;
9211    
9212     /*
9213     * We allowed this configuration and don't want to break users over
9214     @@ -968,6 +970,7 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
9215     if (err) {
9216     ofs->noxattr = true;
9217     pr_warn("overlayfs: upper fs does not support xattr.\n");
9218     + err = 0;
9219     } else {
9220     vfs_removexattr(ofs->workdir, OVL_XATTR_OPAQUE);
9221     }
9222     @@ -979,7 +982,9 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
9223     pr_warn("overlayfs: upper fs does not support file handles, falling back to index=off.\n");
9224     }
9225    
9226     - return 0;
9227     +out:
9228     + mnt_drop_write(mnt);
9229     + return err;
9230     }
9231    
9232     static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
9233     @@ -1026,8 +1031,13 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
9234     static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe,
9235     struct path *upperpath)
9236     {
9237     + struct vfsmount *mnt = ofs->upper_mnt;
9238     int err;
9239    
9240     + err = mnt_want_write(mnt);
9241     + if (err)
9242     + return err;
9243     +
9244     /* Verify lower root is upper root origin */
9245     err = ovl_verify_origin(upperpath->dentry, oe->lowerstack[0].dentry,
9246     false, true);
9247     @@ -1055,6 +1065,7 @@ static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe,
9248     pr_warn("overlayfs: try deleting index dir or mounting with '-o index=off' to disable inodes index.\n");
9249    
9250     out:
9251     + mnt_drop_write(mnt);
9252     return err;
9253     }
9254    
9255     @@ -1257,11 +1268,16 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
9256     if (err)
9257     goto out_free_oe;
9258    
9259     - if (!ofs->indexdir)
9260     + /* Force r/o mount with no index dir */
9261     + if (!ofs->indexdir) {
9262     + dput(ofs->workdir);
9263     + ofs->workdir = NULL;
9264     sb->s_flags |= SB_RDONLY;
9265     + }
9266     +
9267     }
9268    
9269     - /* Show index=off/on in /proc/mounts for any of the reasons above */
9270     + /* Show index=off in /proc/mounts for forced r/o mount */
9271     if (!ofs->indexdir)
9272     ofs->config.index = false;
9273    
9274     diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
9275     index d6bb1c9f5e7a..06119f34a69d 100644
9276     --- a/fs/overlayfs/util.c
9277     +++ b/fs/overlayfs/util.c
9278     @@ -257,7 +257,7 @@ void ovl_inode_init(struct inode *inode, struct dentry *upperdentry,
9279     if (upperdentry)
9280     OVL_I(inode)->__upperdentry = upperdentry;
9281     if (lowerdentry)
9282     - OVL_I(inode)->lower = d_inode(lowerdentry);
9283     + OVL_I(inode)->lower = igrab(d_inode(lowerdentry));
9284    
9285     ovl_copyattr(d_inode(upperdentry ?: lowerdentry), inode);
9286     }
9287     @@ -273,7 +273,7 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
9288     */
9289     smp_wmb();
9290     OVL_I(inode)->__upperdentry = upperdentry;
9291     - if (!S_ISDIR(upperinode->i_mode) && inode_unhashed(inode)) {
9292     + if (inode_unhashed(inode)) {
9293     inode->i_private = upperinode;
9294     __insert_inode_hash(inode, (unsigned long) upperinode);
9295     }
9296     diff --git a/fs/pipe.c b/fs/pipe.c
9297     index 6d98566201ef..b37a59f84dd0 100644
9298     --- a/fs/pipe.c
9299     +++ b/fs/pipe.c
9300     @@ -610,12 +610,17 @@ static unsigned long account_pipe_buffers(struct user_struct *user,
9301    
9302     static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
9303     {
9304     - return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft;
9305     + return pipe_user_pages_soft && user_bufs > pipe_user_pages_soft;
9306     }
9307    
9308     static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
9309     {
9310     - return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard;
9311     + return pipe_user_pages_hard && user_bufs > pipe_user_pages_hard;
9312     +}
9313     +
9314     +static bool is_unprivileged_user(void)
9315     +{
9316     + return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
9317     }
9318    
9319     struct pipe_inode_info *alloc_pipe_info(void)
9320     @@ -634,12 +639,12 @@ struct pipe_inode_info *alloc_pipe_info(void)
9321    
9322     user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
9323    
9324     - if (too_many_pipe_buffers_soft(user_bufs)) {
9325     + if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
9326     user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
9327     pipe_bufs = 1;
9328     }
9329    
9330     - if (too_many_pipe_buffers_hard(user_bufs))
9331     + if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
9332     goto out_revert_acct;
9333    
9334     pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
9335     @@ -1069,7 +1074,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
9336     if (nr_pages > pipe->buffers &&
9337     (too_many_pipe_buffers_hard(user_bufs) ||
9338     too_many_pipe_buffers_soft(user_bufs)) &&
9339     - !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
9340     + is_unprivileged_user()) {
9341     ret = -EPERM;
9342     goto out_revert_acct;
9343     }
9344     diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
9345     index 4bc85cb8be6a..e8a93bc8285d 100644
9346     --- a/fs/proc/kcore.c
9347     +++ b/fs/proc/kcore.c
9348     @@ -512,23 +512,15 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
9349     return -EFAULT;
9350     } else {
9351     if (kern_addr_valid(start)) {
9352     - unsigned long n;
9353     -
9354     /*
9355     * Using bounce buffer to bypass the
9356     * hardened user copy kernel text checks.
9357     */
9358     - memcpy(buf, (char *) start, tsz);
9359     - n = copy_to_user(buffer, buf, tsz);
9360     - /*
9361     - * We cannot distinguish between fault on source
9362     - * and fault on destination. When this happens
9363     - * we clear too and hope it will trigger the
9364     - * EFAULT again.
9365     - */
9366     - if (n) {
9367     - if (clear_user(buffer + tsz - n,
9368     - n))
9369     + if (probe_kernel_read(buf, (void *) start, tsz)) {
9370     + if (clear_user(buffer, tsz))
9371     + return -EFAULT;
9372     + } else {
9373     + if (copy_to_user(buffer, buf, tsz))
9374     return -EFAULT;
9375     }
9376     } else {
9377     diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
9378     index 417fe0b29f23..ef820f803176 100644
9379     --- a/fs/ubifs/dir.c
9380     +++ b/fs/ubifs/dir.c
9381     @@ -1216,10 +1216,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
9382     ostr.len = disk_link.len;
9383    
9384     err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
9385     - if (err) {
9386     - kfree(sd);
9387     + if (err)
9388     goto out_inode;
9389     - }
9390    
9391     sd->len = cpu_to_le16(ostr.len);
9392     disk_link.name = (char *)sd;
9393     @@ -1251,11 +1249,10 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
9394     goto out_cancel;
9395     mutex_unlock(&dir_ui->ui_mutex);
9396    
9397     - ubifs_release_budget(c, &req);
9398     insert_inode_hash(inode);
9399     d_instantiate(dentry, inode);
9400     - fscrypt_free_filename(&nm);
9401     - return 0;
9402     + err = 0;
9403     + goto out_fname;
9404    
9405     out_cancel:
9406     dir->i_size -= sz_change;
9407     @@ -1268,6 +1265,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
9408     fscrypt_free_filename(&nm);
9409     out_budg:
9410     ubifs_release_budget(c, &req);
9411     + kfree(sd);
9412     return err;
9413     }
9414    
9415     diff --git a/include/crypto/hash.h b/include/crypto/hash.h
9416     index 0ed31fd80242..3880793e280e 100644
9417     --- a/include/crypto/hash.h
9418     +++ b/include/crypto/hash.h
9419     @@ -210,7 +210,6 @@ struct crypto_ahash {
9420     unsigned int keylen);
9421    
9422     unsigned int reqsize;
9423     - bool has_setkey;
9424     struct crypto_tfm base;
9425     };
9426    
9427     @@ -410,11 +409,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
9428     int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
9429     unsigned int keylen);
9430    
9431     -static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
9432     -{
9433     - return tfm->has_setkey;
9434     -}
9435     -
9436     /**
9437     * crypto_ahash_finup() - update and finalize message digest
9438     * @req: reference to the ahash_request handle that holds all information
9439     @@ -487,7 +481,12 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out)
9440     */
9441     static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
9442     {
9443     - return crypto_ahash_reqtfm(req)->import(req, in);
9444     + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
9445     +
9446     + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
9447     + return -ENOKEY;
9448     +
9449     + return tfm->import(req, in);
9450     }
9451    
9452     /**
9453     @@ -503,7 +502,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
9454     */
9455     static inline int crypto_ahash_init(struct ahash_request *req)
9456     {
9457     - return crypto_ahash_reqtfm(req)->init(req);
9458     + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
9459     +
9460     + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
9461     + return -ENOKEY;
9462     +
9463     + return tfm->init(req);
9464     }
9465    
9466     /**
9467     @@ -855,7 +859,12 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
9468     */
9469     static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
9470     {
9471     - return crypto_shash_alg(desc->tfm)->import(desc, in);
9472     + struct crypto_shash *tfm = desc->tfm;
9473     +
9474     + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
9475     + return -ENOKEY;
9476     +
9477     + return crypto_shash_alg(tfm)->import(desc, in);
9478     }
9479    
9480     /**
9481     @@ -871,7 +880,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
9482     */
9483     static inline int crypto_shash_init(struct shash_desc *desc)
9484     {
9485     - return crypto_shash_alg(desc->tfm)->init(desc);
9486     + struct crypto_shash *tfm = desc->tfm;
9487     +
9488     + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
9489     + return -ENOKEY;
9490     +
9491     + return crypto_shash_alg(tfm)->init(desc);
9492     }
9493    
9494     /**
9495     diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
9496     index c2bae8da642c..27040a46d50a 100644
9497     --- a/include/crypto/internal/hash.h
9498     +++ b/include/crypto/internal/hash.h
9499     @@ -90,6 +90,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
9500     return alg->setkey != shash_no_setkey;
9501     }
9502    
9503     +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
9504     +
9505     int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
9506     struct hash_alg_common *alg,
9507     struct crypto_instance *inst);
9508     diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
9509     index c65567d01e8e..f718a19da82f 100644
9510     --- a/include/crypto/poly1305.h
9511     +++ b/include/crypto/poly1305.h
9512     @@ -31,8 +31,6 @@ struct poly1305_desc_ctx {
9513     };
9514    
9515     int crypto_poly1305_init(struct shash_desc *desc);
9516     -int crypto_poly1305_setkey(struct crypto_shash *tfm,
9517     - const u8 *key, unsigned int keylen);
9518     unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
9519     const u8 *src, unsigned int srclen);
9520     int crypto_poly1305_update(struct shash_desc *desc,
9521     diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
9522     new file mode 100644
9523     index 000000000000..e518e4e3dfb5
9524     --- /dev/null
9525     +++ b/include/kvm/arm_psci.h
9526     @@ -0,0 +1,51 @@
9527     +/*
9528     + * Copyright (C) 2012,2013 - ARM Ltd
9529     + * Author: Marc Zyngier <marc.zyngier@arm.com>
9530     + *
9531     + * This program is free software; you can redistribute it and/or modify
9532     + * it under the terms of the GNU General Public License version 2 as
9533     + * published by the Free Software Foundation.
9534     + *
9535     + * This program is distributed in the hope that it will be useful,
9536     + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9537     + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9538     + * GNU General Public License for more details.
9539     + *
9540     + * You should have received a copy of the GNU General Public License
9541     + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9542     + */
9543     +
9544     +#ifndef __KVM_ARM_PSCI_H__
9545     +#define __KVM_ARM_PSCI_H__
9546     +
9547     +#include <linux/kvm_host.h>
9548     +#include <uapi/linux/psci.h>
9549     +
9550     +#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1)
9551     +#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
9552     +#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0)
9553     +
9554     +#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0
9555     +
9556     +/*
9557     + * We need the KVM pointer independently from the vcpu as we can call
9558     + * this from HYP, and need to apply kern_hyp_va on it...
9559     + */
9560     +static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
9561     +{
9562     + /*
9563     + * Our PSCI implementation stays the same across versions from
9564     + * v0.2 onward, only adding the few mandatory functions (such
9565     + * as FEATURES with 1.0) that are required by newer
9566     + * revisions. It is thus safe to return the latest.
9567     + */
9568     + if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
9569     + return KVM_ARM_PSCI_LATEST;
9570     +
9571     + return KVM_ARM_PSCI_0_1;
9572     +}
9573     +
9574     +
9575     +int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
9576     +
9577     +#endif /* __KVM_ARM_PSCI_H__ */
9578     diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
9579     index 4c5bca38c653..a031897fca76 100644
9580     --- a/include/linux/arm-smccc.h
9581     +++ b/include/linux/arm-smccc.h
9582     @@ -14,14 +14,16 @@
9583     #ifndef __LINUX_ARM_SMCCC_H
9584     #define __LINUX_ARM_SMCCC_H
9585    
9586     +#include <uapi/linux/const.h>
9587     +
9588     /*
9589     * This file provides common defines for ARM SMC Calling Convention as
9590     * specified in
9591     * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
9592     */
9593    
9594     -#define ARM_SMCCC_STD_CALL 0
9595     -#define ARM_SMCCC_FAST_CALL 1
9596     +#define ARM_SMCCC_STD_CALL _AC(0,U)
9597     +#define ARM_SMCCC_FAST_CALL _AC(1,U)
9598     #define ARM_SMCCC_TYPE_SHIFT 31
9599    
9600     #define ARM_SMCCC_SMC_32 0
9601     @@ -60,6 +62,24 @@
9602     #define ARM_SMCCC_QUIRK_NONE 0
9603     #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
9604    
9605     +#define ARM_SMCCC_VERSION_1_0 0x10000
9606     +#define ARM_SMCCC_VERSION_1_1 0x10001
9607     +
9608     +#define ARM_SMCCC_VERSION_FUNC_ID \
9609     + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
9610     + ARM_SMCCC_SMC_32, \
9611     + 0, 0)
9612     +
9613     +#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \
9614     + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
9615     + ARM_SMCCC_SMC_32, \
9616     + 0, 1)
9617     +
9618     +#define ARM_SMCCC_ARCH_WORKAROUND_1 \
9619     + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
9620     + ARM_SMCCC_SMC_32, \
9621     + 0, 0x8000)
9622     +
9623     #ifndef __ASSEMBLY__
9624    
9625     #include <linux/linkage.h>
9626     @@ -130,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
9627    
9628     #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
9629    
9630     +/* SMCCC v1.1 implementation madness follows */
9631     +#ifdef CONFIG_ARM64
9632     +
9633     +#define SMCCC_SMC_INST "smc #0"
9634     +#define SMCCC_HVC_INST "hvc #0"
9635     +
9636     +#elif defined(CONFIG_ARM)
9637     +#include <asm/opcodes-sec.h>
9638     +#include <asm/opcodes-virt.h>
9639     +
9640     +#define SMCCC_SMC_INST __SMC(0)
9641     +#define SMCCC_HVC_INST __HVC(0)
9642     +
9643     +#endif
9644     +
9645     +#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
9646     +
9647     +#define __count_args(...) \
9648     + ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
9649     +
9650     +#define __constraint_write_0 \
9651     + "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
9652     +#define __constraint_write_1 \
9653     + "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
9654     +#define __constraint_write_2 \
9655     + "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
9656     +#define __constraint_write_3 \
9657     + "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
9658     +#define __constraint_write_4 __constraint_write_3
9659     +#define __constraint_write_5 __constraint_write_4
9660     +#define __constraint_write_6 __constraint_write_5
9661     +#define __constraint_write_7 __constraint_write_6
9662     +
9663     +#define __constraint_read_0
9664     +#define __constraint_read_1
9665     +#define __constraint_read_2
9666     +#define __constraint_read_3
9667     +#define __constraint_read_4 "r" (r4)
9668     +#define __constraint_read_5 __constraint_read_4, "r" (r5)
9669     +#define __constraint_read_6 __constraint_read_5, "r" (r6)
9670     +#define __constraint_read_7 __constraint_read_6, "r" (r7)
9671     +
9672     +#define __declare_arg_0(a0, res) \
9673     + struct arm_smccc_res *___res = res; \
9674     + register u32 r0 asm("r0") = a0; \
9675     + register unsigned long r1 asm("r1"); \
9676     + register unsigned long r2 asm("r2"); \
9677     + register unsigned long r3 asm("r3")
9678     +
9679     +#define __declare_arg_1(a0, a1, res) \
9680     + struct arm_smccc_res *___res = res; \
9681     + register u32 r0 asm("r0") = a0; \
9682     + register typeof(a1) r1 asm("r1") = a1; \
9683     + register unsigned long r2 asm("r2"); \
9684     + register unsigned long r3 asm("r3")
9685     +
9686     +#define __declare_arg_2(a0, a1, a2, res) \
9687     + struct arm_smccc_res *___res = res; \
9688     + register u32 r0 asm("r0") = a0; \
9689     + register typeof(a1) r1 asm("r1") = a1; \
9690     + register typeof(a2) r2 asm("r2") = a2; \
9691     + register unsigned long r3 asm("r3")
9692     +
9693     +#define __declare_arg_3(a0, a1, a2, a3, res) \
9694     + struct arm_smccc_res *___res = res; \
9695     + register u32 r0 asm("r0") = a0; \
9696     + register typeof(a1) r1 asm("r1") = a1; \
9697     + register typeof(a2) r2 asm("r2") = a2; \
9698     + register typeof(a3) r3 asm("r3") = a3
9699     +
9700     +#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
9701     + __declare_arg_3(a0, a1, a2, a3, res); \
9702     + register typeof(a4) r4 asm("r4") = a4
9703     +
9704     +#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
9705     + __declare_arg_4(a0, a1, a2, a3, a4, res); \
9706     + register typeof(a5) r5 asm("r5") = a5
9707     +
9708     +#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
9709     + __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
9710     + register typeof(a6) r6 asm("r6") = a6
9711     +
9712     +#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
9713     + __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
9714     + register typeof(a7) r7 asm("r7") = a7
9715     +
9716     +#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
9717     +#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
9718     +
9719     +#define ___constraints(count) \
9720     + : __constraint_write_ ## count \
9721     + : __constraint_read_ ## count \
9722     + : "memory"
9723     +#define __constraints(count) ___constraints(count)
9724     +
9725     +/*
9726     + * We have an output list that is not necessarily used, and GCC feels
9727     + * entitled to optimise the whole sequence away. "volatile" is what
9728     + * makes it stick.
9729     + */
9730     +#define __arm_smccc_1_1(inst, ...) \
9731     + do { \
9732     + __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
9733     + asm volatile(inst "\n" \
9734     + __constraints(__count_args(__VA_ARGS__))); \
9735     + if (___res) \
9736     + *___res = (typeof(*___res)){r0, r1, r2, r3}; \
9737     + } while (0)
9738     +
9739     +/*
9740     + * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
9741     + *
9742     + * This is a variadic macro taking one to eight source arguments, and
9743     + * an optional return structure.
9744     + *
9745     + * @a0-a7: arguments passed in registers 0 to 7
9746     + * @res: result values from registers 0 to 3
9747     + *
9748     + * This macro is used to make SMC calls following SMC Calling Convention v1.1.
9749     + * The content of the supplied param are copied to registers 0 to 7 prior
9750     + * to the SMC instruction. The return values are updated with the content
9751     + * from register 0 to 3 on return from the SMC instruction if not NULL.
9752     + */
9753     +#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
9754     +
9755     +/*
9756     + * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
9757     + *
9758     + * This is a variadic macro taking one to eight source arguments, and
9759     + * an optional return structure.
9760     + *
9761     + * @a0-a7: arguments passed in registers 0 to 7
9762     + * @res: result values from registers 0 to 3
9763     + *
9764     + * This macro is used to make HVC calls following SMC Calling Convention v1.1.
9765     + * The content of the supplied param are copied to registers 0 to 7 prior
9766     + * to the HVC instruction. The return values are updated with the content
9767     + * from register 0 to 3 on return from the HVC instruction if not NULL.
9768     + */
9769     +#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
9770     +
9771     #endif /*__ASSEMBLY__*/
9772     #endif /*__LINUX_ARM_SMCCC_H*/
9773     diff --git a/include/linux/crypto.h b/include/linux/crypto.h
9774     index 78508ca4b108..29c4257f9c5b 100644
9775     --- a/include/linux/crypto.h
9776     +++ b/include/linux/crypto.h
9777     @@ -106,9 +106,17 @@
9778     */
9779     #define CRYPTO_ALG_INTERNAL 0x00002000
9780    
9781     +/*
9782     + * Set if the algorithm has a ->setkey() method but can be used without
9783     + * calling it first, i.e. there is a default key.
9784     + */
9785     +#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
9786     +
9787     /*
9788     * Transform masks and values (for crt_flags).
9789     */
9790     +#define CRYPTO_TFM_NEED_KEY 0x00000001
9791     +
9792     #define CRYPTO_TFM_REQ_MASK 0x000fff00
9793     #define CRYPTO_TFM_RES_MASK 0xfff00000
9794    
9795     diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
9796     index 3aa56e3104bb..b5b43f94f311 100644
9797     --- a/include/linux/mtd/map.h
9798     +++ b/include/linux/mtd/map.h
9799     @@ -270,75 +270,67 @@ void map_destroy(struct mtd_info *mtd);
9800     #define INVALIDATE_CACHED_RANGE(map, from, size) \
9801     do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
9802    
9803     -
9804     -static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
9805     -{
9806     - int i;
9807     -
9808     - for (i = 0; i < map_words(map); i++) {
9809     - if (val1.x[i] != val2.x[i])
9810     - return 0;
9811     - }
9812     -
9813     - return 1;
9814     -}
9815     -
9816     -static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
9817     -{
9818     - map_word r;
9819     - int i;
9820     -
9821     - for (i = 0; i < map_words(map); i++)
9822     - r.x[i] = val1.x[i] & val2.x[i];
9823     -
9824     - return r;
9825     -}
9826     -
9827     -static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
9828     -{
9829     - map_word r;
9830     - int i;
9831     -
9832     - for (i = 0; i < map_words(map); i++)
9833     - r.x[i] = val1.x[i] & ~val2.x[i];
9834     -
9835     - return r;
9836     -}
9837     -
9838     -static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
9839     -{
9840     - map_word r;
9841     - int i;
9842     -
9843     - for (i = 0; i < map_words(map); i++)
9844     - r.x[i] = val1.x[i] | val2.x[i];
9845     -
9846     - return r;
9847     -}
9848     -
9849     -static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
9850     -{
9851     - int i;
9852     -
9853     - for (i = 0; i < map_words(map); i++) {
9854     - if ((val1.x[i] & val2.x[i]) != val3.x[i])
9855     - return 0;
9856     - }
9857     -
9858     - return 1;
9859     -}
9860     -
9861     -static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
9862     -{
9863     - int i;
9864     -
9865     - for (i = 0; i < map_words(map); i++) {
9866     - if (val1.x[i] & val2.x[i])
9867     - return 1;
9868     - }
9869     -
9870     - return 0;
9871     -}
9872     +#define map_word_equal(map, val1, val2) \
9873     +({ \
9874     + int i, ret = 1; \
9875     + for (i = 0; i < map_words(map); i++) \
9876     + if ((val1).x[i] != (val2).x[i]) { \
9877     + ret = 0; \
9878     + break; \
9879     + } \
9880     + ret; \
9881     +})
9882     +
9883     +#define map_word_and(map, val1, val2) \
9884     +({ \
9885     + map_word r; \
9886     + int i; \
9887     + for (i = 0; i < map_words(map); i++) \
9888     + r.x[i] = (val1).x[i] & (val2).x[i]; \
9889     + r; \
9890     +})
9891     +
9892     +#define map_word_clr(map, val1, val2) \
9893     +({ \
9894     + map_word r; \
9895     + int i; \
9896     + for (i = 0; i < map_words(map); i++) \
9897     + r.x[i] = (val1).x[i] & ~(val2).x[i]; \
9898     + r; \
9899     +})
9900     +
9901     +#define map_word_or(map, val1, val2) \
9902     +({ \
9903     + map_word r; \
9904     + int i; \
9905     + for (i = 0; i < map_words(map); i++) \
9906     + r.x[i] = (val1).x[i] | (val2).x[i]; \
9907     + r; \
9908     +})
9909     +
9910     +#define map_word_andequal(map, val1, val2, val3) \
9911     +({ \
9912     + int i, ret = 1; \
9913     + for (i = 0; i < map_words(map); i++) { \
 9914     + if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
9915     + ret = 0; \
9916     + break; \
9917     + } \
9918     + } \
9919     + ret; \
9920     +})
9921     +
9922     +#define map_word_bitsset(map, val1, val2) \
9923     +({ \
9924     + int i, ret = 0; \
9925     + for (i = 0; i < map_words(map); i++) { \
9926     + if ((val1).x[i] & (val2).x[i]) { \
9927     + ret = 1; \
9928     + break; \
9929     + } \
9930     + } \
9931     + ret; \
9932     +})
9933    
9934     static inline map_word map_word_load(struct map_info *map, const void *ptr)
9935     {
9936     diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
9937     index 47adac640191..57ffaa20d564 100644
9938     --- a/include/linux/nfs4.h
9939     +++ b/include/linux/nfs4.h
9940     @@ -457,7 +457,12 @@ enum lock_type4 {
9941    
9942     #define NFS4_DEBUG 1
9943    
9944     -/* Index of predefined Linux client operations */
9945     +/*
9946     + * Index of predefined Linux client operations
9947     + *
9948     + * To ensure that /proc/net/rpc/nfs remains correctly ordered, please
9949     + * append only to this enum when adding new client operations.
9950     + */
9951    
9952     enum {
9953     NFSPROC4_CLNT_NULL = 0, /* Unused */
9954     @@ -480,7 +485,6 @@ enum {
9955     NFSPROC4_CLNT_ACCESS,
9956     NFSPROC4_CLNT_GETATTR,
9957     NFSPROC4_CLNT_LOOKUP,
9958     - NFSPROC4_CLNT_LOOKUPP,
9959     NFSPROC4_CLNT_LOOKUP_ROOT,
9960     NFSPROC4_CLNT_REMOVE,
9961     NFSPROC4_CLNT_RENAME,
9962     @@ -500,7 +504,6 @@ enum {
9963     NFSPROC4_CLNT_SECINFO,
9964     NFSPROC4_CLNT_FSID_PRESENT,
9965    
9966     - /* nfs41 */
9967     NFSPROC4_CLNT_EXCHANGE_ID,
9968     NFSPROC4_CLNT_CREATE_SESSION,
9969     NFSPROC4_CLNT_DESTROY_SESSION,
9970     @@ -518,13 +521,14 @@ enum {
9971     NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
9972     NFSPROC4_CLNT_DESTROY_CLIENTID,
9973    
9974     - /* nfs42 */
9975     NFSPROC4_CLNT_SEEK,
9976     NFSPROC4_CLNT_ALLOCATE,
9977     NFSPROC4_CLNT_DEALLOCATE,
9978     NFSPROC4_CLNT_LAYOUTSTATS,
9979     NFSPROC4_CLNT_CLONE,
9980     NFSPROC4_CLNT_COPY,
9981     +
9982     + NFSPROC4_CLNT_LOOKUPP,
9983     };
9984    
9985     /* nfs41 types */
9986     diff --git a/include/linux/psci.h b/include/linux/psci.h
9987     index bdea1cb5e1db..347077cf19c6 100644
9988     --- a/include/linux/psci.h
9989     +++ b/include/linux/psci.h
9990     @@ -25,7 +25,19 @@ bool psci_tos_resident_on(int cpu);
9991     int psci_cpu_init_idle(unsigned int cpu);
9992     int psci_cpu_suspend_enter(unsigned long index);
9993    
9994     +enum psci_conduit {
9995     + PSCI_CONDUIT_NONE,
9996     + PSCI_CONDUIT_SMC,
9997     + PSCI_CONDUIT_HVC,
9998     +};
9999     +
10000     +enum smccc_version {
10001     + SMCCC_VERSION_1_0,
10002     + SMCCC_VERSION_1_1,
10003     +};
10004     +
10005     struct psci_operations {
10006     + u32 (*get_version)(void);
10007     int (*cpu_suspend)(u32 state, unsigned long entry_point);
10008     int (*cpu_off)(u32 state);
10009     int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
10010     @@ -33,6 +45,8 @@ struct psci_operations {
10011     int (*affinity_info)(unsigned long target_affinity,
10012     unsigned long lowest_affinity_level);
10013     int (*migrate_info_type)(void);
10014     + enum psci_conduit conduit;
10015     + enum smccc_version smccc_version;
10016     };
10017    
10018     extern struct psci_operations psci_ops;
10019     diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
10020     index a8b7bf879ced..1a1df0d21ee3 100644
10021     --- a/include/scsi/scsi_host.h
10022     +++ b/include/scsi/scsi_host.h
10023     @@ -571,6 +571,8 @@ struct Scsi_Host {
10024     struct blk_mq_tag_set tag_set;
10025     };
10026    
10027     + struct rcu_head rcu;
10028     +
10029     atomic_t host_busy; /* commands actually active on low-level */
10030     atomic_t host_blocked;
10031    
10032     diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
10033     index 760e52a9640f..b3bcabe380da 100644
10034     --- a/include/uapi/linux/psci.h
10035     +++ b/include/uapi/linux/psci.h
10036     @@ -88,6 +88,9 @@
10037     (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
10038     #define PSCI_VERSION_MINOR(ver) \
10039     ((ver) & PSCI_VERSION_MINOR_MASK)
10040     +#define PSCI_VERSION(maj, min) \
10041     + ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
10042     + ((min) & PSCI_VERSION_MINOR_MASK))
10043    
10044     /* PSCI features decoding (>=1.0) */
10045     #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1
10046     diff --git a/kernel/async.c b/kernel/async.c
10047     index 2cbd3dd5940d..a893d6170944 100644
10048     --- a/kernel/async.c
10049     +++ b/kernel/async.c
10050     @@ -84,20 +84,24 @@ static atomic_t entry_count;
10051    
10052     static async_cookie_t lowest_in_progress(struct async_domain *domain)
10053     {
10054     - struct list_head *pending;
10055     + struct async_entry *first = NULL;
10056     async_cookie_t ret = ASYNC_COOKIE_MAX;
10057     unsigned long flags;
10058    
10059     spin_lock_irqsave(&async_lock, flags);
10060    
10061     - if (domain)
10062     - pending = &domain->pending;
10063     - else
10064     - pending = &async_global_pending;
10065     + if (domain) {
10066     + if (!list_empty(&domain->pending))
10067     + first = list_first_entry(&domain->pending,
10068     + struct async_entry, domain_list);
10069     + } else {
10070     + if (!list_empty(&async_global_pending))
10071     + first = list_first_entry(&async_global_pending,
10072     + struct async_entry, global_list);
10073     + }
10074    
10075     - if (!list_empty(pending))
10076     - ret = list_first_entry(pending, struct async_entry,
10077     - domain_list)->cookie;
10078     + if (first)
10079     + ret = first->cookie;
10080    
10081     spin_unlock_irqrestore(&async_lock, flags);
10082     return ret;
10083     diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
10084     index 4e8089b319ae..8c82ea26e837 100644
10085     --- a/kernel/irq/autoprobe.c
10086     +++ b/kernel/irq/autoprobe.c
10087     @@ -71,7 +71,7 @@ unsigned long probe_irq_on(void)
10088     raw_spin_lock_irq(&desc->lock);
10089     if (!desc->action && irq_settings_can_probe(desc)) {
10090     desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
10091     - if (irq_startup(desc, IRQ_NORESEND, IRQ_START_FORCE))
10092     + if (irq_activate_and_startup(desc, IRQ_NORESEND))
10093     desc->istate |= IRQS_PENDING;
10094     }
10095     raw_spin_unlock_irq(&desc->lock);
10096     diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
10097     index 043bfc35b353..c69357a43849 100644
10098     --- a/kernel/irq/chip.c
10099     +++ b/kernel/irq/chip.c
10100     @@ -294,11 +294,11 @@ int irq_activate(struct irq_desc *desc)
10101     return 0;
10102     }
10103    
10104     -void irq_activate_and_startup(struct irq_desc *desc, bool resend)
10105     +int irq_activate_and_startup(struct irq_desc *desc, bool resend)
10106     {
10107     if (WARN_ON(irq_activate(desc)))
10108     - return;
10109     - irq_startup(desc, resend, IRQ_START_FORCE);
10110     + return 0;
10111     + return irq_startup(desc, resend, IRQ_START_FORCE);
10112     }
10113    
10114     static void __irq_disable(struct irq_desc *desc, bool mask);
10115     diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
10116     index ab19371eab9b..ca6afa267070 100644
10117     --- a/kernel/irq/internals.h
10118     +++ b/kernel/irq/internals.h
10119     @@ -76,7 +76,7 @@ extern void __enable_irq(struct irq_desc *desc);
10120     #define IRQ_START_COND false
10121    
10122     extern int irq_activate(struct irq_desc *desc);
10123     -extern void irq_activate_and_startup(struct irq_desc *desc, bool resend);
10124     +extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
10125     extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
10126    
10127     extern void irq_shutdown(struct irq_desc *desc);
10128     diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
10129     index fbd56d6e575b..68fa19a5e7bd 100644
10130     --- a/kernel/rcu/update.c
10131     +++ b/kernel/rcu/update.c
10132     @@ -422,11 +422,13 @@ void init_rcu_head(struct rcu_head *head)
10133     {
10134     debug_object_init(head, &rcuhead_debug_descr);
10135     }
10136     +EXPORT_SYMBOL_GPL(init_rcu_head);
10137    
10138     void destroy_rcu_head(struct rcu_head *head)
10139     {
10140     debug_object_free(head, &rcuhead_debug_descr);
10141     }
10142     +EXPORT_SYMBOL_GPL(destroy_rcu_head);
10143    
10144     static bool rcuhead_is_static_object(void *addr)
10145     {
10146     diff --git a/kernel/relay.c b/kernel/relay.c
10147     index 39a9dfc69486..55da824f4adc 100644
10148     --- a/kernel/relay.c
10149     +++ b/kernel/relay.c
10150     @@ -611,7 +611,6 @@ struct rchan *relay_open(const char *base_filename,
10151    
10152     kref_put(&chan->kref, relay_destroy_channel);
10153     mutex_unlock(&relay_channels_mutex);
10154     - kfree(chan);
10155     return NULL;
10156     }
10157     EXPORT_SYMBOL_GPL(relay_open);
10158     diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
10159     index 665ace2fc558..3401f588c916 100644
10160     --- a/kernel/sched/rt.c
10161     +++ b/kernel/sched/rt.c
10162     @@ -1907,9 +1907,8 @@ static void push_rt_tasks(struct rq *rq)
10163     * the rt_loop_next will cause the iterator to perform another scan.
10164     *
10165     */
10166     -static int rto_next_cpu(struct rq *rq)
10167     +static int rto_next_cpu(struct root_domain *rd)
10168     {
10169     - struct root_domain *rd = rq->rd;
10170     int next;
10171     int cpu;
10172    
10173     @@ -1985,19 +1984,24 @@ static void tell_cpu_to_push(struct rq *rq)
10174     * Otherwise it is finishing up and an ipi needs to be sent.
10175     */
10176     if (rq->rd->rto_cpu < 0)
10177     - cpu = rto_next_cpu(rq);
10178     + cpu = rto_next_cpu(rq->rd);
10179    
10180     raw_spin_unlock(&rq->rd->rto_lock);
10181    
10182     rto_start_unlock(&rq->rd->rto_loop_start);
10183    
10184     - if (cpu >= 0)
10185     + if (cpu >= 0) {
10186     + /* Make sure the rd does not get freed while pushing */
10187     + sched_get_rd(rq->rd);
10188     irq_work_queue_on(&rq->rd->rto_push_work, cpu);
10189     + }
10190     }
10191    
10192     /* Called from hardirq context */
10193     void rto_push_irq_work_func(struct irq_work *work)
10194     {
10195     + struct root_domain *rd =
10196     + container_of(work, struct root_domain, rto_push_work);
10197     struct rq *rq;
10198     int cpu;
10199    
10200     @@ -2013,18 +2017,20 @@ void rto_push_irq_work_func(struct irq_work *work)
10201     raw_spin_unlock(&rq->lock);
10202     }
10203    
10204     - raw_spin_lock(&rq->rd->rto_lock);
10205     + raw_spin_lock(&rd->rto_lock);
10206    
10207     /* Pass the IPI to the next rt overloaded queue */
10208     - cpu = rto_next_cpu(rq);
10209     + cpu = rto_next_cpu(rd);
10210    
10211     - raw_spin_unlock(&rq->rd->rto_lock);
10212     + raw_spin_unlock(&rd->rto_lock);
10213    
10214     - if (cpu < 0)
10215     + if (cpu < 0) {
10216     + sched_put_rd(rd);
10217     return;
10218     + }
10219    
10220     /* Try the next RT overloaded CPU */
10221     - irq_work_queue_on(&rq->rd->rto_push_work, cpu);
10222     + irq_work_queue_on(&rd->rto_push_work, cpu);
10223     }
10224     #endif /* HAVE_RT_PUSH_IPI */
10225    
10226     diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
10227     index b19552a212de..74b57279e3ff 100644
10228     --- a/kernel/sched/sched.h
10229     +++ b/kernel/sched/sched.h
10230     @@ -665,6 +665,8 @@ extern struct mutex sched_domains_mutex;
10231     extern void init_defrootdomain(void);
10232     extern int sched_init_domains(const struct cpumask *cpu_map);
10233     extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
10234     +extern void sched_get_rd(struct root_domain *rd);
10235     +extern void sched_put_rd(struct root_domain *rd);
10236    
10237     #ifdef HAVE_RT_PUSH_IPI
10238     extern void rto_push_irq_work_func(struct irq_work *work);
10239     diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
10240     index 034cbed7f88b..519b024f4e94 100644
10241     --- a/kernel/sched/topology.c
10242     +++ b/kernel/sched/topology.c
10243     @@ -259,6 +259,19 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
10244     call_rcu_sched(&old_rd->rcu, free_rootdomain);
10245     }
10246    
10247     +void sched_get_rd(struct root_domain *rd)
10248     +{
10249     + atomic_inc(&rd->refcount);
10250     +}
10251     +
10252     +void sched_put_rd(struct root_domain *rd)
10253     +{
10254     + if (!atomic_dec_and_test(&rd->refcount))
10255     + return;
10256     +
10257     + call_rcu_sched(&rd->rcu, free_rootdomain);
10258     +}
10259     +
10260     static int init_rootdomain(struct root_domain *rd)
10261     {
10262     if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
10263     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
10264     index 554b517c61a0..a7741d14f51b 100644
10265     --- a/kernel/trace/ftrace.c
10266     +++ b/kernel/trace/ftrace.c
10267     @@ -4456,7 +4456,6 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
10268     func_g.type = filter_parse_regex(glob, strlen(glob),
10269     &func_g.search, &not);
10270     func_g.len = strlen(func_g.search);
10271     - func_g.search = glob;
10272    
10273     /* we do not support '!' for function probes */
10274     if (WARN_ON(not))
10275     diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
10276     index 9d5b78aad4c5..f55c15ab6f93 100644
10277     --- a/lib/Kconfig.debug
10278     +++ b/lib/Kconfig.debug
10279     @@ -217,7 +217,7 @@ config ENABLE_MUST_CHECK
10280     config FRAME_WARN
10281     int "Warn for stack frames larger than (needs gcc 4.4)"
10282     range 0 8192
10283     - default 0 if KASAN
10284     + default 3072 if KASAN_EXTRA
10285     default 2048 if GCC_PLUGIN_LATENT_ENTROPY
10286     default 1280 if (!64BIT && PARISC)
10287     default 1024 if (!64BIT && !PARISC)
10288     diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
10289     index bd38aab05929..3d35d062970d 100644
10290     --- a/lib/Kconfig.kasan
10291     +++ b/lib/Kconfig.kasan
10292     @@ -20,6 +20,17 @@ config KASAN
10293     Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB
10294     (the resulting kernel does not boot).
10295    
10296     +config KASAN_EXTRA
10297     + bool "KAsan: extra checks"
10298     + depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST
10299     + help
10300     + This enables further checks in the kernel address sanitizer, for now
10301     + it only includes the address-use-after-scope check that can lead
10302     + to excessive kernel stack usage, frame size warnings and longer
10303     + compile time.
10304     + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more
10305     +
10306     +
10307     choice
10308     prompt "Instrumentation type"
10309     depends on KASAN
10310     diff --git a/lib/ubsan.c b/lib/ubsan.c
10311     index fb0409df1bcf..50d1d5c25deb 100644
10312     --- a/lib/ubsan.c
10313     +++ b/lib/ubsan.c
10314     @@ -265,14 +265,14 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data,
10315     }
10316     EXPORT_SYMBOL(__ubsan_handle_divrem_overflow);
10317    
10318     -static void handle_null_ptr_deref(struct type_mismatch_data *data)
10319     +static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
10320     {
10321     unsigned long flags;
10322    
10323     - if (suppress_report(&data->location))
10324     + if (suppress_report(data->location))
10325     return;
10326    
10327     - ubsan_prologue(&data->location, &flags);
10328     + ubsan_prologue(data->location, &flags);
10329    
10330     pr_err("%s null pointer of type %s\n",
10331     type_check_kinds[data->type_check_kind],
10332     @@ -281,15 +281,15 @@ static void handle_null_ptr_deref(struct type_mismatch_data *data)
10333     ubsan_epilogue(&flags);
10334     }
10335    
10336     -static void handle_missaligned_access(struct type_mismatch_data *data,
10337     +static void handle_misaligned_access(struct type_mismatch_data_common *data,
10338     unsigned long ptr)
10339     {
10340     unsigned long flags;
10341    
10342     - if (suppress_report(&data->location))
10343     + if (suppress_report(data->location))
10344     return;
10345    
10346     - ubsan_prologue(&data->location, &flags);
10347     + ubsan_prologue(data->location, &flags);
10348    
10349     pr_err("%s misaligned address %p for type %s\n",
10350     type_check_kinds[data->type_check_kind],
10351     @@ -299,15 +299,15 @@ static void handle_missaligned_access(struct type_mismatch_data *data,
10352     ubsan_epilogue(&flags);
10353     }
10354    
10355     -static void handle_object_size_mismatch(struct type_mismatch_data *data,
10356     +static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
10357     unsigned long ptr)
10358     {
10359     unsigned long flags;
10360    
10361     - if (suppress_report(&data->location))
10362     + if (suppress_report(data->location))
10363     return;
10364    
10365     - ubsan_prologue(&data->location, &flags);
10366     + ubsan_prologue(data->location, &flags);
10367     pr_err("%s address %p with insufficient space\n",
10368     type_check_kinds[data->type_check_kind],
10369     (void *) ptr);
10370     @@ -315,19 +315,47 @@ static void handle_object_size_mismatch(struct type_mismatch_data *data,
10371     ubsan_epilogue(&flags);
10372     }
10373    
10374     -void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
10375     +static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
10376     unsigned long ptr)
10377     {
10378    
10379     if (!ptr)
10380     handle_null_ptr_deref(data);
10381     else if (data->alignment && !IS_ALIGNED(ptr, data->alignment))
10382     - handle_missaligned_access(data, ptr);
10383     + handle_misaligned_access(data, ptr);
10384     else
10385     handle_object_size_mismatch(data, ptr);
10386     }
10387     +
10388     +void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
10389     + unsigned long ptr)
10390     +{
10391     + struct type_mismatch_data_common common_data = {
10392     + .location = &data->location,
10393     + .type = data->type,
10394     + .alignment = data->alignment,
10395     + .type_check_kind = data->type_check_kind
10396     + };
10397     +
10398     + ubsan_type_mismatch_common(&common_data, ptr);
10399     +}
10400     EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
10401    
10402     +void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
10403     + unsigned long ptr)
10404     +{
10405     +
10406     + struct type_mismatch_data_common common_data = {
10407     + .location = &data->location,
10408     + .type = data->type,
10409     + .alignment = 1UL << data->log_alignment,
10410     + .type_check_kind = data->type_check_kind
10411     + };
10412     +
10413     + ubsan_type_mismatch_common(&common_data, ptr);
10414     +}
10415     +EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
10416     +
10417     void __ubsan_handle_nonnull_return(struct nonnull_return_data *data)
10418     {
10419     unsigned long flags;
10420     diff --git a/lib/ubsan.h b/lib/ubsan.h
10421     index 88f23557edbe..7e30b26497e0 100644
10422     --- a/lib/ubsan.h
10423     +++ b/lib/ubsan.h
10424     @@ -37,6 +37,20 @@ struct type_mismatch_data {
10425     unsigned char type_check_kind;
10426     };
10427    
10428     +struct type_mismatch_data_v1 {
10429     + struct source_location location;
10430     + struct type_descriptor *type;
10431     + unsigned char log_alignment;
10432     + unsigned char type_check_kind;
10433     +};
10434     +
10435     +struct type_mismatch_data_common {
10436     + struct source_location *location;
10437     + struct type_descriptor *type;
10438     + unsigned long alignment;
10439     + unsigned char type_check_kind;
10440     +};
10441     +
10442     struct nonnull_arg_data {
10443     struct source_location location;
10444     struct source_location attr_location;
10445     diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
10446     index 1ce7115aa499..97a56c0b565a 100644
10447     --- a/scripts/Makefile.kasan
10448     +++ b/scripts/Makefile.kasan
10449     @@ -30,5 +30,10 @@ else
10450     endif
10451     endif
10452    
10453     +ifdef CONFIG_KASAN_EXTRA
10454     CFLAGS_KASAN += $(call cc-option, -fsanitize-address-use-after-scope)
10455     endif
10456     +
10457     +CFLAGS_KASAN_NOSANITIZE := -fno-builtin
10458     +
10459     +endif
10460     diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
10461     index 1ca4dcd2d500..015aa9dbad86 100644
10462     --- a/scripts/Makefile.lib
10463     +++ b/scripts/Makefile.lib
10464     @@ -121,7 +121,7 @@ endif
10465     ifeq ($(CONFIG_KASAN),y)
10466     _c_flags += $(if $(patsubst n%,, \
10467     $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
10468     - $(CFLAGS_KASAN))
10469     + $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
10470     endif
10471    
10472     ifeq ($(CONFIG_UBSAN),y)
10473     diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
10474     index 3eaac41090ca..26b0a5caea5a 100644
10475     --- a/sound/soc/intel/skylake/skl-nhlt.c
10476     +++ b/sound/soc/intel/skylake/skl-nhlt.c
10477     @@ -43,7 +43,8 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
10478     obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
10479     if (obj && obj->type == ACPI_TYPE_BUFFER) {
10480     nhlt_ptr = (struct nhlt_resource_desc *)obj->buffer.pointer;
10481     - nhlt_table = (struct nhlt_acpi_table *)
10482     + if (nhlt_ptr->length)
10483     + nhlt_table = (struct nhlt_acpi_table *)
10484     memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
10485     MEMREMAP_WB);
10486     ACPI_FREE(obj);
10487     diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
10488     index 908211e1d6fc..eb27f6c24bf7 100644
10489     --- a/sound/soc/rockchip/rockchip_i2s.c
10490     +++ b/sound/soc/rockchip/rockchip_i2s.c
10491     @@ -504,6 +504,7 @@ static bool rockchip_i2s_rd_reg(struct device *dev, unsigned int reg)
10492     case I2S_INTCR:
10493     case I2S_XFER:
10494     case I2S_CLR:
10495     + case I2S_TXDR:
10496     case I2S_RXDR:
10497     case I2S_FIFOLR:
10498     case I2S_INTSR:
10499     @@ -518,6 +519,9 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
10500     switch (reg) {
10501     case I2S_INTSR:
10502     case I2S_CLR:
10503     + case I2S_FIFOLR:
10504     + case I2S_TXDR:
10505     + case I2S_RXDR:
10506     return true;
10507     default:
10508     return false;
10509     @@ -527,6 +531,8 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
10510     static bool rockchip_i2s_precious_reg(struct device *dev, unsigned int reg)
10511     {
10512     switch (reg) {
10513     + case I2S_RXDR:
10514     + return true;
10515     default:
10516     return false;
10517     }
10518     diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
10519     index f21df28bc28e..d4dd2efea45e 100644
10520     --- a/sound/soc/soc-acpi.c
10521     +++ b/sound/soc/soc-acpi.c
10522     @@ -84,11 +84,9 @@ snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
10523    
10524     for (mach = machines; mach->id[0]; mach++) {
10525     if (snd_soc_acpi_check_hid(mach->id) == true) {
10526     - if (mach->machine_quirk == NULL)
10527     - return mach;
10528     -
10529     - if (mach->machine_quirk(mach) != NULL)
10530     - return mach;
10531     + if (mach->machine_quirk)
10532     + mach = mach->machine_quirk(mach);
10533     + return mach;
10534     }
10535     }
10536     return NULL;
10537     diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
10538     index d9b1e6417fb9..1507117d1185 100644
10539     --- a/sound/soc/soc-compress.c
10540     +++ b/sound/soc/soc-compress.c
10541     @@ -944,7 +944,7 @@ static int soc_compr_copy(struct snd_compr_stream *cstream,
10542     struct snd_soc_platform *platform = rtd->platform;
10543     struct snd_soc_component *component;
10544     struct snd_soc_rtdcom_list *rtdcom;
10545     - int ret = 0, __ret;
10546     + int ret = 0;
10547    
10548     mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
10549    
10550     @@ -965,10 +965,10 @@ static int soc_compr_copy(struct snd_compr_stream *cstream,
10551     !component->driver->compr_ops->copy)
10552     continue;
10553    
10554     - __ret = component->driver->compr_ops->copy(cstream, buf, count);
10555     - if (__ret < 0)
10556     - ret = __ret;
10557     + ret = component->driver->compr_ops->copy(cstream, buf, count);
10558     + break;
10559     }
10560     +
10561     err:
10562     mutex_unlock(&rtd->pcm_mutex);
10563     return ret;
10564     diff --git a/tools/objtool/check.c b/tools/objtool/check.c
10565     index 9cd028aa1509..2e458eb45586 100644
10566     --- a/tools/objtool/check.c
10567     +++ b/tools/objtool/check.c
10568     @@ -851,8 +851,14 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func,
10569     * This is a fairly uncommon pattern which is new for GCC 6. As of this
10570     * writing, there are 11 occurrences of it in the allmodconfig kernel.
10571     *
10572     + * As of GCC 7 there are quite a few more of these and the 'in between' code
10573     + * is significant. Esp. with KASAN enabled some of the code between the mov
10574     + * and jmpq uses .rodata itself, which can confuse things.
10575     + *
10576     * TODO: Once we have DWARF CFI and smarter instruction decoding logic,
10577     * ensure the same register is used in the mov and jump instructions.
10578     + *
10579     + * NOTE: RETPOLINE made it harder still to decode dynamic jumps.
10580     */
10581     static struct rela *find_switch_table(struct objtool_file *file,
10582     struct symbol *func,
10583     @@ -874,12 +880,25 @@ static struct rela *find_switch_table(struct objtool_file *file,
10584     text_rela->addend + 4);
10585     if (!rodata_rela)
10586     return NULL;
10587     +
10588     file->ignore_unreachables = true;
10589     return rodata_rela;
10590     }
10591    
10592     /* case 3 */
10593     - func_for_each_insn_continue_reverse(file, func, insn) {
10594     + /*
10595     + * Backward search using the @first_jump_src links, these help avoid
10596     + * much of the 'in between' code. Which avoids us getting confused by
10597     + * it.
10598     + */
10599     + for (insn = list_prev_entry(insn, list);
10600     +
10601     + &insn->list != &file->insn_list &&
10602     + insn->sec == func->sec &&
10603     + insn->offset >= func->offset;
10604     +
10605     + insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
10606     +
10607     if (insn->type == INSN_JUMP_DYNAMIC)
10608     break;
10609    
10610     @@ -909,14 +928,32 @@ static struct rela *find_switch_table(struct objtool_file *file,
10611     return NULL;
10612     }
10613    
10614     +
10615     static int add_func_switch_tables(struct objtool_file *file,
10616     struct symbol *func)
10617     {
10618     - struct instruction *insn, *prev_jump = NULL;
10619     + struct instruction *insn, *last = NULL, *prev_jump = NULL;
10620     struct rela *rela, *prev_rela = NULL;
10621     int ret;
10622    
10623     func_for_each_insn(file, func, insn) {
10624     + if (!last)
10625     + last = insn;
10626     +
10627     + /*
10628     + * Store back-pointers for unconditional forward jumps such
10629     + * that find_switch_table() can back-track using those and
10630     + * avoid some potentially confusing code.
10631     + */
10632     + if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
10633     + insn->offset > last->offset &&
10634     + insn->jump_dest->offset > insn->offset &&
10635     + !insn->jump_dest->first_jump_src) {
10636     +
10637     + insn->jump_dest->first_jump_src = insn;
10638     + last = insn->jump_dest;
10639     + }
10640     +
10641     if (insn->type != INSN_JUMP_DYNAMIC)
10642     continue;
10643    
10644     diff --git a/tools/objtool/check.h b/tools/objtool/check.h
10645     index dbadb304a410..23a1d065cae1 100644
10646     --- a/tools/objtool/check.h
10647     +++ b/tools/objtool/check.h
10648     @@ -47,6 +47,7 @@ struct instruction {
10649     bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
10650     struct symbol *call_dest;
10651     struct instruction *jump_dest;
10652     + struct instruction *first_jump_src;
10653     struct list_head alts;
10654     struct symbol *func;
10655     struct stack_op stack_op;
10656     diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
10657     index 2e43f9d42bd5..9a866459bff4 100644
10658     --- a/virt/kvm/arm/arm.c
10659     +++ b/virt/kvm/arm/arm.c
10660     @@ -31,6 +31,7 @@
10661     #include <linux/irqbypass.h>
10662     #include <trace/events/kvm.h>
10663     #include <kvm/arm_pmu.h>
10664     +#include <kvm/arm_psci.h>
10665    
10666     #define CREATE_TRACE_POINTS
10667     #include "trace.h"
10668     @@ -46,7 +47,6 @@
10669     #include <asm/kvm_mmu.h>
10670     #include <asm/kvm_emulate.h>
10671     #include <asm/kvm_coproc.h>
10672     -#include <asm/kvm_psci.h>
10673     #include <asm/sections.h>
10674    
10675     #ifdef REQUIRES_VIRT
10676     @@ -1158,7 +1158,7 @@ static void cpu_init_hyp_mode(void *dummy)
10677     pgd_ptr = kvm_mmu_get_httbr();
10678     stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
10679     hyp_stack_ptr = stack_page + PAGE_SIZE;
10680     - vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
10681     + vector_ptr = (unsigned long)kvm_get_hyp_vector();
10682    
10683     __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
10684     __cpu_init_stage2();
10685     @@ -1239,6 +1239,7 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
10686     cpu_hyp_reset();
10687    
10688     return NOTIFY_OK;
10689     + case CPU_PM_ENTER_FAILED:
10690     case CPU_PM_EXIT:
10691     if (__this_cpu_read(kvm_arm_hardware_enabled))
10692     /* The hardware was enabled before suspend. */
10693     @@ -1403,6 +1404,12 @@ static int init_hyp_mode(void)
10694     goto out_err;
10695     }
10696    
10697     + err = kvm_map_vectors();
10698     + if (err) {
10699     + kvm_err("Cannot map vectors\n");
10700     + goto out_err;
10701     + }
10702     +
10703     /*
10704     * Map the Hyp stack pages
10705     */
10706     diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
10707     index f1e363bab5e8..6919352cbf15 100644
10708     --- a/virt/kvm/arm/psci.c
10709     +++ b/virt/kvm/arm/psci.c
10710     @@ -15,16 +15,16 @@
10711     * along with this program. If not, see <http://www.gnu.org/licenses/>.
10712     */
10713    
10714     +#include <linux/arm-smccc.h>
10715     #include <linux/preempt.h>
10716     #include <linux/kvm_host.h>
10717     #include <linux/wait.h>
10718    
10719     #include <asm/cputype.h>
10720     #include <asm/kvm_emulate.h>
10721     -#include <asm/kvm_psci.h>
10722     #include <asm/kvm_host.h>
10723    
10724     -#include <uapi/linux/psci.h>
10725     +#include <kvm/arm_psci.h>
10726    
10727     /*
10728     * This is an implementation of the Power State Coordination Interface
10729     @@ -33,6 +33,38 @@
10730    
10731     #define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
10732    
10733     +static u32 smccc_get_function(struct kvm_vcpu *vcpu)
10734     +{
10735     + return vcpu_get_reg(vcpu, 0);
10736     +}
10737     +
10738     +static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
10739     +{
10740     + return vcpu_get_reg(vcpu, 1);
10741     +}
10742     +
10743     +static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
10744     +{
10745     + return vcpu_get_reg(vcpu, 2);
10746     +}
10747     +
10748     +static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
10749     +{
10750     + return vcpu_get_reg(vcpu, 3);
10751     +}
10752     +
10753     +static void smccc_set_retval(struct kvm_vcpu *vcpu,
10754     + unsigned long a0,
10755     + unsigned long a1,
10756     + unsigned long a2,
10757     + unsigned long a3)
10758     +{
10759     + vcpu_set_reg(vcpu, 0, a0);
10760     + vcpu_set_reg(vcpu, 1, a1);
10761     + vcpu_set_reg(vcpu, 2, a2);
10762     + vcpu_set_reg(vcpu, 3, a3);
10763     +}
10764     +
10765     static unsigned long psci_affinity_mask(unsigned long affinity_level)
10766     {
10767     if (affinity_level <= 3)
10768     @@ -78,7 +110,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
10769     unsigned long context_id;
10770     phys_addr_t target_pc;
10771    
10772     - cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
10773     + cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
10774     if (vcpu_mode_is_32bit(source_vcpu))
10775     cpu_id &= ~((u32) 0);
10776    
10777     @@ -91,14 +123,14 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
10778     if (!vcpu)
10779     return PSCI_RET_INVALID_PARAMS;
10780     if (!vcpu->arch.power_off) {
10781     - if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
10782     + if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
10783     return PSCI_RET_ALREADY_ON;
10784     else
10785     return PSCI_RET_INVALID_PARAMS;
10786     }
10787    
10788     - target_pc = vcpu_get_reg(source_vcpu, 2);
10789     - context_id = vcpu_get_reg(source_vcpu, 3);
10790     + target_pc = smccc_get_arg2(source_vcpu);
10791     + context_id = smccc_get_arg3(source_vcpu);
10792    
10793     kvm_reset_vcpu(vcpu);
10794    
10795     @@ -117,7 +149,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
10796     * NOTE: We always update r0 (or x0) because for PSCI v0.1
10797     * the general puspose registers are undefined upon CPU_ON.
10798     */
10799     - vcpu_set_reg(vcpu, 0, context_id);
10800     + smccc_set_retval(vcpu, context_id, 0, 0, 0);
10801     vcpu->arch.power_off = false;
10802     smp_mb(); /* Make sure the above is visible */
10803    
10804     @@ -137,8 +169,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
10805     struct kvm *kvm = vcpu->kvm;
10806     struct kvm_vcpu *tmp;
10807    
10808     - target_affinity = vcpu_get_reg(vcpu, 1);
10809     - lowest_affinity_level = vcpu_get_reg(vcpu, 2);
10810     + target_affinity = smccc_get_arg1(vcpu);
10811     + lowest_affinity_level = smccc_get_arg2(vcpu);
10812    
10813     /* Determine target affinity mask */
10814     target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
10815     @@ -200,18 +232,10 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
10816     kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
10817     }
10818    
10819     -int kvm_psci_version(struct kvm_vcpu *vcpu)
10820     -{
10821     - if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
10822     - return KVM_ARM_PSCI_0_2;
10823     -
10824     - return KVM_ARM_PSCI_0_1;
10825     -}
10826     -
10827     static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
10828     {
10829     struct kvm *kvm = vcpu->kvm;
10830     - unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
10831     + u32 psci_fn = smccc_get_function(vcpu);
10832     unsigned long val;
10833     int ret = 1;
10834    
10835     @@ -221,7 +245,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
10836     * Bits[31:16] = Major Version = 0
10837     * Bits[15:0] = Minor Version = 2
10838     */
10839     - val = 2;
10840     + val = KVM_ARM_PSCI_0_2;
10841     break;
10842     case PSCI_0_2_FN_CPU_SUSPEND:
10843     case PSCI_0_2_FN64_CPU_SUSPEND:
10844     @@ -278,14 +302,56 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
10845     break;
10846     }
10847    
10848     - vcpu_set_reg(vcpu, 0, val);
10849     + smccc_set_retval(vcpu, val, 0, 0, 0);
10850     + return ret;
10851     +}
10852     +
10853     +static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
10854     +{
10855     + u32 psci_fn = smccc_get_function(vcpu);
10856     + u32 feature;
10857     + unsigned long val;
10858     + int ret = 1;
10859     +
10860     + switch(psci_fn) {
10861     + case PSCI_0_2_FN_PSCI_VERSION:
10862     + val = KVM_ARM_PSCI_1_0;
10863     + break;
10864     + case PSCI_1_0_FN_PSCI_FEATURES:
10865     + feature = smccc_get_arg1(vcpu);
10866     + switch(feature) {
10867     + case PSCI_0_2_FN_PSCI_VERSION:
10868     + case PSCI_0_2_FN_CPU_SUSPEND:
10869     + case PSCI_0_2_FN64_CPU_SUSPEND:
10870     + case PSCI_0_2_FN_CPU_OFF:
10871     + case PSCI_0_2_FN_CPU_ON:
10872     + case PSCI_0_2_FN64_CPU_ON:
10873     + case PSCI_0_2_FN_AFFINITY_INFO:
10874     + case PSCI_0_2_FN64_AFFINITY_INFO:
10875     + case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
10876     + case PSCI_0_2_FN_SYSTEM_OFF:
10877     + case PSCI_0_2_FN_SYSTEM_RESET:
10878     + case PSCI_1_0_FN_PSCI_FEATURES:
10879     + case ARM_SMCCC_VERSION_FUNC_ID:
10880     + val = 0;
10881     + break;
10882     + default:
10883     + val = PSCI_RET_NOT_SUPPORTED;
10884     + break;
10885     + }
10886     + break;
10887     + default:
10888     + return kvm_psci_0_2_call(vcpu);
10889     + }
10890     +
10891     + smccc_set_retval(vcpu, val, 0, 0, 0);
10892     return ret;
10893     }
10894    
10895     static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
10896     {
10897     struct kvm *kvm = vcpu->kvm;
10898     - unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
10899     + u32 psci_fn = smccc_get_function(vcpu);
10900     unsigned long val;
10901    
10902     switch (psci_fn) {
10903     @@ -303,7 +369,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
10904     break;
10905     }
10906    
10907     - vcpu_set_reg(vcpu, 0, val);
10908     + smccc_set_retval(vcpu, val, 0, 0, 0);
10909     return 1;
10910     }
10911    
10912     @@ -321,9 +387,11 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
10913     * Errors:
10914     * -EINVAL: Unrecognized PSCI function
10915     */
10916     -int kvm_psci_call(struct kvm_vcpu *vcpu)
10917     +static int kvm_psci_call(struct kvm_vcpu *vcpu)
10918     {
10919     - switch (kvm_psci_version(vcpu)) {
10920     + switch (kvm_psci_version(vcpu, vcpu->kvm)) {
10921     + case KVM_ARM_PSCI_1_0:
10922     + return kvm_psci_1_0_call(vcpu);
10923     case KVM_ARM_PSCI_0_2:
10924     return kvm_psci_0_2_call(vcpu);
10925     case KVM_ARM_PSCI_0_1:
10926     @@ -332,3 +400,30 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
10927     return -EINVAL;
10928     };
10929     }
10930     +
10931     +int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
10932     +{
10933     + u32 func_id = smccc_get_function(vcpu);
10934     + u32 val = PSCI_RET_NOT_SUPPORTED;
10935     + u32 feature;
10936     +
10937     + switch (func_id) {
10938     + case ARM_SMCCC_VERSION_FUNC_ID:
10939     + val = ARM_SMCCC_VERSION_1_1;
10940     + break;
10941     + case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
10942     + feature = smccc_get_arg1(vcpu);
10943     + switch(feature) {
10944     + case ARM_SMCCC_ARCH_WORKAROUND_1:
10945     + if (kvm_arm_harden_branch_predictor())
10946     + val = 0;
10947     + break;
10948     + }
10949     + break;
10950     + default:
10951     + return kvm_psci_call(vcpu);
10952     + }
10953     +
10954     + smccc_set_retval(vcpu, val, 0, 0, 0);
10955     + return 1;
10956     +}