Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0119-4.14.20-all-fixes.patch

Parent Directory | Revision Log


Revision 3238 - (show annotations) (download)
Fri Nov 9 12:14:58 2018 UTC (5 years, 6 months ago) by niro
File size: 344988 byte(s)
-added up to patches-4.14.79
1 diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
2 index 66e8ce14d23d..f3d0d316d5f1 100644
3 --- a/Documentation/arm64/silicon-errata.txt
4 +++ b/Documentation/arm64/silicon-errata.txt
5 @@ -71,6 +71,7 @@ stable kernels.
6 | Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 |
7 | Hisilicon | Hip0{6,7} | #161010701 | N/A |
8 | | | | |
9 -| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
10 +| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
11 | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
12 | Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 |
13 +| Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 |
14 diff --git a/Makefile b/Makefile
15 index 76a0b13623f4..33176140f133 100644
16 --- a/Makefile
17 +++ b/Makefile
18 @@ -1,7 +1,7 @@
19 # SPDX-License-Identifier: GPL-2.0
20 VERSION = 4
21 PATCHLEVEL = 14
22 -SUBLEVEL = 19
23 +SUBLEVEL = 20
24 EXTRAVERSION =
25 NAME = Petit Gorille
26
27 @@ -416,7 +416,8 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
28 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
29
30 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
31 -export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
32 +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
33 +export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
34 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
35 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
36 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
37 diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
38 index d2e4da93e68c..ca3322536f72 100644
39 --- a/arch/alpha/include/asm/futex.h
40 +++ b/arch/alpha/include/asm/futex.h
41 @@ -20,8 +20,8 @@
42 "3: .subsection 2\n" \
43 "4: br 1b\n" \
44 " .previous\n" \
45 - EXC(1b,3b,%1,$31) \
46 - EXC(2b,3b,%1,$31) \
47 + EXC(1b,3b,$31,%1) \
48 + EXC(2b,3b,$31,%1) \
49 : "=&r" (oldval), "=&r"(ret) \
50 : "r" (uaddr), "r"(oparg) \
51 : "memory")
52 @@ -82,8 +82,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
53 "3: .subsection 2\n"
54 "4: br 1b\n"
55 " .previous\n"
56 - EXC(1b,3b,%0,$31)
57 - EXC(2b,3b,%0,$31)
58 + EXC(1b,3b,$31,%0)
59 + EXC(2b,3b,$31,%0)
60 : "+r"(ret), "=&r"(prev), "=&r"(cmp)
61 : "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
62 : "memory");
63 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
64 index ce3a675c0c4b..75a5c35a2067 100644
65 --- a/arch/alpha/kernel/osf_sys.c
66 +++ b/arch/alpha/kernel/osf_sys.c
67 @@ -964,8 +964,8 @@ static inline long
68 put_tv32(struct timeval32 __user *o, struct timeval *i)
69 {
70 return copy_to_user(o, &(struct timeval32){
71 - .tv_sec = o->tv_sec,
72 - .tv_usec = o->tv_usec},
73 + .tv_sec = i->tv_sec,
74 + .tv_usec = i->tv_usec},
75 sizeof(struct timeval32));
76 }
77
78 diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h
79 index 26231601630e..f332d88ffaff 100644
80 --- a/arch/alpha/kernel/pci_impl.h
81 +++ b/arch/alpha/kernel/pci_impl.h
82 @@ -144,7 +144,8 @@ struct pci_iommu_arena
83 };
84
85 #if defined(CONFIG_ALPHA_SRM) && \
86 - (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA))
87 + (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA) || \
88 + defined(CONFIG_ALPHA_AVANTI))
89 # define NEED_SRM_SAVE_RESTORE
90 #else
91 # undef NEED_SRM_SAVE_RESTORE
92 diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
93 index 74bfb1f2d68e..3a885253f486 100644
94 --- a/arch/alpha/kernel/process.c
95 +++ b/arch/alpha/kernel/process.c
96 @@ -269,12 +269,13 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
97 application calling fork. */
98 if (clone_flags & CLONE_SETTLS)
99 childti->pcb.unique = regs->r20;
100 + else
101 + regs->r20 = 0; /* OSF/1 has some strange fork() semantics. */
102 childti->pcb.usp = usp ?: rdusp();
103 *childregs = *regs;
104 childregs->r0 = 0;
105 childregs->r19 = 0;
106 childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */
107 - regs->r20 = 0;
108 stack = ((struct switch_stack *) regs) - 1;
109 *childstack = *stack;
110 childstack->r26 = (unsigned long) ret_from_fork;
111 diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
112 index 4bd99a7b1c41..f43bd05dede2 100644
113 --- a/arch/alpha/kernel/traps.c
114 +++ b/arch/alpha/kernel/traps.c
115 @@ -160,11 +160,16 @@ void show_stack(struct task_struct *task, unsigned long *sp)
116 for(i=0; i < kstack_depth_to_print; i++) {
117 if (((long) stack & (THREAD_SIZE-1)) == 0)
118 break;
119 - if (i && ((i % 4) == 0))
120 - printk("\n ");
121 - printk("%016lx ", *stack++);
122 + if ((i % 4) == 0) {
123 + if (i)
124 + pr_cont("\n");
125 + printk(" ");
126 + } else {
127 + pr_cont(" ");
128 + }
129 + pr_cont("%016lx", *stack++);
130 }
131 - printk("\n");
132 + pr_cont("\n");
133 dik_show_trace(sp);
134 }
135
136 diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
137 index 1b0e0e86ee9c..96e62ec105d0 100644
138 --- a/arch/arm/crypto/crc32-ce-glue.c
139 +++ b/arch/arm/crypto/crc32-ce-glue.c
140 @@ -188,6 +188,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
141 .base.cra_name = "crc32",
142 .base.cra_driver_name = "crc32-arm-ce",
143 .base.cra_priority = 200,
144 + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
145 .base.cra_blocksize = 1,
146 .base.cra_module = THIS_MODULE,
147 }, {
148 @@ -203,6 +204,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
149 .base.cra_name = "crc32c",
150 .base.cra_driver_name = "crc32c-arm-ce",
151 .base.cra_priority = 200,
152 + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
153 .base.cra_blocksize = 1,
154 .base.cra_module = THIS_MODULE,
155 } };
156 diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
157 index 4a879f6ff13b..31fbb9285f62 100644
158 --- a/arch/arm/include/asm/kvm_host.h
159 +++ b/arch/arm/include/asm/kvm_host.h
160 @@ -293,4 +293,10 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
161 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
162 struct kvm_device_attr *attr);
163
164 +static inline bool kvm_arm_harden_branch_predictor(void)
165 +{
166 + /* No way to detect it yet, pretend it is not there. */
167 + return false;
168 +}
169 +
170 #endif /* __ARM_KVM_HOST_H__ */
171 diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
172 index fa6f2174276b..eb46fc81a440 100644
173 --- a/arch/arm/include/asm/kvm_mmu.h
174 +++ b/arch/arm/include/asm/kvm_mmu.h
175 @@ -221,6 +221,16 @@ static inline unsigned int kvm_get_vmid_bits(void)
176 return 8;
177 }
178
179 +static inline void *kvm_get_hyp_vector(void)
180 +{
181 + return kvm_ksym_ref(__kvm_hyp_vector);
182 +}
183 +
184 +static inline int kvm_map_vectors(void)
185 +{
186 + return 0;
187 +}
188 +
189 #endif /* !__ASSEMBLY__ */
190
191 #endif /* __ARM_KVM_MMU_H__ */
192 diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
193 deleted file mode 100644
194 index 6bda945d31fa..000000000000
195 --- a/arch/arm/include/asm/kvm_psci.h
196 +++ /dev/null
197 @@ -1,27 +0,0 @@
198 -/*
199 - * Copyright (C) 2012 - ARM Ltd
200 - * Author: Marc Zyngier <marc.zyngier@arm.com>
201 - *
202 - * This program is free software; you can redistribute it and/or modify
203 - * it under the terms of the GNU General Public License version 2 as
204 - * published by the Free Software Foundation.
205 - *
206 - * This program is distributed in the hope that it will be useful,
207 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
208 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
209 - * GNU General Public License for more details.
210 - *
211 - * You should have received a copy of the GNU General Public License
212 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
213 - */
214 -
215 -#ifndef __ARM_KVM_PSCI_H__
216 -#define __ARM_KVM_PSCI_H__
217 -
218 -#define KVM_ARM_PSCI_0_1 1
219 -#define KVM_ARM_PSCI_0_2 2
220 -
221 -int kvm_psci_version(struct kvm_vcpu *vcpu);
222 -int kvm_psci_call(struct kvm_vcpu *vcpu);
223 -
224 -#endif /* __ARM_KVM_PSCI_H__ */
225 diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
226 index cf8bf6bf87c4..910bd8dabb3c 100644
227 --- a/arch/arm/kvm/handle_exit.c
228 +++ b/arch/arm/kvm/handle_exit.c
229 @@ -21,7 +21,7 @@
230 #include <asm/kvm_emulate.h>
231 #include <asm/kvm_coproc.h>
232 #include <asm/kvm_mmu.h>
233 -#include <asm/kvm_psci.h>
234 +#include <kvm/arm_psci.h>
235 #include <trace/events/kvm.h>
236
237 #include "trace.h"
238 @@ -36,9 +36,9 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
239 kvm_vcpu_hvc_get_imm(vcpu));
240 vcpu->stat.hvc_exit_stat++;
241
242 - ret = kvm_psci_call(vcpu);
243 + ret = kvm_hvc_call_handler(vcpu);
244 if (ret < 0) {
245 - kvm_inject_undefined(vcpu);
246 + vcpu_set_reg(vcpu, 0, ~0UL);
247 return 1;
248 }
249
250 @@ -47,7 +47,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
251
252 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
253 {
254 - kvm_inject_undefined(vcpu);
255 + /*
256 + * "If an SMC instruction executed at Non-secure EL1 is
257 + * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
258 + * Trap exception, not a Secure Monitor Call exception [...]"
259 + *
260 + * We need to advance the PC after the trap, as it would
261 + * otherwise return to the same address...
262 + */
263 + vcpu_set_reg(vcpu, 0, ~0UL);
264 + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
265 return 1;
266 }
267
268 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
269 index 0df64a6a56d4..c2abb4e88ff2 100644
270 --- a/arch/arm64/Kconfig
271 +++ b/arch/arm64/Kconfig
272 @@ -504,20 +504,13 @@ config CAVIUM_ERRATUM_30115
273 config QCOM_FALKOR_ERRATUM_1003
274 bool "Falkor E1003: Incorrect translation due to ASID change"
275 default y
276 - select ARM64_PAN if ARM64_SW_TTBR0_PAN
277 help
278 On Falkor v1, an incorrect ASID may be cached in the TLB when ASID
279 - and BADDR are changed together in TTBRx_EL1. The workaround for this
280 - issue is to use a reserved ASID in cpu_do_switch_mm() before
281 - switching to the new ASID. Saying Y here selects ARM64_PAN if
282 - ARM64_SW_TTBR0_PAN is selected. This is done because implementing and
283 - maintaining the E1003 workaround in the software PAN emulation code
284 - would be an unnecessary complication. The affected Falkor v1 CPU
285 - implements ARMv8.1 hardware PAN support and using hardware PAN
286 - support versus software PAN emulation is mutually exclusive at
287 - runtime.
288 -
289 - If unsure, say Y.
290 + and BADDR are changed together in TTBRx_EL1. Since we keep the ASID
291 + in TTBR1_EL1, this situation only occurs in the entry trampoline and
292 + then only for entries in the walk cache, since the leaf translation
293 + is unchanged. Work around the erratum by invalidating the walk cache
294 + entries for the trampoline before entering the kernel proper.
295
296 config QCOM_FALKOR_ERRATUM_1009
297 bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
298 @@ -539,6 +532,16 @@ config QCOM_QDF2400_ERRATUM_0065
299
300 If unsure, say Y.
301
302 +config QCOM_FALKOR_ERRATUM_E1041
303 + bool "Falkor E1041: Speculative instruction fetches might cause errant memory access"
304 + default y
305 + help
306 + Falkor CPU may speculatively fetch instructions from an improper
307 + memory location when MMU translation is changed from SCTLR_ELn[M]=1
308 + to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem.
309 +
310 + If unsure, say Y.
311 +
312 endmenu
313
314
315 @@ -803,6 +806,35 @@ config FORCE_MAX_ZONEORDER
316 However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
317 4M allocations matching the default size used by generic code.
318
319 +config UNMAP_KERNEL_AT_EL0
320 + bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
321 + default y
322 + help
323 + Speculation attacks against some high-performance processors can
324 + be used to bypass MMU permission checks and leak kernel data to
325 + userspace. This can be defended against by unmapping the kernel
326 + when running in userspace, mapping it back in on exception entry
327 + via a trampoline page in the vector table.
328 +
329 + If unsure, say Y.
330 +
331 +config HARDEN_BRANCH_PREDICTOR
332 + bool "Harden the branch predictor against aliasing attacks" if EXPERT
333 + default y
334 + help
335 + Speculation attacks against some high-performance processors rely on
336 + being able to manipulate the branch predictor for a victim context by
337 + executing aliasing branches in the attacker context. Such attacks
338 + can be partially mitigated against by clearing internal branch
339 + predictor state and limiting the prediction logic in some situations.
340 +
341 + This config option will take CPU-specific actions to harden the
342 + branch predictor against aliasing attacks and may rely on specific
343 + instruction sequences or control bits being set by the system
344 + firmware.
345 +
346 + If unsure, say Y.
347 +
348 menuconfig ARMV8_DEPRECATED
349 bool "Emulate deprecated/obsolete ARMv8 instructions"
350 depends on COMPAT
351 diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
352 index 9c3bdf87e543..51327645b3fb 100644
353 --- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts
354 +++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
355 @@ -61,6 +61,12 @@
356 reg = <0x0 0x0 0x0 0x80000000>;
357 };
358
359 + aliases {
360 + ethernet0 = &cpm_eth0;
361 + ethernet1 = &cpm_eth1;
362 + ethernet2 = &cpm_eth2;
363 + };
364 +
365 cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus {
366 compatible = "regulator-fixed";
367 regulator-name = "usb3h0-vbus";
368 diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
369 index 0d7b2ae46610..a4f82f1efbbc 100644
370 --- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts
371 +++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
372 @@ -61,6 +61,13 @@
373 reg = <0x0 0x0 0x0 0x80000000>;
374 };
375
376 + aliases {
377 + ethernet0 = &cpm_eth0;
378 + ethernet1 = &cpm_eth2;
379 + ethernet2 = &cps_eth0;
380 + ethernet3 = &cps_eth1;
381 + };
382 +
383 cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus {
384 compatible = "regulator-fixed";
385 regulator-name = "cpm-usb3h0-vbus";
386 diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
387 index acf5c7d16d79..2b6b792dab93 100644
388 --- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
389 +++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
390 @@ -62,6 +62,12 @@
391 reg = <0x0 0x0 0x0 0x80000000>;
392 };
393
394 + aliases {
395 + ethernet0 = &cpm_eth0;
396 + ethernet1 = &cps_eth0;
397 + ethernet2 = &cps_eth1;
398 + };
399 +
400 /* Regulator labels correspond with schematics */
401 v_3_3: regulator-3-3v {
402 compatible = "regulator-fixed";
403 diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c
404 index 624f4137918c..34b4e3d46aab 100644
405 --- a/arch/arm64/crypto/crc32-ce-glue.c
406 +++ b/arch/arm64/crypto/crc32-ce-glue.c
407 @@ -185,6 +185,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
408 .base.cra_name = "crc32",
409 .base.cra_driver_name = "crc32-arm64-ce",
410 .base.cra_priority = 200,
411 + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
412 .base.cra_blocksize = 1,
413 .base.cra_module = THIS_MODULE,
414 }, {
415 @@ -200,6 +201,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
416 .base.cra_name = "crc32c",
417 .base.cra_driver_name = "crc32c-arm64-ce",
418 .base.cra_priority = 200,
419 + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
420 .base.cra_blocksize = 1,
421 .base.cra_module = THIS_MODULE,
422 } };
423 diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
424 index b3da6c886835..dd49c3567f20 100644
425 --- a/arch/arm64/include/asm/asm-uaccess.h
426 +++ b/arch/arm64/include/asm/asm-uaccess.h
427 @@ -4,6 +4,7 @@
428
429 #include <asm/alternative.h>
430 #include <asm/kernel-pgtable.h>
431 +#include <asm/mmu.h>
432 #include <asm/sysreg.h>
433 #include <asm/assembler.h>
434
435 @@ -13,51 +14,62 @@
436 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
437 .macro __uaccess_ttbr0_disable, tmp1
438 mrs \tmp1, ttbr1_el1 // swapper_pg_dir
439 + bic \tmp1, \tmp1, #TTBR_ASID_MASK
440 add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
441 msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
442 isb
443 + sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE
444 + msr ttbr1_el1, \tmp1 // set reserved ASID
445 + isb
446 .endm
447
448 - .macro __uaccess_ttbr0_enable, tmp1
449 + .macro __uaccess_ttbr0_enable, tmp1, tmp2
450 get_thread_info \tmp1
451 ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
452 + mrs \tmp2, ttbr1_el1
453 + extr \tmp2, \tmp2, \tmp1, #48
454 + ror \tmp2, \tmp2, #16
455 + msr ttbr1_el1, \tmp2 // set the active ASID
456 + isb
457 msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
458 isb
459 .endm
460
461 - .macro uaccess_ttbr0_disable, tmp1
462 + .macro uaccess_ttbr0_disable, tmp1, tmp2
463 alternative_if_not ARM64_HAS_PAN
464 + save_and_disable_irq \tmp2 // avoid preemption
465 __uaccess_ttbr0_disable \tmp1
466 + restore_irq \tmp2
467 alternative_else_nop_endif
468 .endm
469
470 - .macro uaccess_ttbr0_enable, tmp1, tmp2
471 + .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
472 alternative_if_not ARM64_HAS_PAN
473 - save_and_disable_irq \tmp2 // avoid preemption
474 - __uaccess_ttbr0_enable \tmp1
475 - restore_irq \tmp2
476 + save_and_disable_irq \tmp3 // avoid preemption
477 + __uaccess_ttbr0_enable \tmp1, \tmp2
478 + restore_irq \tmp3
479 alternative_else_nop_endif
480 .endm
481 #else
482 - .macro uaccess_ttbr0_disable, tmp1
483 + .macro uaccess_ttbr0_disable, tmp1, tmp2
484 .endm
485
486 - .macro uaccess_ttbr0_enable, tmp1, tmp2
487 + .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
488 .endm
489 #endif
490
491 /*
492 * These macros are no-ops when UAO is present.
493 */
494 - .macro uaccess_disable_not_uao, tmp1
495 - uaccess_ttbr0_disable \tmp1
496 + .macro uaccess_disable_not_uao, tmp1, tmp2
497 + uaccess_ttbr0_disable \tmp1, \tmp2
498 alternative_if ARM64_ALT_PAN_NOT_UAO
499 SET_PSTATE_PAN(1)
500 alternative_else_nop_endif
501 .endm
502
503 - .macro uaccess_enable_not_uao, tmp1, tmp2
504 - uaccess_ttbr0_enable \tmp1, \tmp2
505 + .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3
506 + uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
507 alternative_if ARM64_ALT_PAN_NOT_UAO
508 SET_PSTATE_PAN(0)
509 alternative_else_nop_endif
510 diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
511 index d58a6253c6ab..463619dcadd4 100644
512 --- a/arch/arm64/include/asm/assembler.h
513 +++ b/arch/arm64/include/asm/assembler.h
514 @@ -25,7 +25,6 @@
515
516 #include <asm/asm-offsets.h>
517 #include <asm/cpufeature.h>
518 -#include <asm/mmu_context.h>
519 #include <asm/page.h>
520 #include <asm/pgtable-hwdef.h>
521 #include <asm/ptrace.h>
522 @@ -96,6 +95,24 @@
523 dmb \opt
524 .endm
525
526 +/*
527 + * Value prediction barrier
528 + */
529 + .macro csdb
530 + hint #20
531 + .endm
532 +
533 +/*
534 + * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
535 + * of bounds.
536 + */
537 + .macro mask_nospec64, idx, limit, tmp
538 + sub \tmp, \idx, \limit
539 + bic \tmp, \tmp, \idx
540 + and \idx, \idx, \tmp, asr #63
541 + csdb
542 + .endm
543 +
544 /*
545 * NOP sequence
546 */
547 @@ -464,39 +481,18 @@ alternative_endif
548 mrs \rd, sp_el0
549 .endm
550
551 -/*
552 - * Errata workaround prior to TTBR0_EL1 update
553 - *
554 - * val: TTBR value with new BADDR, preserved
555 - * tmp0: temporary register, clobbered
556 - * tmp1: other temporary register, clobbered
557 +/**
558 + * Errata workaround prior to disable MMU. Insert an ISB immediately prior
559 + * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
560 */
561 - .macro pre_ttbr0_update_workaround, val, tmp0, tmp1
562 -#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
563 -alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
564 - mrs \tmp0, ttbr0_el1
565 - mov \tmp1, #FALKOR_RESERVED_ASID
566 - bfi \tmp0, \tmp1, #48, #16 // reserved ASID + old BADDR
567 - msr ttbr0_el1, \tmp0
568 + .macro pre_disable_mmu_workaround
569 +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
570 isb
571 - bfi \tmp0, \val, #0, #48 // reserved ASID + new BADDR
572 - msr ttbr0_el1, \tmp0
573 - isb
574 -alternative_else_nop_endif
575 #endif
576 .endm
577
578 -/*
579 - * Errata workaround post TTBR0_EL1 update.
580 - */
581 - .macro post_ttbr0_update_workaround
582 -#ifdef CONFIG_CAVIUM_ERRATUM_27456
583 -alternative_if ARM64_WORKAROUND_CAVIUM_27456
584 - ic iallu
585 - dsb nsh
586 - isb
587 -alternative_else_nop_endif
588 -#endif
589 + .macro pte_to_phys, phys, pte
590 + and \phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
591 .endm
592
593 #endif /* __ASM_ASSEMBLER_H */
594 diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
595 index 0fe7e43b7fbc..0b0755c961ac 100644
596 --- a/arch/arm64/include/asm/barrier.h
597 +++ b/arch/arm64/include/asm/barrier.h
598 @@ -31,6 +31,8 @@
599 #define dmb(opt) asm volatile("dmb " #opt : : : "memory")
600 #define dsb(opt) asm volatile("dsb " #opt : : : "memory")
601
602 +#define csdb() asm volatile("hint #20" : : : "memory")
603 +
604 #define mb() dsb(sy)
605 #define rmb() dsb(ld)
606 #define wmb() dsb(st)
607 @@ -38,6 +40,27 @@
608 #define dma_rmb() dmb(oshld)
609 #define dma_wmb() dmb(oshst)
610
611 +/*
612 + * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
613 + * and 0 otherwise.
614 + */
615 +#define array_index_mask_nospec array_index_mask_nospec
616 +static inline unsigned long array_index_mask_nospec(unsigned long idx,
617 + unsigned long sz)
618 +{
619 + unsigned long mask;
620 +
621 + asm volatile(
622 + " cmp %1, %2\n"
623 + " sbc %0, xzr, xzr\n"
624 + : "=r" (mask)
625 + : "r" (idx), "Ir" (sz)
626 + : "cc");
627 +
628 + csdb();
629 + return mask;
630 +}
631 +
632 #define __smp_mb() dmb(ish)
633 #define __smp_rmb() dmb(ishld)
634 #define __smp_wmb() dmb(ishst)
635 diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
636 index 8da621627d7c..2e7b236bc596 100644
637 --- a/arch/arm64/include/asm/cpucaps.h
638 +++ b/arch/arm64/include/asm/cpucaps.h
639 @@ -40,7 +40,10 @@
640 #define ARM64_WORKAROUND_858921 19
641 #define ARM64_WORKAROUND_CAVIUM_30115 20
642 #define ARM64_HAS_DCPOP 21
643 +#define ARM64_UNMAP_KERNEL_AT_EL0 23
644 +#define ARM64_HARDEN_BRANCH_PREDICTOR 24
645 +#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
646
647 -#define ARM64_NCAPS 22
648 +#define ARM64_NCAPS 26
649
650 #endif /* __ASM_CPUCAPS_H */
651 diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
652 index 235e77d98261..be7bd19c87ec 100644
653 --- a/arch/arm64/include/asm/cputype.h
654 +++ b/arch/arm64/include/asm/cputype.h
655 @@ -79,26 +79,37 @@
656 #define ARM_CPU_PART_AEM_V8 0xD0F
657 #define ARM_CPU_PART_FOUNDATION 0xD00
658 #define ARM_CPU_PART_CORTEX_A57 0xD07
659 +#define ARM_CPU_PART_CORTEX_A72 0xD08
660 #define ARM_CPU_PART_CORTEX_A53 0xD03
661 #define ARM_CPU_PART_CORTEX_A73 0xD09
662 +#define ARM_CPU_PART_CORTEX_A75 0xD0A
663
664 #define APM_CPU_PART_POTENZA 0x000
665
666 #define CAVIUM_CPU_PART_THUNDERX 0x0A1
667 #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
668 #define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
669 +#define CAVIUM_CPU_PART_THUNDERX2 0x0AF
670
671 #define BRCM_CPU_PART_VULCAN 0x516
672
673 #define QCOM_CPU_PART_FALKOR_V1 0x800
674 +#define QCOM_CPU_PART_FALKOR 0xC00
675 +#define QCOM_CPU_PART_KRYO 0x200
676
677 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
678 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
679 +#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
680 #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
681 +#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
682 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
683 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
684 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
685 +#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
686 +#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
687 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
688 +#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
689 +#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
690
691 #ifndef __ASSEMBLY__
692
693 diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
694 index c4cd5081d78b..8389050328bb 100644
695 --- a/arch/arm64/include/asm/efi.h
696 +++ b/arch/arm64/include/asm/efi.h
697 @@ -121,19 +121,21 @@ static inline void efi_set_pgd(struct mm_struct *mm)
698 if (mm != current->active_mm) {
699 /*
700 * Update the current thread's saved ttbr0 since it is
701 - * restored as part of a return from exception. Set
702 - * the hardware TTBR0_EL1 using cpu_switch_mm()
703 - * directly to enable potential errata workarounds.
704 + * restored as part of a return from exception. Enable
705 + * access to the valid TTBR0_EL1 and invoke the errata
706 + * workaround directly since there is no return from
707 + * exception when invoking the EFI run-time services.
708 */
709 update_saved_ttbr0(current, mm);
710 - cpu_switch_mm(mm->pgd, mm);
711 + uaccess_ttbr0_enable();
712 + post_ttbr_update_workaround();
713 } else {
714 /*
715 * Defer the switch to the current thread's TTBR0_EL1
716 * until uaccess_enable(). Restore the current
717 * thread's saved ttbr0 corresponding to its active_mm
718 */
719 - cpu_set_reserved_ttbr0();
720 + uaccess_ttbr0_disable();
721 update_saved_ttbr0(current, current->active_mm);
722 }
723 }
724 diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
725 index 4052ec39e8db..ec1e6d6fa14c 100644
726 --- a/arch/arm64/include/asm/fixmap.h
727 +++ b/arch/arm64/include/asm/fixmap.h
728 @@ -58,6 +58,11 @@ enum fixed_addresses {
729 FIX_APEI_GHES_NMI,
730 #endif /* CONFIG_ACPI_APEI_GHES */
731
732 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
733 + FIX_ENTRY_TRAMP_DATA,
734 + FIX_ENTRY_TRAMP_TEXT,
735 +#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
736 +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
737 __end_of_permanent_fixed_addresses,
738
739 /*
740 diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
741 index 5bb2fd4674e7..07fe2479d310 100644
742 --- a/arch/arm64/include/asm/futex.h
743 +++ b/arch/arm64/include/asm/futex.h
744 @@ -48,9 +48,10 @@ do { \
745 } while (0)
746
747 static inline int
748 -arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
749 +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
750 {
751 int oldval = 0, ret, tmp;
752 + u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
753
754 pagefault_disable();
755
756 @@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
757 }
758
759 static inline int
760 -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
761 +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
762 u32 oldval, u32 newval)
763 {
764 int ret = 0;
765 u32 val, tmp;
766 + u32 __user *uaddr;
767
768 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
769 + if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
770 return -EFAULT;
771
772 + uaddr = __uaccess_mask_ptr(_uaddr);
773 uaccess_enable();
774 asm volatile("// futex_atomic_cmpxchg_inatomic\n"
775 " prfm pstl1strm, %2\n"
776 diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
777 index 26a64d0f9ab9..a7ef5a051911 100644
778 --- a/arch/arm64/include/asm/kvm_asm.h
779 +++ b/arch/arm64/include/asm/kvm_asm.h
780 @@ -66,6 +66,8 @@ extern u32 __kvm_get_mdcr_el2(void);
781
782 extern u32 __init_stage2_translation(void);
783
784 +extern void __qcom_hyp_sanitize_btac_predictors(void);
785 +
786 #endif
787
788 #endif /* __ARM_KVM_ASM_H__ */
789 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
790 index e923b58606e2..8ad208cb866c 100644
791 --- a/arch/arm64/include/asm/kvm_host.h
792 +++ b/arch/arm64/include/asm/kvm_host.h
793 @@ -384,4 +384,9 @@ static inline void __cpu_init_stage2(void)
794 "PARange is %d bits, unsupported configuration!", parange);
795 }
796
797 +static inline bool kvm_arm_harden_branch_predictor(void)
798 +{
799 + return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
800 +}
801 +
802 #endif /* __ARM64_KVM_HOST_H__ */
803 diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
804 index 672c8684d5c2..2d6d4bd9de52 100644
805 --- a/arch/arm64/include/asm/kvm_mmu.h
806 +++ b/arch/arm64/include/asm/kvm_mmu.h
807 @@ -309,5 +309,43 @@ static inline unsigned int kvm_get_vmid_bits(void)
808 return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
809 }
810
811 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
812 +#include <asm/mmu.h>
813 +
814 +static inline void *kvm_get_hyp_vector(void)
815 +{
816 + struct bp_hardening_data *data = arm64_get_bp_hardening_data();
817 + void *vect = kvm_ksym_ref(__kvm_hyp_vector);
818 +
819 + if (data->fn) {
820 + vect = __bp_harden_hyp_vecs_start +
821 + data->hyp_vectors_slot * SZ_2K;
822 +
823 + if (!has_vhe())
824 + vect = lm_alias(vect);
825 + }
826 +
827 + return vect;
828 +}
829 +
830 +static inline int kvm_map_vectors(void)
831 +{
832 + return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
833 + kvm_ksym_ref(__bp_harden_hyp_vecs_end),
834 + PAGE_HYP_EXEC);
835 +}
836 +
837 +#else
838 +static inline void *kvm_get_hyp_vector(void)
839 +{
840 + return kvm_ksym_ref(__kvm_hyp_vector);
841 +}
842 +
843 +static inline int kvm_map_vectors(void)
844 +{
845 + return 0;
846 +}
847 +#endif
848 +
849 #endif /* __ASSEMBLY__ */
850 #endif /* __ARM64_KVM_MMU_H__ */
851 diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h
852 deleted file mode 100644
853 index bc39e557c56c..000000000000
854 --- a/arch/arm64/include/asm/kvm_psci.h
855 +++ /dev/null
856 @@ -1,27 +0,0 @@
857 -/*
858 - * Copyright (C) 2012,2013 - ARM Ltd
859 - * Author: Marc Zyngier <marc.zyngier@arm.com>
860 - *
861 - * This program is free software; you can redistribute it and/or modify
862 - * it under the terms of the GNU General Public License version 2 as
863 - * published by the Free Software Foundation.
864 - *
865 - * This program is distributed in the hope that it will be useful,
866 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
867 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
868 - * GNU General Public License for more details.
869 - *
870 - * You should have received a copy of the GNU General Public License
871 - * along with this program. If not, see <http://www.gnu.org/licenses/>.
872 - */
873 -
874 -#ifndef __ARM64_KVM_PSCI_H__
875 -#define __ARM64_KVM_PSCI_H__
876 -
877 -#define KVM_ARM_PSCI_0_1 1
878 -#define KVM_ARM_PSCI_0_2 2
879 -
880 -int kvm_psci_version(struct kvm_vcpu *vcpu);
881 -int kvm_psci_call(struct kvm_vcpu *vcpu);
882 -
883 -#endif /* __ARM64_KVM_PSCI_H__ */
884 diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
885 index f7c4d2146aed..d4bae7d6e0d8 100644
886 --- a/arch/arm64/include/asm/memory.h
887 +++ b/arch/arm64/include/asm/memory.h
888 @@ -61,8 +61,6 @@
889 * KIMAGE_VADDR - the virtual address of the start of the kernel image
890 * VA_BITS - the maximum number of bits for virtual addresses.
891 * VA_START - the first kernel virtual address.
892 - * TASK_SIZE - the maximum size of a user space task.
893 - * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
894 */
895 #define VA_BITS (CONFIG_ARM64_VA_BITS)
896 #define VA_START (UL(0xffffffffffffffff) - \
897 @@ -77,19 +75,6 @@
898 #define PCI_IO_END (VMEMMAP_START - SZ_2M)
899 #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
900 #define FIXADDR_TOP (PCI_IO_START - SZ_2M)
901 -#define TASK_SIZE_64 (UL(1) << VA_BITS)
902 -
903 -#ifdef CONFIG_COMPAT
904 -#define TASK_SIZE_32 UL(0x100000000)
905 -#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
906 - TASK_SIZE_32 : TASK_SIZE_64)
907 -#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
908 - TASK_SIZE_32 : TASK_SIZE_64)
909 -#else
910 -#define TASK_SIZE TASK_SIZE_64
911 -#endif /* CONFIG_COMPAT */
912 -
913 -#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
914
915 #define KERNEL_START _text
916 #define KERNEL_END _end
917 diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
918 index 0d34bf0a89c7..6dd83d75b82a 100644
919 --- a/arch/arm64/include/asm/mmu.h
920 +++ b/arch/arm64/include/asm/mmu.h
921 @@ -17,6 +17,10 @@
922 #define __ASM_MMU_H
923
924 #define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
925 +#define USER_ASID_FLAG (UL(1) << 48)
926 +#define TTBR_ASID_MASK (UL(0xffff) << 48)
927 +
928 +#ifndef __ASSEMBLY__
929
930 typedef struct {
931 atomic64_t id;
932 @@ -31,6 +35,49 @@ typedef struct {
933 */
934 #define ASID(mm) ((mm)->context.id.counter & 0xffff)
935
936 +static inline bool arm64_kernel_unmapped_at_el0(void)
937 +{
938 + return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
939 + cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
940 +}
941 +
942 +typedef void (*bp_hardening_cb_t)(void);
943 +
944 +struct bp_hardening_data {
945 + int hyp_vectors_slot;
946 + bp_hardening_cb_t fn;
947 +};
948 +
949 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
950 +extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
951 +
952 +DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
953 +
954 +static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
955 +{
956 + return this_cpu_ptr(&bp_hardening_data);
957 +}
958 +
959 +static inline void arm64_apply_bp_hardening(void)
960 +{
961 + struct bp_hardening_data *d;
962 +
963 + if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
964 + return;
965 +
966 + d = arm64_get_bp_hardening_data();
967 + if (d->fn)
968 + d->fn();
969 +}
970 +#else
971 +static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
972 +{
973 + return NULL;
974 +}
975 +
976 +static inline void arm64_apply_bp_hardening(void) { }
977 +#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
978 +
979 extern void paging_init(void);
980 extern void bootmem_init(void);
981 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
982 @@ -41,4 +88,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
983 extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
984 extern void mark_linear_text_alias_ro(void);
985
986 +#endif /* !__ASSEMBLY__ */
987 #endif
988 diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
989 index 9d155fa9a507..779d7a2ec5ec 100644
990 --- a/arch/arm64/include/asm/mmu_context.h
991 +++ b/arch/arm64/include/asm/mmu_context.h
992 @@ -19,8 +19,6 @@
993 #ifndef __ASM_MMU_CONTEXT_H
994 #define __ASM_MMU_CONTEXT_H
995
996 -#define FALKOR_RESERVED_ASID 1
997 -
998 #ifndef __ASSEMBLY__
999
1000 #include <linux/compiler.h>
1001 @@ -57,6 +55,13 @@ static inline void cpu_set_reserved_ttbr0(void)
1002 isb();
1003 }
1004
1005 +static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
1006 +{
1007 + BUG_ON(pgd == swapper_pg_dir);
1008 + cpu_set_reserved_ttbr0();
1009 + cpu_do_switch_mm(virt_to_phys(pgd),mm);
1010 +}
1011 +
1012 /*
1013 * TCR.T0SZ value to use when the ID map is active. Usually equals
1014 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
1015 @@ -170,7 +175,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
1016 else
1017 ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
1018
1019 - task_thread_info(tsk)->ttbr0 = ttbr;
1020 + WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
1021 }
1022 #else
1023 static inline void update_saved_ttbr0(struct task_struct *tsk,
1024 @@ -225,6 +230,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
1025 #define activate_mm(prev,next) switch_mm(prev, next, current)
1026
1027 void verify_cpu_asid_bits(void);
1028 +void post_ttbr_update_workaround(void);
1029
1030 #endif /* !__ASSEMBLY__ */
1031
1032 diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
1033 index eb0c2bd90de9..8df4cb6ac6f7 100644
1034 --- a/arch/arm64/include/asm/pgtable-hwdef.h
1035 +++ b/arch/arm64/include/asm/pgtable-hwdef.h
1036 @@ -272,6 +272,7 @@
1037 #define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT)
1038 #define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT)
1039
1040 +#define TCR_A1 (UL(1) << 22)
1041 #define TCR_ASID16 (UL(1) << 36)
1042 #define TCR_TBI0 (UL(1) << 37)
1043 #define TCR_HA (UL(1) << 39)
1044 diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
1045 index 0a5635fb0ef9..2db84df5eb42 100644
1046 --- a/arch/arm64/include/asm/pgtable-prot.h
1047 +++ b/arch/arm64/include/asm/pgtable-prot.h
1048 @@ -34,8 +34,14 @@
1049
1050 #include <asm/pgtable-types.h>
1051
1052 -#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
1053 -#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
1054 +#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
1055 +#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
1056 +
1057 +#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
1058 +#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
1059 +
1060 +#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
1061 +#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
1062
1063 #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
1064 #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
1065 @@ -47,23 +53,24 @@
1066 #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
1067 #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
1068
1069 -#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
1070 +#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
1071 +#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT
1072
1073 -#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
1074 -#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
1075 -#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
1076 -#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
1077 -#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
1078 +#define PAGE_KERNEL __pgprot(PROT_NORMAL)
1079 +#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
1080 +#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
1081 +#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
1082 +#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
1083
1084 -#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
1085 -#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
1086 -#define PAGE_HYP_RO __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
1087 +#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
1088 +#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
1089 +#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
1090 #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
1091
1092 -#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
1093 -#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
1094 +#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
1095 +#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
1096
1097 -#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
1098 +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
1099 #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
1100 #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
1101 #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
1102 diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
1103 index 960d05c8816a..aafea648a30f 100644
1104 --- a/arch/arm64/include/asm/pgtable.h
1105 +++ b/arch/arm64/include/asm/pgtable.h
1106 @@ -684,6 +684,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1107
1108 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
1109 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
1110 +extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
1111
1112 /*
1113 * Encode and decode a swap entry:
1114 diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
1115 index 14ad6e4e87d1..16cef2e8449e 100644
1116 --- a/arch/arm64/include/asm/proc-fns.h
1117 +++ b/arch/arm64/include/asm/proc-fns.h
1118 @@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
1119
1120 #include <asm/memory.h>
1121
1122 -#define cpu_switch_mm(pgd,mm) \
1123 -do { \
1124 - BUG_ON(pgd == swapper_pg_dir); \
1125 - cpu_do_switch_mm(virt_to_phys(pgd),mm); \
1126 -} while (0)
1127 -
1128 #endif /* __ASSEMBLY__ */
1129 #endif /* __KERNEL__ */
1130 #endif /* __ASM_PROCFNS_H */
1131 diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
1132 index 29adab8138c3..fda6f5812281 100644
1133 --- a/arch/arm64/include/asm/processor.h
1134 +++ b/arch/arm64/include/asm/processor.h
1135 @@ -19,6 +19,13 @@
1136 #ifndef __ASM_PROCESSOR_H
1137 #define __ASM_PROCESSOR_H
1138
1139 +#define TASK_SIZE_64 (UL(1) << VA_BITS)
1140 +
1141 +#define KERNEL_DS UL(-1)
1142 +#define USER_DS (TASK_SIZE_64 - 1)
1143 +
1144 +#ifndef __ASSEMBLY__
1145 +
1146 /*
1147 * Default implementation of macro that returns current
1148 * instruction pointer ("program counter").
1149 @@ -37,6 +44,22 @@
1150 #include <asm/ptrace.h>
1151 #include <asm/types.h>
1152
1153 +/*
1154 + * TASK_SIZE - the maximum size of a user space task.
1155 + * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
1156 + */
1157 +#ifdef CONFIG_COMPAT
1158 +#define TASK_SIZE_32 UL(0x100000000)
1159 +#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
1160 + TASK_SIZE_32 : TASK_SIZE_64)
1161 +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
1162 + TASK_SIZE_32 : TASK_SIZE_64)
1163 +#else
1164 +#define TASK_SIZE TASK_SIZE_64
1165 +#endif /* CONFIG_COMPAT */
1166 +
1167 +#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
1168 +
1169 #define STACK_TOP_MAX TASK_SIZE_64
1170 #ifdef CONFIG_COMPAT
1171 #define AARCH32_VECTORS_BASE 0xffff0000
1172 @@ -194,4 +217,5 @@ static inline void spin_lock_prefetch(const void *ptr)
1173 int cpu_enable_pan(void *__unused);
1174 int cpu_enable_cache_maint_trap(void *__unused);
1175
1176 +#endif /* __ASSEMBLY__ */
1177 #endif /* __ASM_PROCESSOR_H */
1178 diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
1179 index f707fed5886f..ede80d47d0ef 100644
1180 --- a/arch/arm64/include/asm/sysreg.h
1181 +++ b/arch/arm64/include/asm/sysreg.h
1182 @@ -332,6 +332,8 @@
1183 #define ID_AA64ISAR1_DPB_SHIFT 0
1184
1185 /* id_aa64pfr0 */
1186 +#define ID_AA64PFR0_CSV3_SHIFT 60
1187 +#define ID_AA64PFR0_CSV2_SHIFT 56
1188 #define ID_AA64PFR0_GIC_SHIFT 24
1189 #define ID_AA64PFR0_ASIMD_SHIFT 20
1190 #define ID_AA64PFR0_FP_SHIFT 16
1191 diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
1192 index af1c76981911..9e82dd79c7db 100644
1193 --- a/arch/arm64/include/asm/tlbflush.h
1194 +++ b/arch/arm64/include/asm/tlbflush.h
1195 @@ -23,6 +23,7 @@
1196
1197 #include <linux/sched.h>
1198 #include <asm/cputype.h>
1199 +#include <asm/mmu.h>
1200
1201 /*
1202 * Raw TLBI operations.
1203 @@ -54,6 +55,11 @@
1204
1205 #define __tlbi(op, ...) __TLBI_N(op, ##__VA_ARGS__, 1, 0)
1206
1207 +#define __tlbi_user(op, arg) do { \
1208 + if (arm64_kernel_unmapped_at_el0()) \
1209 + __tlbi(op, (arg) | USER_ASID_FLAG); \
1210 +} while (0)
1211 +
1212 /*
1213 * TLB Management
1214 * ==============
1215 @@ -115,6 +121,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
1216
1217 dsb(ishst);
1218 __tlbi(aside1is, asid);
1219 + __tlbi_user(aside1is, asid);
1220 dsb(ish);
1221 }
1222
1223 @@ -125,6 +132,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
1224
1225 dsb(ishst);
1226 __tlbi(vale1is, addr);
1227 + __tlbi_user(vale1is, addr);
1228 dsb(ish);
1229 }
1230
1231 @@ -151,10 +159,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
1232
1233 dsb(ishst);
1234 for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
1235 - if (last_level)
1236 + if (last_level) {
1237 __tlbi(vale1is, addr);
1238 - else
1239 + __tlbi_user(vale1is, addr);
1240 + } else {
1241 __tlbi(vae1is, addr);
1242 + __tlbi_user(vae1is, addr);
1243 + }
1244 }
1245 dsb(ish);
1246 }
1247 @@ -194,6 +205,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
1248 unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
1249
1250 __tlbi(vae1is, addr);
1251 + __tlbi_user(vae1is, addr);
1252 dsb(ish);
1253 }
1254
1255 diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
1256 index fc0f9eb66039..fad8c1b2ca3e 100644
1257 --- a/arch/arm64/include/asm/uaccess.h
1258 +++ b/arch/arm64/include/asm/uaccess.h
1259 @@ -35,16 +35,20 @@
1260 #include <asm/compiler.h>
1261 #include <asm/extable.h>
1262
1263 -#define KERNEL_DS (-1UL)
1264 #define get_ds() (KERNEL_DS)
1265 -
1266 -#define USER_DS TASK_SIZE_64
1267 #define get_fs() (current_thread_info()->addr_limit)
1268
1269 static inline void set_fs(mm_segment_t fs)
1270 {
1271 current_thread_info()->addr_limit = fs;
1272
1273 + /*
1274 + * Prevent a mispredicted conditional call to set_fs from forwarding
1275 + * the wrong address limit to access_ok under speculation.
1276 + */
1277 + dsb(nsh);
1278 + isb();
1279 +
1280 /* On user-mode return, check fs is correct */
1281 set_thread_flag(TIF_FSCHECK);
1282
1283 @@ -66,22 +70,32 @@ static inline void set_fs(mm_segment_t fs)
1284 * Returns 1 if the range is valid, 0 otherwise.
1285 *
1286 * This is equivalent to the following test:
1287 - * (u65)addr + (u65)size <= current->addr_limit
1288 - *
1289 - * This needs 65-bit arithmetic.
1290 + * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
1291 */
1292 -#define __range_ok(addr, size) \
1293 -({ \
1294 - unsigned long __addr = (unsigned long)(addr); \
1295 - unsigned long flag, roksum; \
1296 - __chk_user_ptr(addr); \
1297 - asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
1298 - : "=&r" (flag), "=&r" (roksum) \
1299 - : "1" (__addr), "Ir" (size), \
1300 - "r" (current_thread_info()->addr_limit) \
1301 - : "cc"); \
1302 - flag; \
1303 -})
1304 +static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
1305 +{
1306 + unsigned long limit = current_thread_info()->addr_limit;
1307 +
1308 + __chk_user_ptr(addr);
1309 + asm volatile(
1310 + // A + B <= C + 1 for all A,B,C, in four easy steps:
1311 + // 1: X = A + B; X' = X % 2^64
1312 + " adds %0, %0, %2\n"
1313 + // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
1314 + " csel %1, xzr, %1, hi\n"
1315 + // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
1316 + // to compensate for the carry flag being set in step 4. For
1317 + // X > 2^64, X' merely has to remain nonzero, which it does.
1318 + " csinv %0, %0, xzr, cc\n"
1319 + // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
1320 + // comes from the carry in being clear. Otherwise, we are
1321 + // testing X' - C == 0, subject to the previous adjustments.
1322 + " sbcs xzr, %0, %1\n"
1323 + " cset %0, ls\n"
1324 + : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
1325 +
1326 + return addr;
1327 +}
1328
1329 /*
1330 * When dealing with data aborts, watchpoints, or instruction traps we may end
1331 @@ -90,7 +104,7 @@ static inline void set_fs(mm_segment_t fs)
1332 */
1333 #define untagged_addr(addr) sign_extend64(addr, 55)
1334
1335 -#define access_ok(type, addr, size) __range_ok(addr, size)
1336 +#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
1337 #define user_addr_max get_fs
1338
1339 #define _ASM_EXTABLE(from, to) \
1340 @@ -105,17 +119,23 @@ static inline void set_fs(mm_segment_t fs)
1341 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
1342 static inline void __uaccess_ttbr0_disable(void)
1343 {
1344 - unsigned long ttbr;
1345 + unsigned long flags, ttbr;
1346
1347 + local_irq_save(flags);
1348 + ttbr = read_sysreg(ttbr1_el1);
1349 + ttbr &= ~TTBR_ASID_MASK;
1350 /* reserved_ttbr0 placed at the end of swapper_pg_dir */
1351 - ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
1352 - write_sysreg(ttbr, ttbr0_el1);
1353 + write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
1354 isb();
1355 + /* Set reserved ASID */
1356 + write_sysreg(ttbr, ttbr1_el1);
1357 + isb();
1358 + local_irq_restore(flags);
1359 }
1360
1361 static inline void __uaccess_ttbr0_enable(void)
1362 {
1363 - unsigned long flags;
1364 + unsigned long flags, ttbr0, ttbr1;
1365
1366 /*
1367 * Disable interrupts to avoid preemption between reading the 'ttbr0'
1368 @@ -123,7 +143,17 @@ static inline void __uaccess_ttbr0_enable(void)
1369 * roll-over and an update of 'ttbr0'.
1370 */
1371 local_irq_save(flags);
1372 - write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
1373 + ttbr0 = READ_ONCE(current_thread_info()->ttbr0);
1374 +
1375 + /* Restore active ASID */
1376 + ttbr1 = read_sysreg(ttbr1_el1);
1377 + ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */
1378 + ttbr1 |= ttbr0 & TTBR_ASID_MASK;
1379 + write_sysreg(ttbr1, ttbr1_el1);
1380 + isb();
1381 +
1382 + /* Restore user page table */
1383 + write_sysreg(ttbr0, ttbr0_el1);
1384 isb();
1385 local_irq_restore(flags);
1386 }
1387 @@ -192,6 +222,26 @@ static inline void uaccess_enable_not_uao(void)
1388 __uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
1389 }
1390
1391 +/*
1392 + * Sanitise a uaccess pointer such that it becomes NULL if above the
1393 + * current addr_limit.
1394 + */
1395 +#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
1396 +static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
1397 +{
1398 + void __user *safe_ptr;
1399 +
1400 + asm volatile(
1401 + " bics xzr, %1, %2\n"
1402 + " csel %0, %1, xzr, eq\n"
1403 + : "=&r" (safe_ptr)
1404 + : "r" (ptr), "r" (current_thread_info()->addr_limit)
1405 + : "cc");
1406 +
1407 + csdb();
1408 + return safe_ptr;
1409 +}
1410 +
1411 /*
1412 * The "__xxx" versions of the user access functions do not verify the address
1413 * space - it must have been done previously with a separate "access_ok()"
1414 @@ -244,28 +294,33 @@ do { \
1415 (x) = (__force __typeof__(*(ptr)))__gu_val; \
1416 } while (0)
1417
1418 -#define __get_user(x, ptr) \
1419 +#define __get_user_check(x, ptr, err) \
1420 ({ \
1421 - int __gu_err = 0; \
1422 - __get_user_err((x), (ptr), __gu_err); \
1423 - __gu_err; \
1424 + __typeof__(*(ptr)) __user *__p = (ptr); \
1425 + might_fault(); \
1426 + if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
1427 + __p = uaccess_mask_ptr(__p); \
1428 + __get_user_err((x), __p, (err)); \
1429 + } else { \
1430 + (x) = 0; (err) = -EFAULT; \
1431 + } \
1432 })
1433
1434 #define __get_user_error(x, ptr, err) \
1435 ({ \
1436 - __get_user_err((x), (ptr), (err)); \
1437 + __get_user_check((x), (ptr), (err)); \
1438 (void)0; \
1439 })
1440
1441 -#define get_user(x, ptr) \
1442 +#define __get_user(x, ptr) \
1443 ({ \
1444 - __typeof__(*(ptr)) __user *__p = (ptr); \
1445 - might_fault(); \
1446 - access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
1447 - __get_user((x), __p) : \
1448 - ((x) = 0, -EFAULT); \
1449 + int __gu_err = 0; \
1450 + __get_user_check((x), (ptr), __gu_err); \
1451 + __gu_err; \
1452 })
1453
1454 +#define get_user __get_user
1455 +
1456 #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
1457 asm volatile( \
1458 "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
1459 @@ -308,43 +363,63 @@ do { \
1460 uaccess_disable_not_uao(); \
1461 } while (0)
1462
1463 -#define __put_user(x, ptr) \
1464 +#define __put_user_check(x, ptr, err) \
1465 ({ \
1466 - int __pu_err = 0; \
1467 - __put_user_err((x), (ptr), __pu_err); \
1468 - __pu_err; \
1469 + __typeof__(*(ptr)) __user *__p = (ptr); \
1470 + might_fault(); \
1471 + if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
1472 + __p = uaccess_mask_ptr(__p); \
1473 + __put_user_err((x), __p, (err)); \
1474 + } else { \
1475 + (err) = -EFAULT; \
1476 + } \
1477 })
1478
1479 #define __put_user_error(x, ptr, err) \
1480 ({ \
1481 - __put_user_err((x), (ptr), (err)); \
1482 + __put_user_check((x), (ptr), (err)); \
1483 (void)0; \
1484 })
1485
1486 -#define put_user(x, ptr) \
1487 +#define __put_user(x, ptr) \
1488 ({ \
1489 - __typeof__(*(ptr)) __user *__p = (ptr); \
1490 - might_fault(); \
1491 - access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
1492 - __put_user((x), __p) : \
1493 - -EFAULT; \
1494 + int __pu_err = 0; \
1495 + __put_user_check((x), (ptr), __pu_err); \
1496 + __pu_err; \
1497 })
1498
1499 +#define put_user __put_user
1500 +
1501 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
1502 -#define raw_copy_from_user __arch_copy_from_user
1503 +#define raw_copy_from_user(to, from, n) \
1504 +({ \
1505 + __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \
1506 +})
1507 +
1508 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
1509 -#define raw_copy_to_user __arch_copy_to_user
1510 -extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
1511 -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1512 +#define raw_copy_to_user(to, from, n) \
1513 +({ \
1514 + __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \
1515 +})
1516 +
1517 +extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
1518 +#define raw_copy_in_user(to, from, n) \
1519 +({ \
1520 + __arch_copy_in_user(__uaccess_mask_ptr(to), \
1521 + __uaccess_mask_ptr(from), (n)); \
1522 +})
1523 +
1524 #define INLINE_COPY_TO_USER
1525 #define INLINE_COPY_FROM_USER
1526
1527 -static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
1528 +extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
1529 +static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
1530 {
1531 if (access_ok(VERIFY_WRITE, to, n))
1532 - n = __clear_user(to, n);
1533 + n = __arch_clear_user(__uaccess_mask_ptr(to), n);
1534 return n;
1535 }
1536 +#define clear_user __clear_user
1537
1538 extern long strncpy_from_user(char *dest, const char __user *src, long count);
1539
1540 @@ -358,7 +433,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __
1541 static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
1542 {
1543 kasan_check_write(dst, size);
1544 - return __copy_user_flushcache(dst, src, size);
1545 + return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
1546 }
1547 #endif
1548
1549 diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
1550 index 2f5ff2a65db3..def8d5623fd1 100644
1551 --- a/arch/arm64/kernel/Makefile
1552 +++ b/arch/arm64/kernel/Makefile
1553 @@ -55,6 +55,10 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
1554 arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
1555 arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
1556
1557 +ifeq ($(CONFIG_KVM),y)
1558 +arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
1559 +endif
1560 +
1561 obj-y += $(arm64-obj-y) vdso/ probes/
1562 obj-m += $(arm64-obj-m)
1563 head-y := head.o
1564 diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
1565 index 67368c7329c0..66be504edb6c 100644
1566 --- a/arch/arm64/kernel/arm64ksyms.c
1567 +++ b/arch/arm64/kernel/arm64ksyms.c
1568 @@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page);
1569 /* user mem (segment) */
1570 EXPORT_SYMBOL(__arch_copy_from_user);
1571 EXPORT_SYMBOL(__arch_copy_to_user);
1572 -EXPORT_SYMBOL(__clear_user);
1573 -EXPORT_SYMBOL(raw_copy_in_user);
1574 +EXPORT_SYMBOL(__arch_clear_user);
1575 +EXPORT_SYMBOL(__arch_copy_in_user);
1576
1577 /* physical memory */
1578 EXPORT_SYMBOL(memstart_addr);
1579 diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
1580 index 71bf088f1e4b..af247d10252f 100644
1581 --- a/arch/arm64/kernel/asm-offsets.c
1582 +++ b/arch/arm64/kernel/asm-offsets.c
1583 @@ -24,6 +24,7 @@
1584 #include <linux/kvm_host.h>
1585 #include <linux/suspend.h>
1586 #include <asm/cpufeature.h>
1587 +#include <asm/fixmap.h>
1588 #include <asm/thread_info.h>
1589 #include <asm/memory.h>
1590 #include <asm/smp_plat.h>
1591 @@ -148,11 +149,14 @@ int main(void)
1592 DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
1593 DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
1594 DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
1595 -
1596 BLANK();
1597 DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
1598 DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
1599 DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next));
1600 DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val));
1601 + BLANK();
1602 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1603 + DEFINE(TRAMP_VALIAS, TRAMP_VALIAS);
1604 +#endif
1605 return 0;
1606 }
1607 diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
1608 new file mode 100644
1609 index 000000000000..e5de33513b5d
1610 --- /dev/null
1611 +++ b/arch/arm64/kernel/bpi.S
1612 @@ -0,0 +1,83 @@
1613 +/*
1614 + * Contains CPU specific branch predictor invalidation sequences
1615 + *
1616 + * Copyright (C) 2018 ARM Ltd.
1617 + *
1618 + * This program is free software; you can redistribute it and/or modify
1619 + * it under the terms of the GNU General Public License version 2 as
1620 + * published by the Free Software Foundation.
1621 + *
1622 + * This program is distributed in the hope that it will be useful,
1623 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1624 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1625 + * GNU General Public License for more details.
1626 + *
1627 + * You should have received a copy of the GNU General Public License
1628 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1629 + */
1630 +
1631 +#include <linux/linkage.h>
1632 +#include <linux/arm-smccc.h>
1633 +
1634 +.macro ventry target
1635 + .rept 31
1636 + nop
1637 + .endr
1638 + b \target
1639 +.endm
1640 +
1641 +.macro vectors target
1642 + ventry \target + 0x000
1643 + ventry \target + 0x080
1644 + ventry \target + 0x100
1645 + ventry \target + 0x180
1646 +
1647 + ventry \target + 0x200
1648 + ventry \target + 0x280
1649 + ventry \target + 0x300
1650 + ventry \target + 0x380
1651 +
1652 + ventry \target + 0x400
1653 + ventry \target + 0x480
1654 + ventry \target + 0x500
1655 + ventry \target + 0x580
1656 +
1657 + ventry \target + 0x600
1658 + ventry \target + 0x680
1659 + ventry \target + 0x700
1660 + ventry \target + 0x780
1661 +.endm
1662 +
1663 + .align 11
1664 +ENTRY(__bp_harden_hyp_vecs_start)
1665 + .rept 4
1666 + vectors __kvm_hyp_vector
1667 + .endr
1668 +ENTRY(__bp_harden_hyp_vecs_end)
1669 +
1670 +ENTRY(__qcom_hyp_sanitize_link_stack_start)
1671 + stp x29, x30, [sp, #-16]!
1672 + .rept 16
1673 + bl . + 4
1674 + .endr
1675 + ldp x29, x30, [sp], #16
1676 +ENTRY(__qcom_hyp_sanitize_link_stack_end)
1677 +
1678 +.macro smccc_workaround_1 inst
1679 + sub sp, sp, #(8 * 4)
1680 + stp x2, x3, [sp, #(8 * 0)]
1681 + stp x0, x1, [sp, #(8 * 2)]
1682 + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
1683 + \inst #0
1684 + ldp x2, x3, [sp, #(8 * 0)]
1685 + ldp x0, x1, [sp, #(8 * 2)]
1686 + add sp, sp, #(8 * 4)
1687 +.endm
1688 +
1689 +ENTRY(__smccc_workaround_1_smc_start)
1690 + smccc_workaround_1 smc
1691 +ENTRY(__smccc_workaround_1_smc_end)
1692 +
1693 +ENTRY(__smccc_workaround_1_hvc_start)
1694 + smccc_workaround_1 hvc
1695 +ENTRY(__smccc_workaround_1_hvc_end)
1696 diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
1697 index 65f42d257414..8021b46c9743 100644
1698 --- a/arch/arm64/kernel/cpu-reset.S
1699 +++ b/arch/arm64/kernel/cpu-reset.S
1700 @@ -16,7 +16,7 @@
1701 #include <asm/virt.h>
1702
1703 .text
1704 -.pushsection .idmap.text, "ax"
1705 +.pushsection .idmap.text, "awx"
1706
1707 /*
1708 * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
1709 @@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart)
1710 mrs x12, sctlr_el1
1711 ldr x13, =SCTLR_ELx_FLAGS
1712 bic x12, x12, x13
1713 + pre_disable_mmu_workaround
1714 msr sctlr_el1, x12
1715 isb
1716
1717 diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
1718 index 0e27f86ee709..07823595b7f0 100644
1719 --- a/arch/arm64/kernel/cpu_errata.c
1720 +++ b/arch/arm64/kernel/cpu_errata.c
1721 @@ -30,6 +30,20 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
1722 entry->midr_range_max);
1723 }
1724
1725 +static bool __maybe_unused
1726 +is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
1727 +{
1728 + u32 model;
1729 +
1730 + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
1731 +
1732 + model = read_cpuid_id();
1733 + model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
1734 + MIDR_ARCHITECTURE_MASK;
1735 +
1736 + return model == entry->midr_model;
1737 +}
1738 +
1739 static bool
1740 has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
1741 int scope)
1742 @@ -46,6 +60,174 @@ static int cpu_enable_trap_ctr_access(void *__unused)
1743 return 0;
1744 }
1745
1746 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1747 +#include <asm/mmu_context.h>
1748 +#include <asm/cacheflush.h>
1749 +
1750 +DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
1751 +
1752 +#ifdef CONFIG_KVM
1753 +extern char __qcom_hyp_sanitize_link_stack_start[];
1754 +extern char __qcom_hyp_sanitize_link_stack_end[];
1755 +extern char __smccc_workaround_1_smc_start[];
1756 +extern char __smccc_workaround_1_smc_end[];
1757 +extern char __smccc_workaround_1_hvc_start[];
1758 +extern char __smccc_workaround_1_hvc_end[];
1759 +
1760 +static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
1761 + const char *hyp_vecs_end)
1762 +{
1763 + void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
1764 + int i;
1765 +
1766 + for (i = 0; i < SZ_2K; i += 0x80)
1767 + memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
1768 +
1769 + flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
1770 +}
1771 +
1772 +static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
1773 + const char *hyp_vecs_start,
1774 + const char *hyp_vecs_end)
1775 +{
1776 + static int last_slot = -1;
1777 + static DEFINE_SPINLOCK(bp_lock);
1778 + int cpu, slot = -1;
1779 +
1780 + spin_lock(&bp_lock);
1781 + for_each_possible_cpu(cpu) {
1782 + if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
1783 + slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
1784 + break;
1785 + }
1786 + }
1787 +
1788 + if (slot == -1) {
1789 + last_slot++;
1790 + BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
1791 + / SZ_2K) <= last_slot);
1792 + slot = last_slot;
1793 + __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
1794 + }
1795 +
1796 + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
1797 + __this_cpu_write(bp_hardening_data.fn, fn);
1798 + spin_unlock(&bp_lock);
1799 +}
1800 +#else
1801 +#define __qcom_hyp_sanitize_link_stack_start NULL
1802 +#define __qcom_hyp_sanitize_link_stack_end NULL
1803 +#define __smccc_workaround_1_smc_start NULL
1804 +#define __smccc_workaround_1_smc_end NULL
1805 +#define __smccc_workaround_1_hvc_start NULL
1806 +#define __smccc_workaround_1_hvc_end NULL
1807 +
1808 +static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
1809 + const char *hyp_vecs_start,
1810 + const char *hyp_vecs_end)
1811 +{
1812 + __this_cpu_write(bp_hardening_data.fn, fn);
1813 +}
1814 +#endif /* CONFIG_KVM */
1815 +
1816 +static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
1817 + bp_hardening_cb_t fn,
1818 + const char *hyp_vecs_start,
1819 + const char *hyp_vecs_end)
1820 +{
1821 + u64 pfr0;
1822 +
1823 + if (!entry->matches(entry, SCOPE_LOCAL_CPU))
1824 + return;
1825 +
1826 + pfr0 = read_cpuid(ID_AA64PFR0_EL1);
1827 + if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
1828 + return;
1829 +
1830 + __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
1831 +}
1832 +
1833 +#include <uapi/linux/psci.h>
1834 +#include <linux/arm-smccc.h>
1835 +#include <linux/psci.h>
1836 +
1837 +static void call_smc_arch_workaround_1(void)
1838 +{
1839 + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
1840 +}
1841 +
1842 +static void call_hvc_arch_workaround_1(void)
1843 +{
1844 + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
1845 +}
1846 +
1847 +static int enable_smccc_arch_workaround_1(void *data)
1848 +{
1849 + const struct arm64_cpu_capabilities *entry = data;
1850 + bp_hardening_cb_t cb;
1851 + void *smccc_start, *smccc_end;
1852 + struct arm_smccc_res res;
1853 +
1854 + if (!entry->matches(entry, SCOPE_LOCAL_CPU))
1855 + return 0;
1856 +
1857 + if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
1858 + return 0;
1859 +
1860 + switch (psci_ops.conduit) {
1861 + case PSCI_CONDUIT_HVC:
1862 + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1863 + ARM_SMCCC_ARCH_WORKAROUND_1, &res);
1864 + if (res.a0)
1865 + return 0;
1866 + cb = call_hvc_arch_workaround_1;
1867 + smccc_start = __smccc_workaround_1_hvc_start;
1868 + smccc_end = __smccc_workaround_1_hvc_end;
1869 + break;
1870 +
1871 + case PSCI_CONDUIT_SMC:
1872 + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1873 + ARM_SMCCC_ARCH_WORKAROUND_1, &res);
1874 + if (res.a0)
1875 + return 0;
1876 + cb = call_smc_arch_workaround_1;
1877 + smccc_start = __smccc_workaround_1_smc_start;
1878 + smccc_end = __smccc_workaround_1_smc_end;
1879 + break;
1880 +
1881 + default:
1882 + return 0;
1883 + }
1884 +
1885 + install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
1886 +
1887 + return 0;
1888 +}
1889 +
1890 +static void qcom_link_stack_sanitization(void)
1891 +{
1892 + u64 tmp;
1893 +
1894 + asm volatile("mov %0, x30 \n"
1895 + ".rept 16 \n"
1896 + "bl . + 4 \n"
1897 + ".endr \n"
1898 + "mov x30, %0 \n"
1899 + : "=&r" (tmp));
1900 +}
1901 +
1902 +static int qcom_enable_link_stack_sanitization(void *data)
1903 +{
1904 + const struct arm64_cpu_capabilities *entry = data;
1905 +
1906 + install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
1907 + __qcom_hyp_sanitize_link_stack_start,
1908 + __qcom_hyp_sanitize_link_stack_end);
1909 +
1910 + return 0;
1911 +}
1912 +#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
1913 +
1914 #define MIDR_RANGE(model, min, max) \
1915 .def_scope = SCOPE_LOCAL_CPU, \
1916 .matches = is_affected_midr_range, \
1917 @@ -169,6 +351,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1918 MIDR_CPU_VAR_REV(0, 0),
1919 MIDR_CPU_VAR_REV(0, 0)),
1920 },
1921 + {
1922 + .desc = "Qualcomm Technologies Kryo erratum 1003",
1923 + .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
1924 + .def_scope = SCOPE_LOCAL_CPU,
1925 + .midr_model = MIDR_QCOM_KRYO,
1926 + .matches = is_kryo_midr,
1927 + },
1928 #endif
1929 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
1930 {
1931 @@ -186,6 +375,47 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1932 .capability = ARM64_WORKAROUND_858921,
1933 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
1934 },
1935 +#endif
1936 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1937 + {
1938 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1939 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
1940 + .enable = enable_smccc_arch_workaround_1,
1941 + },
1942 + {
1943 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1944 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
1945 + .enable = enable_smccc_arch_workaround_1,
1946 + },
1947 + {
1948 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1949 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
1950 + .enable = enable_smccc_arch_workaround_1,
1951 + },
1952 + {
1953 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1954 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
1955 + .enable = enable_smccc_arch_workaround_1,
1956 + },
1957 + {
1958 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1959 + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
1960 + .enable = qcom_enable_link_stack_sanitization,
1961 + },
1962 + {
1963 + .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
1964 + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
1965 + },
1966 + {
1967 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1968 + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
1969 + .enable = enable_smccc_arch_workaround_1,
1970 + },
1971 + {
1972 + .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
1973 + MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
1974 + .enable = enable_smccc_arch_workaround_1,
1975 + },
1976 #endif
1977 {
1978 }
1979 @@ -200,15 +430,18 @@ void verify_local_cpu_errata_workarounds(void)
1980 {
1981 const struct arm64_cpu_capabilities *caps = arm64_errata;
1982
1983 - for (; caps->matches; caps++)
1984 - if (!cpus_have_cap(caps->capability) &&
1985 - caps->matches(caps, SCOPE_LOCAL_CPU)) {
1986 + for (; caps->matches; caps++) {
1987 + if (cpus_have_cap(caps->capability)) {
1988 + if (caps->enable)
1989 + caps->enable((void *)caps);
1990 + } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
1991 pr_crit("CPU%d: Requires work around for %s, not detected"
1992 " at boot time\n",
1993 smp_processor_id(),
1994 caps->desc ? : "an erratum");
1995 cpu_die_early();
1996 }
1997 + }
1998 }
1999
2000 void update_cpu_errata_workarounds(void)
2001 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
2002 index 21e2c95d24e7..582142ae92e1 100644
2003 --- a/arch/arm64/kernel/cpufeature.c
2004 +++ b/arch/arm64/kernel/cpufeature.c
2005 @@ -125,6 +125,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
2006 };
2007
2008 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
2009 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
2010 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
2011 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
2012 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
2013 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
2014 @@ -796,6 +798,86 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
2015 ID_AA64PFR0_FP_SHIFT) < 0;
2016 }
2017
2018 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2019 +static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
2020 +
2021 +static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
2022 + int __unused)
2023 +{
2024 + char const *str = "command line option";
2025 + u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2026 +
2027 + /*
2028 + * For reasons that aren't entirely clear, enabling KPTI on Cavium
2029 + * ThunderX leads to apparent I-cache corruption of kernel text, which
2030 + * ends as well as you might imagine. Don't even try.
2031 + */
2032 + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
2033 + str = "ARM64_WORKAROUND_CAVIUM_27456";
2034 + __kpti_forced = -1;
2035 + }
2036 +
2037 + /* Forced? */
2038 + if (__kpti_forced) {
2039 + pr_info_once("kernel page table isolation forced %s by %s\n",
2040 + __kpti_forced > 0 ? "ON" : "OFF", str);
2041 + return __kpti_forced > 0;
2042 + }
2043 +
2044 + /* Useful for KASLR robustness */
2045 + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
2046 + return true;
2047 +
2048 + /* Don't force KPTI for CPUs that are not vulnerable */
2049 + switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
2050 + case MIDR_CAVIUM_THUNDERX2:
2051 + case MIDR_BRCM_VULCAN:
2052 + return false;
2053 + }
2054 +
2055 + /* Defer to CPU feature registers */
2056 + return !cpuid_feature_extract_unsigned_field(pfr0,
2057 + ID_AA64PFR0_CSV3_SHIFT);
2058 +}
2059 +
2060 +static int kpti_install_ng_mappings(void *__unused)
2061 +{
2062 + typedef void (kpti_remap_fn)(int, int, phys_addr_t);
2063 + extern kpti_remap_fn idmap_kpti_install_ng_mappings;
2064 + kpti_remap_fn *remap_fn;
2065 +
2066 + static bool kpti_applied = false;
2067 + int cpu = smp_processor_id();
2068 +
2069 + if (kpti_applied)
2070 + return 0;
2071 +
2072 + remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
2073 +
2074 + cpu_install_idmap();
2075 + remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
2076 + cpu_uninstall_idmap();
2077 +
2078 + if (!cpu)
2079 + kpti_applied = true;
2080 +
2081 + return 0;
2082 +}
2083 +
2084 +static int __init parse_kpti(char *str)
2085 +{
2086 + bool enabled;
2087 + int ret = strtobool(str, &enabled);
2088 +
2089 + if (ret)
2090 + return ret;
2091 +
2092 + __kpti_forced = enabled ? 1 : -1;
2093 + return 0;
2094 +}
2095 +__setup("kpti=", parse_kpti);
2096 +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
2097 +
2098 static const struct arm64_cpu_capabilities arm64_features[] = {
2099 {
2100 .desc = "GIC system register CPU interface",
2101 @@ -882,6 +964,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
2102 .def_scope = SCOPE_SYSTEM,
2103 .matches = hyp_offset_low,
2104 },
2105 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2106 + {
2107 + .desc = "Kernel page table isolation (KPTI)",
2108 + .capability = ARM64_UNMAP_KERNEL_AT_EL0,
2109 + .def_scope = SCOPE_SYSTEM,
2110 + .matches = unmap_kernel_at_el0,
2111 + .enable = kpti_install_ng_mappings,
2112 + },
2113 +#endif
2114 {
2115 /* FP/SIMD is not implemented */
2116 .capability = ARM64_HAS_NO_FPSIMD,
2117 @@ -1000,6 +1091,25 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
2118 cap_set_elf_hwcap(hwcaps);
2119 }
2120
2121 +/*
2122 + * Check if the current CPU has a given feature capability.
2123 + * Should be called from non-preemptible context.
2124 + */
2125 +static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
2126 + unsigned int cap)
2127 +{
2128 + const struct arm64_cpu_capabilities *caps;
2129 +
2130 + if (WARN_ON(preemptible()))
2131 + return false;
2132 +
2133 + for (caps = cap_array; caps->matches; caps++)
2134 + if (caps->capability == cap &&
2135 + caps->matches(caps, SCOPE_LOCAL_CPU))
2136 + return true;
2137 + return false;
2138 +}
2139 +
2140 void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
2141 const char *info)
2142 {
2143 @@ -1035,7 +1145,7 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
2144 * uses an IPI, giving us a PSTATE that disappears when
2145 * we return.
2146 */
2147 - stop_machine(caps->enable, NULL, cpu_online_mask);
2148 + stop_machine(caps->enable, (void *)caps, cpu_online_mask);
2149 }
2150 }
2151 }
2152 @@ -1078,8 +1188,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
2153 }
2154
2155 static void
2156 -verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
2157 +verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
2158 {
2159 + const struct arm64_cpu_capabilities *caps = caps_list;
2160 for (; caps->matches; caps++) {
2161 if (!cpus_have_cap(caps->capability))
2162 continue;
2163 @@ -1087,13 +1198,13 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
2164 * If the new CPU misses an advertised feature, we cannot proceed
2165 * further, park the cpu.
2166 */
2167 - if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
2168 + if (!__this_cpu_has_cap(caps_list, caps->capability)) {
2169 pr_crit("CPU%d: missing feature: %s\n",
2170 smp_processor_id(), caps->desc);
2171 cpu_die_early();
2172 }
2173 if (caps->enable)
2174 - caps->enable(NULL);
2175 + caps->enable((void *)caps);
2176 }
2177 }
2178
2179 @@ -1148,25 +1259,6 @@ static void __init mark_const_caps_ready(void)
2180 static_branch_enable(&arm64_const_caps_ready);
2181 }
2182
2183 -/*
2184 - * Check if the current CPU has a given feature capability.
2185 - * Should be called from non-preemptible context.
2186 - */
2187 -static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
2188 - unsigned int cap)
2189 -{
2190 - const struct arm64_cpu_capabilities *caps;
2191 -
2192 - if (WARN_ON(preemptible()))
2193 - return false;
2194 -
2195 - for (caps = cap_array; caps->desc; caps++)
2196 - if (caps->capability == cap && caps->matches)
2197 - return caps->matches(caps, SCOPE_LOCAL_CPU);
2198 -
2199 - return false;
2200 -}
2201 -
2202 extern const struct arm64_cpu_capabilities arm64_errata[];
2203
2204 bool this_cpu_has_cap(unsigned int cap)
2205 diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
2206 index 4e6ad355bd05..6b9736c3fb56 100644
2207 --- a/arch/arm64/kernel/efi-entry.S
2208 +++ b/arch/arm64/kernel/efi-entry.S
2209 @@ -96,6 +96,7 @@ ENTRY(entry)
2210 mrs x0, sctlr_el2
2211 bic x0, x0, #1 << 0 // clear SCTLR.M
2212 bic x0, x0, #1 << 2 // clear SCTLR.C
2213 + pre_disable_mmu_workaround
2214 msr sctlr_el2, x0
2215 isb
2216 b 2f
2217 @@ -103,6 +104,7 @@ ENTRY(entry)
2218 mrs x0, sctlr_el1
2219 bic x0, x0, #1 << 0 // clear SCTLR.M
2220 bic x0, x0, #1 << 2 // clear SCTLR.C
2221 + pre_disable_mmu_workaround
2222 msr sctlr_el1, x0
2223 isb
2224 2:
2225 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
2226 index e1c59d4008a8..93958d1341bb 100644
2227 --- a/arch/arm64/kernel/entry.S
2228 +++ b/arch/arm64/kernel/entry.S
2229 @@ -29,6 +29,8 @@
2230 #include <asm/esr.h>
2231 #include <asm/irq.h>
2232 #include <asm/memory.h>
2233 +#include <asm/mmu.h>
2234 +#include <asm/processor.h>
2235 #include <asm/ptrace.h>
2236 #include <asm/thread_info.h>
2237 #include <asm/asm-uaccess.h>
2238 @@ -69,8 +71,21 @@
2239 #define BAD_FIQ 2
2240 #define BAD_ERROR 3
2241
2242 - .macro kernel_ventry label
2243 + .macro kernel_ventry, el, label, regsize = 64
2244 .align 7
2245 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2246 +alternative_if ARM64_UNMAP_KERNEL_AT_EL0
2247 + .if \el == 0
2248 + .if \regsize == 64
2249 + mrs x30, tpidrro_el0
2250 + msr tpidrro_el0, xzr
2251 + .else
2252 + mov x30, xzr
2253 + .endif
2254 + .endif
2255 +alternative_else_nop_endif
2256 +#endif
2257 +
2258 sub sp, sp, #S_FRAME_SIZE
2259 #ifdef CONFIG_VMAP_STACK
2260 /*
2261 @@ -82,7 +97,7 @@
2262 tbnz x0, #THREAD_SHIFT, 0f
2263 sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
2264 sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
2265 - b \label
2266 + b el\()\el\()_\label
2267
2268 0:
2269 /*
2270 @@ -114,7 +129,12 @@
2271 sub sp, sp, x0
2272 mrs x0, tpidrro_el0
2273 #endif
2274 - b \label
2275 + b el\()\el\()_\label
2276 + .endm
2277 +
2278 + .macro tramp_alias, dst, sym
2279 + mov_q \dst, TRAMP_VALIAS
2280 + add \dst, \dst, #(\sym - .entry.tramp.text)
2281 .endm
2282
2283 .macro kernel_entry, el, regsize = 64
2284 @@ -147,10 +167,10 @@
2285 .else
2286 add x21, sp, #S_FRAME_SIZE
2287 get_thread_info tsk
2288 - /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
2289 + /* Save the task's original addr_limit and set USER_DS */
2290 ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
2291 str x20, [sp, #S_ORIG_ADDR_LIMIT]
2292 - mov x20, #TASK_SIZE_64
2293 + mov x20, #USER_DS
2294 str x20, [tsk, #TSK_TI_ADDR_LIMIT]
2295 /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
2296 .endif /* \el == 0 */
2297 @@ -185,7 +205,7 @@ alternative_else_nop_endif
2298
2299 .if \el != 0
2300 mrs x21, ttbr0_el1
2301 - tst x21, #0xffff << 48 // Check for the reserved ASID
2302 + tst x21, #TTBR_ASID_MASK // Check for the reserved ASID
2303 orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
2304 b.eq 1f // TTBR0 access already disabled
2305 and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
2306 @@ -246,7 +266,7 @@ alternative_else_nop_endif
2307 tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
2308 .endif
2309
2310 - __uaccess_ttbr0_enable x0
2311 + __uaccess_ttbr0_enable x0, x1
2312
2313 .if \el == 0
2314 /*
2315 @@ -255,7 +275,7 @@ alternative_else_nop_endif
2316 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
2317 * corruption).
2318 */
2319 - post_ttbr0_update_workaround
2320 + bl post_ttbr_update_workaround
2321 .endif
2322 1:
2323 .if \el != 0
2324 @@ -267,18 +287,20 @@ alternative_else_nop_endif
2325 .if \el == 0
2326 ldr x23, [sp, #S_SP] // load return stack pointer
2327 msr sp_el0, x23
2328 + tst x22, #PSR_MODE32_BIT // native task?
2329 + b.eq 3f
2330 +
2331 #ifdef CONFIG_ARM64_ERRATUM_845719
2332 alternative_if ARM64_WORKAROUND_845719
2333 - tbz x22, #4, 1f
2334 #ifdef CONFIG_PID_IN_CONTEXTIDR
2335 mrs x29, contextidr_el1
2336 msr contextidr_el1, x29
2337 #else
2338 msr contextidr_el1, xzr
2339 #endif
2340 -1:
2341 alternative_else_nop_endif
2342 #endif
2343 +3:
2344 .endif
2345
2346 msr elr_el1, x21 // set up the return data
2347 @@ -300,7 +322,21 @@ alternative_else_nop_endif
2348 ldp x28, x29, [sp, #16 * 14]
2349 ldr lr, [sp, #S_LR]
2350 add sp, sp, #S_FRAME_SIZE // restore sp
2351 - eret // return to kernel
2352 +
2353 + .if \el == 0
2354 +alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
2355 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2356 + bne 4f
2357 + msr far_el1, x30
2358 + tramp_alias x30, tramp_exit_native
2359 + br x30
2360 +4:
2361 + tramp_alias x30, tramp_exit_compat
2362 + br x30
2363 +#endif
2364 + .else
2365 + eret
2366 + .endif
2367 .endm
2368
2369 .macro irq_stack_entry
2370 @@ -340,6 +376,7 @@ alternative_else_nop_endif
2371 * x7 is reserved for the system call number in 32-bit mode.
2372 */
2373 wsc_nr .req w25 // number of system calls
2374 +xsc_nr .req x25 // number of system calls (zero-extended)
2375 wscno .req w26 // syscall number
2376 xscno .req x26 // syscall number (zero-extended)
2377 stbl .req x27 // syscall table pointer
2378 @@ -365,31 +402,31 @@ tsk .req x28 // current thread_info
2379
2380 .align 11
2381 ENTRY(vectors)
2382 - kernel_ventry el1_sync_invalid // Synchronous EL1t
2383 - kernel_ventry el1_irq_invalid // IRQ EL1t
2384 - kernel_ventry el1_fiq_invalid // FIQ EL1t
2385 - kernel_ventry el1_error_invalid // Error EL1t
2386 + kernel_ventry 1, sync_invalid // Synchronous EL1t
2387 + kernel_ventry 1, irq_invalid // IRQ EL1t
2388 + kernel_ventry 1, fiq_invalid // FIQ EL1t
2389 + kernel_ventry 1, error_invalid // Error EL1t
2390
2391 - kernel_ventry el1_sync // Synchronous EL1h
2392 - kernel_ventry el1_irq // IRQ EL1h
2393 - kernel_ventry el1_fiq_invalid // FIQ EL1h
2394 - kernel_ventry el1_error_invalid // Error EL1h
2395 + kernel_ventry 1, sync // Synchronous EL1h
2396 + kernel_ventry 1, irq // IRQ EL1h
2397 + kernel_ventry 1, fiq_invalid // FIQ EL1h
2398 + kernel_ventry 1, error_invalid // Error EL1h
2399
2400 - kernel_ventry el0_sync // Synchronous 64-bit EL0
2401 - kernel_ventry el0_irq // IRQ 64-bit EL0
2402 - kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0
2403 - kernel_ventry el0_error_invalid // Error 64-bit EL0
2404 + kernel_ventry 0, sync // Synchronous 64-bit EL0
2405 + kernel_ventry 0, irq // IRQ 64-bit EL0
2406 + kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0
2407 + kernel_ventry 0, error_invalid // Error 64-bit EL0
2408
2409 #ifdef CONFIG_COMPAT
2410 - kernel_ventry el0_sync_compat // Synchronous 32-bit EL0
2411 - kernel_ventry el0_irq_compat // IRQ 32-bit EL0
2412 - kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
2413 - kernel_ventry el0_error_invalid_compat // Error 32-bit EL0
2414 + kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
2415 + kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
2416 + kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0
2417 + kernel_ventry 0, error_invalid_compat, 32 // Error 32-bit EL0
2418 #else
2419 - kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0
2420 - kernel_ventry el0_irq_invalid // IRQ 32-bit EL0
2421 - kernel_ventry el0_fiq_invalid // FIQ 32-bit EL0
2422 - kernel_ventry el0_error_invalid // Error 32-bit EL0
2423 + kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
2424 + kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
2425 + kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
2426 + kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
2427 #endif
2428 END(vectors)
2429
2430 @@ -687,13 +724,15 @@ el0_ia:
2431 * Instruction abort handling
2432 */
2433 mrs x26, far_el1
2434 - // enable interrupts before calling the main handler
2435 - enable_dbg_and_irq
2436 + enable_dbg
2437 +#ifdef CONFIG_TRACE_IRQFLAGS
2438 + bl trace_hardirqs_off
2439 +#endif
2440 ct_user_exit
2441 mov x0, x26
2442 mov x1, x25
2443 mov x2, sp
2444 - bl do_mem_abort
2445 + bl do_el0_ia_bp_hardening
2446 b ret_to_user
2447 el0_fpsimd_acc:
2448 /*
2449 @@ -720,8 +759,10 @@ el0_sp_pc:
2450 * Stack or PC alignment exception handling
2451 */
2452 mrs x26, far_el1
2453 - // enable interrupts before calling the main handler
2454 - enable_dbg_and_irq
2455 + enable_dbg
2456 +#ifdef CONFIG_TRACE_IRQFLAGS
2457 + bl trace_hardirqs_off
2458 +#endif
2459 ct_user_exit
2460 mov x0, x26
2461 mov x1, x25
2462 @@ -780,6 +821,11 @@ el0_irq_naked:
2463 #endif
2464
2465 ct_user_exit
2466 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
2467 + tbz x22, #55, 1f
2468 + bl do_el0_irq_bp_hardening
2469 +1:
2470 +#endif
2471 irq_handler
2472
2473 #ifdef CONFIG_TRACE_IRQFLAGS
2474 @@ -848,6 +894,7 @@ el0_svc_naked: // compat entry point
2475 b.ne __sys_trace
2476 cmp wscno, wsc_nr // check upper syscall limit
2477 b.hs ni_sys
2478 + mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number
2479 ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
2480 blr x16 // call sys_* routine
2481 b ret_fast_syscall
2482 @@ -895,6 +942,117 @@ __ni_sys_trace:
2483
2484 .popsection // .entry.text
2485
2486 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2487 +/*
2488 + * Exception vectors trampoline.
2489 + */
2490 + .pushsection ".entry.tramp.text", "ax"
2491 +
2492 + .macro tramp_map_kernel, tmp
2493 + mrs \tmp, ttbr1_el1
2494 + sub \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
2495 + bic \tmp, \tmp, #USER_ASID_FLAG
2496 + msr ttbr1_el1, \tmp
2497 +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
2498 +alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
2499 + /* ASID already in \tmp[63:48] */
2500 + movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
2501 + movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
2502 + /* 2MB boundary containing the vectors, so we nobble the walk cache */
2503 + movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
2504 + isb
2505 + tlbi vae1, \tmp
2506 + dsb nsh
2507 +alternative_else_nop_endif
2508 +#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
2509 + .endm
2510 +
2511 + .macro tramp_unmap_kernel, tmp
2512 + mrs \tmp, ttbr1_el1
2513 + add \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
2514 + orr \tmp, \tmp, #USER_ASID_FLAG
2515 + msr ttbr1_el1, \tmp
2516 + /*
2517 + * We avoid running the post_ttbr_update_workaround here because
2518 + * it's only needed by Cavium ThunderX, which requires KPTI to be
2519 + * disabled.
2520 + */
2521 + .endm
2522 +
2523 + .macro tramp_ventry, regsize = 64
2524 + .align 7
2525 +1:
2526 + .if \regsize == 64
2527 + msr tpidrro_el0, x30 // Restored in kernel_ventry
2528 + .endif
2529 + /*
2530 + * Defend against branch aliasing attacks by pushing a dummy
2531 + * entry onto the return stack and using a RET instruction to
2532 + * enter the full-fat kernel vectors.
2533 + */
2534 + bl 2f
2535 + b .
2536 +2:
2537 + tramp_map_kernel x30
2538 +#ifdef CONFIG_RANDOMIZE_BASE
2539 + adr x30, tramp_vectors + PAGE_SIZE
2540 +alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
2541 + ldr x30, [x30]
2542 +#else
2543 + ldr x30, =vectors
2544 +#endif
2545 + prfm plil1strm, [x30, #(1b - tramp_vectors)]
2546 + msr vbar_el1, x30
2547 + add x30, x30, #(1b - tramp_vectors)
2548 + isb
2549 + ret
2550 + .endm
2551 +
2552 + .macro tramp_exit, regsize = 64
2553 + adr x30, tramp_vectors
2554 + msr vbar_el1, x30
2555 + tramp_unmap_kernel x30
2556 + .if \regsize == 64
2557 + mrs x30, far_el1
2558 + .endif
2559 + eret
2560 + .endm
2561 +
2562 + .align 11
2563 +ENTRY(tramp_vectors)
2564 + .space 0x400
2565 +
2566 + tramp_ventry
2567 + tramp_ventry
2568 + tramp_ventry
2569 + tramp_ventry
2570 +
2571 + tramp_ventry 32
2572 + tramp_ventry 32
2573 + tramp_ventry 32
2574 + tramp_ventry 32
2575 +END(tramp_vectors)
2576 +
2577 +ENTRY(tramp_exit_native)
2578 + tramp_exit
2579 +END(tramp_exit_native)
2580 +
2581 +ENTRY(tramp_exit_compat)
2582 + tramp_exit 32
2583 +END(tramp_exit_compat)
2584 +
2585 + .ltorg
2586 + .popsection // .entry.tramp.text
2587 +#ifdef CONFIG_RANDOMIZE_BASE
2588 + .pushsection ".rodata", "a"
2589 + .align PAGE_SHIFT
2590 + .globl __entry_tramp_data_start
2591 +__entry_tramp_data_start:
2592 + .quad vectors
2593 + .popsection // .rodata
2594 +#endif /* CONFIG_RANDOMIZE_BASE */
2595 +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
2596 +
2597 /*
2598 * Special system call wrappers.
2599 */
2600 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
2601 index 0b243ecaf7ac..261f3f88364c 100644
2602 --- a/arch/arm64/kernel/head.S
2603 +++ b/arch/arm64/kernel/head.S
2604 @@ -371,7 +371,7 @@ ENDPROC(__primary_switched)
2605 * end early head section, begin head code that is also used for
2606 * hotplug and needs to have the same protections as the text region
2607 */
2608 - .section ".idmap.text","ax"
2609 + .section ".idmap.text","awx"
2610
2611 ENTRY(kimage_vaddr)
2612 .quad _text - TEXT_OFFSET
2613 @@ -732,6 +732,7 @@ __primary_switch:
2614 * to take into account by discarding the current kernel mapping and
2615 * creating a new one.
2616 */
2617 + pre_disable_mmu_workaround
2618 msr sctlr_el1, x20 // disable the MMU
2619 isb
2620 bl __create_page_tables // recreate kernel mapping
2621 diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
2622 index bcd22d7ee590..9e773732520c 100644
2623 --- a/arch/arm64/kernel/process.c
2624 +++ b/arch/arm64/kernel/process.c
2625 @@ -314,16 +314,14 @@ void tls_preserve_current_state(void)
2626
2627 static void tls_thread_switch(struct task_struct *next)
2628 {
2629 - unsigned long tpidr, tpidrro;
2630 -
2631 tls_preserve_current_state();
2632
2633 - tpidr = *task_user_tls(next);
2634 - tpidrro = is_compat_thread(task_thread_info(next)) ?
2635 - next->thread.tp_value : 0;
2636 + if (is_compat_thread(task_thread_info(next)))
2637 + write_sysreg(next->thread.tp_value, tpidrro_el0);
2638 + else if (!arm64_kernel_unmapped_at_el0())
2639 + write_sysreg(0, tpidrro_el0);
2640
2641 - write_sysreg(tpidr, tpidr_el0);
2642 - write_sysreg(tpidrro, tpidrro_el0);
2643 + write_sysreg(*task_user_tls(next), tpidr_el0);
2644 }
2645
2646 /* Restore the UAO state depending on next's addr_limit */
2647 diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
2648 index ce704a4aeadd..f407e422a720 100644
2649 --- a/arch/arm64/kernel/relocate_kernel.S
2650 +++ b/arch/arm64/kernel/relocate_kernel.S
2651 @@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel)
2652 mrs x0, sctlr_el2
2653 ldr x1, =SCTLR_ELx_FLAGS
2654 bic x0, x0, x1
2655 + pre_disable_mmu_workaround
2656 msr sctlr_el2, x0
2657 isb
2658 1:
2659 diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
2660 index 10dd16d7902d..bebec8ef9372 100644
2661 --- a/arch/arm64/kernel/sleep.S
2662 +++ b/arch/arm64/kernel/sleep.S
2663 @@ -96,7 +96,7 @@ ENTRY(__cpu_suspend_enter)
2664 ret
2665 ENDPROC(__cpu_suspend_enter)
2666
2667 - .pushsection ".idmap.text", "ax"
2668 + .pushsection ".idmap.text", "awx"
2669 ENTRY(cpu_resume)
2670 bl el2_setup // if in EL2 drop to EL1 cleanly
2671 bl __cpu_setup
2672 diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
2673 index 7da3e5c366a0..ddfd3c0942f7 100644
2674 --- a/arch/arm64/kernel/vmlinux.lds.S
2675 +++ b/arch/arm64/kernel/vmlinux.lds.S
2676 @@ -57,6 +57,17 @@ jiffies = jiffies_64;
2677 #define HIBERNATE_TEXT
2678 #endif
2679
2680 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2681 +#define TRAMP_TEXT \
2682 + . = ALIGN(PAGE_SIZE); \
2683 + VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \
2684 + *(.entry.tramp.text) \
2685 + . = ALIGN(PAGE_SIZE); \
2686 + VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
2687 +#else
2688 +#define TRAMP_TEXT
2689 +#endif
2690 +
2691 /*
2692 * The size of the PE/COFF section that covers the kernel image, which
2693 * runs from stext to _edata, must be a round multiple of the PE/COFF
2694 @@ -113,6 +124,7 @@ SECTIONS
2695 HYPERVISOR_TEXT
2696 IDMAP_TEXT
2697 HIBERNATE_TEXT
2698 + TRAMP_TEXT
2699 *(.fixup)
2700 *(.gnu.warning)
2701 . = ALIGN(16);
2702 @@ -214,6 +226,11 @@ SECTIONS
2703 . += RESERVED_TTBR0_SIZE;
2704 #endif
2705
2706 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2707 + tramp_pg_dir = .;
2708 + . += PAGE_SIZE;
2709 +#endif
2710 +
2711 __pecoff_data_size = ABSOLUTE(. - __initdata_begin);
2712 _end = .;
2713
2714 @@ -234,7 +251,10 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
2715 ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
2716 <= SZ_4K, "Hibernate exit text too big or misaligned")
2717 #endif
2718 -
2719 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
2720 +ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
2721 + "Entry trampoline text too big")
2722 +#endif
2723 /*
2724 * If padding is applied before .head.text, virt<->phys conversions will fail.
2725 */
2726 diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
2727 index 380261e258ef..ab48c5ed3943 100644
2728 --- a/arch/arm64/kvm/handle_exit.c
2729 +++ b/arch/arm64/kvm/handle_exit.c
2730 @@ -22,12 +22,13 @@
2731 #include <linux/kvm.h>
2732 #include <linux/kvm_host.h>
2733
2734 +#include <kvm/arm_psci.h>
2735 +
2736 #include <asm/esr.h>
2737 #include <asm/kvm_asm.h>
2738 #include <asm/kvm_coproc.h>
2739 #include <asm/kvm_emulate.h>
2740 #include <asm/kvm_mmu.h>
2741 -#include <asm/kvm_psci.h>
2742
2743 #define CREATE_TRACE_POINTS
2744 #include "trace.h"
2745 @@ -42,7 +43,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
2746 kvm_vcpu_hvc_get_imm(vcpu));
2747 vcpu->stat.hvc_exit_stat++;
2748
2749 - ret = kvm_psci_call(vcpu);
2750 + ret = kvm_hvc_call_handler(vcpu);
2751 if (ret < 0) {
2752 vcpu_set_reg(vcpu, 0, ~0UL);
2753 return 1;
2754 @@ -53,7 +54,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
2755
2756 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
2757 {
2758 + /*
2759 + * "If an SMC instruction executed at Non-secure EL1 is
2760 + * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
2761 + * Trap exception, not a Secure Monitor Call exception [...]"
2762 + *
2763 + * We need to advance the PC after the trap, as it would
2764 + * otherwise return to the same address...
2765 + */
2766 vcpu_set_reg(vcpu, 0, ~0UL);
2767 + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
2768 return 1;
2769 }
2770
2771 diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
2772 index 3f9615582377..870828c364c5 100644
2773 --- a/arch/arm64/kvm/hyp-init.S
2774 +++ b/arch/arm64/kvm/hyp-init.S
2775 @@ -151,6 +151,7 @@ reset:
2776 mrs x5, sctlr_el2
2777 ldr x6, =SCTLR_ELx_FLAGS
2778 bic x5, x5, x6 // Clear SCTL_M and etc
2779 + pre_disable_mmu_workaround
2780 msr sctlr_el2, x5
2781 isb
2782
2783 diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
2784 index 12ee62d6d410..9c45c6af1f58 100644
2785 --- a/arch/arm64/kvm/hyp/entry.S
2786 +++ b/arch/arm64/kvm/hyp/entry.S
2787 @@ -196,3 +196,15 @@ alternative_endif
2788
2789 eret
2790 ENDPROC(__fpsimd_guest_restore)
2791 +
2792 +ENTRY(__qcom_hyp_sanitize_btac_predictors)
2793 + /**
2794 + * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700)
2795 + * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls
2796 + * b15-b0: contains SiP functionID
2797 + */
2798 + movz x0, #0x1700
2799 + movk x0, #0xc200, lsl #16
2800 + smc #0
2801 + ret
2802 +ENDPROC(__qcom_hyp_sanitize_btac_predictors)
2803 diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
2804 index 5170ce1021da..f49b53331d28 100644
2805 --- a/arch/arm64/kvm/hyp/hyp-entry.S
2806 +++ b/arch/arm64/kvm/hyp/hyp-entry.S
2807 @@ -15,6 +15,7 @@
2808 * along with this program. If not, see <http://www.gnu.org/licenses/>.
2809 */
2810
2811 +#include <linux/arm-smccc.h>
2812 #include <linux/linkage.h>
2813
2814 #include <asm/alternative.h>
2815 @@ -64,10 +65,11 @@ alternative_endif
2816 lsr x0, x1, #ESR_ELx_EC_SHIFT
2817
2818 cmp x0, #ESR_ELx_EC_HVC64
2819 + ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
2820 b.ne el1_trap
2821
2822 - mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest
2823 - cbnz x1, el1_trap // called HVC
2824 + mrs x1, vttbr_el2 // If vttbr is valid, the guest
2825 + cbnz x1, el1_hvc_guest // called HVC
2826
2827 /* Here, we're pretty sure the host called HVC. */
2828 ldp x0, x1, [sp], #16
2829 @@ -100,6 +102,20 @@ alternative_endif
2830
2831 eret
2832
2833 +el1_hvc_guest:
2834 + /*
2835 + * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
2836 + * The workaround has already been applied on the host,
2837 + * so let's quickly get back to the guest. We don't bother
2838 + * restoring x1, as it can be clobbered anyway.
2839 + */
2840 + ldr x1, [sp] // Guest's x0
2841 + eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
2842 + cbnz w1, el1_trap
2843 + mov x0, x1
2844 + add sp, sp, #16
2845 + eret
2846 +
2847 el1_trap:
2848 /*
2849 * x0: ESR_EC
2850 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
2851 index 945e79c641c4..79364d3455c0 100644
2852 --- a/arch/arm64/kvm/hyp/switch.c
2853 +++ b/arch/arm64/kvm/hyp/switch.c
2854 @@ -17,6 +17,9 @@
2855
2856 #include <linux/types.h>
2857 #include <linux/jump_label.h>
2858 +#include <uapi/linux/psci.h>
2859 +
2860 +#include <kvm/arm_psci.h>
2861
2862 #include <asm/kvm_asm.h>
2863 #include <asm/kvm_emulate.h>
2864 @@ -51,7 +54,7 @@ static void __hyp_text __activate_traps_vhe(void)
2865 val &= ~CPACR_EL1_FPEN;
2866 write_sysreg(val, cpacr_el1);
2867
2868 - write_sysreg(__kvm_hyp_vector, vbar_el1);
2869 + write_sysreg(kvm_get_hyp_vector(), vbar_el1);
2870 }
2871
2872 static void __hyp_text __activate_traps_nvhe(void)
2873 @@ -364,6 +367,14 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
2874 /* 0 falls through to be handled out of EL2 */
2875 }
2876
2877 + if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
2878 + u32 midr = read_cpuid_id();
2879 +
2880 + /* Apply BTAC predictors mitigation to all Falkor chips */
2881 + if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
2882 + __qcom_hyp_sanitize_btac_predictors();
2883 + }
2884 +
2885 fp_enabled = __fpsimd_enabled();
2886
2887 __sysreg_save_guest_state(guest_ctxt);
2888 diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
2889 index e88fb99c1561..21ba0b29621b 100644
2890 --- a/arch/arm64/lib/clear_user.S
2891 +++ b/arch/arm64/lib/clear_user.S
2892 @@ -21,7 +21,7 @@
2893
2894 .text
2895
2896 -/* Prototype: int __clear_user(void *addr, size_t sz)
2897 +/* Prototype: int __arch_clear_user(void *addr, size_t sz)
2898 * Purpose : clear some user memory
2899 * Params : addr - user memory address to clear
2900 * : sz - number of bytes to clear
2901 @@ -29,8 +29,8 @@
2902 *
2903 * Alignment fixed up by hardware.
2904 */
2905 -ENTRY(__clear_user)
2906 - uaccess_enable_not_uao x2, x3
2907 +ENTRY(__arch_clear_user)
2908 + uaccess_enable_not_uao x2, x3, x4
2909 mov x2, x1 // save the size for fixup return
2910 subs x1, x1, #8
2911 b.mi 2f
2912 @@ -50,9 +50,9 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
2913 b.mi 5f
2914 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
2915 5: mov x0, #0
2916 - uaccess_disable_not_uao x2
2917 + uaccess_disable_not_uao x2, x3
2918 ret
2919 -ENDPROC(__clear_user)
2920 +ENDPROC(__arch_clear_user)
2921
2922 .section .fixup,"ax"
2923 .align 2
2924 diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
2925 index 4b5d826895ff..20305d485046 100644
2926 --- a/arch/arm64/lib/copy_from_user.S
2927 +++ b/arch/arm64/lib/copy_from_user.S
2928 @@ -64,10 +64,10 @@
2929
2930 end .req x5
2931 ENTRY(__arch_copy_from_user)
2932 - uaccess_enable_not_uao x3, x4
2933 + uaccess_enable_not_uao x3, x4, x5
2934 add end, x0, x2
2935 #include "copy_template.S"
2936 - uaccess_disable_not_uao x3
2937 + uaccess_disable_not_uao x3, x4
2938 mov x0, #0 // Nothing to copy
2939 ret
2940 ENDPROC(__arch_copy_from_user)
2941 diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
2942 index b24a830419ad..54b75deb1d16 100644
2943 --- a/arch/arm64/lib/copy_in_user.S
2944 +++ b/arch/arm64/lib/copy_in_user.S
2945 @@ -64,14 +64,15 @@
2946 .endm
2947
2948 end .req x5
2949 -ENTRY(raw_copy_in_user)
2950 - uaccess_enable_not_uao x3, x4
2951 +
2952 +ENTRY(__arch_copy_in_user)
2953 + uaccess_enable_not_uao x3, x4, x5
2954 add end, x0, x2
2955 #include "copy_template.S"
2956 - uaccess_disable_not_uao x3
2957 + uaccess_disable_not_uao x3, x4
2958 mov x0, #0
2959 ret
2960 -ENDPROC(raw_copy_in_user)
2961 +ENDPROC(__arch_copy_in_user)
2962
2963 .section .fixup,"ax"
2964 .align 2
2965 diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
2966 index 351f0766f7a6..fda6172d6b88 100644
2967 --- a/arch/arm64/lib/copy_to_user.S
2968 +++ b/arch/arm64/lib/copy_to_user.S
2969 @@ -63,10 +63,10 @@
2970
2971 end .req x5
2972 ENTRY(__arch_copy_to_user)
2973 - uaccess_enable_not_uao x3, x4
2974 + uaccess_enable_not_uao x3, x4, x5
2975 add end, x0, x2
2976 #include "copy_template.S"
2977 - uaccess_disable_not_uao x3
2978 + uaccess_disable_not_uao x3, x4
2979 mov x0, #0
2980 ret
2981 ENDPROC(__arch_copy_to_user)
2982 diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
2983 index 7f1dbe962cf5..91464e7f77cc 100644
2984 --- a/arch/arm64/mm/cache.S
2985 +++ b/arch/arm64/mm/cache.S
2986 @@ -49,7 +49,7 @@ ENTRY(flush_icache_range)
2987 * - end - virtual end address of region
2988 */
2989 ENTRY(__flush_cache_user_range)
2990 - uaccess_ttbr0_enable x2, x3
2991 + uaccess_ttbr0_enable x2, x3, x4
2992 dcache_line_size x2, x3
2993 sub x3, x2, #1
2994 bic x4, x0, x3
2995 @@ -72,7 +72,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
2996 isb
2997 mov x0, #0
2998 1:
2999 - uaccess_ttbr0_disable x1
3000 + uaccess_ttbr0_disable x1, x2
3001 ret
3002 9:
3003 mov x0, #-EFAULT
3004 diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
3005 index ab9f5f0fb2c7..9284788733d6 100644
3006 --- a/arch/arm64/mm/context.c
3007 +++ b/arch/arm64/mm/context.c
3008 @@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending;
3009
3010 #define ASID_MASK (~GENMASK(asid_bits - 1, 0))
3011 #define ASID_FIRST_VERSION (1UL << asid_bits)
3012 -#define NUM_USER_ASIDS ASID_FIRST_VERSION
3013 +
3014 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
3015 +#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1)
3016 +#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1)
3017 +#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK)
3018 +#else
3019 +#define NUM_USER_ASIDS (ASID_FIRST_VERSION)
3020 +#define asid2idx(asid) ((asid) & ~ASID_MASK)
3021 +#define idx2asid(idx) asid2idx(idx)
3022 +#endif
3023
3024 /* Get the ASIDBits supported by the current CPU */
3025 static u32 get_cpu_asid_bits(void)
3026 @@ -79,13 +88,6 @@ void verify_cpu_asid_bits(void)
3027 }
3028 }
3029
3030 -static void set_reserved_asid_bits(void)
3031 -{
3032 - if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) &&
3033 - cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003))
3034 - __set_bit(FALKOR_RESERVED_ASID, asid_map);
3035 -}
3036 -
3037 static void flush_context(unsigned int cpu)
3038 {
3039 int i;
3040 @@ -94,8 +96,6 @@ static void flush_context(unsigned int cpu)
3041 /* Update the list of reserved ASIDs and the ASID bitmap. */
3042 bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
3043
3044 - set_reserved_asid_bits();
3045 -
3046 /*
3047 * Ensure the generation bump is observed before we xchg the
3048 * active_asids.
3049 @@ -113,7 +113,7 @@ static void flush_context(unsigned int cpu)
3050 */
3051 if (asid == 0)
3052 asid = per_cpu(reserved_asids, i);
3053 - __set_bit(asid & ~ASID_MASK, asid_map);
3054 + __set_bit(asid2idx(asid), asid_map);
3055 per_cpu(reserved_asids, i) = asid;
3056 }
3057
3058 @@ -165,16 +165,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3059 * We had a valid ASID in a previous life, so try to re-use
3060 * it if possible.
3061 */
3062 - asid &= ~ASID_MASK;
3063 - if (!__test_and_set_bit(asid, asid_map))
3064 + if (!__test_and_set_bit(asid2idx(asid), asid_map))
3065 return newasid;
3066 }
3067
3068 /*
3069 * Allocate a free ASID. If we can't find one, take a note of the
3070 - * currently active ASIDs and mark the TLBs as requiring flushes.
3071 - * We always count from ASID #1, as we use ASID #0 when setting a
3072 - * reserved TTBR0 for the init_mm.
3073 + * currently active ASIDs and mark the TLBs as requiring flushes. We
3074 + * always count from ASID #2 (index 1), as we use ASID #0 when setting
3075 + * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
3076 + * pairs.
3077 */
3078 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3079 if (asid != NUM_USER_ASIDS)
3080 @@ -191,7 +191,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3081 set_asid:
3082 __set_bit(asid, asid_map);
3083 cur_idx = asid;
3084 - return asid | generation;
3085 + return idx2asid(asid) | generation;
3086 }
3087
3088 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
3089 @@ -227,6 +227,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
3090 raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
3091
3092 switch_mm_fastpath:
3093 +
3094 + arm64_apply_bp_hardening();
3095 +
3096 /*
3097 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
3098 * emulating PAN.
3099 @@ -235,6 +238,15 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
3100 cpu_switch_mm(mm->pgd, mm);
3101 }
3102
3103 +/* Errata workaround post TTBRx_EL1 update. */
3104 +asmlinkage void post_ttbr_update_workaround(void)
3105 +{
3106 + asm(ALTERNATIVE("nop; nop; nop",
3107 + "ic iallu; dsb nsh; isb",
3108 + ARM64_WORKAROUND_CAVIUM_27456,
3109 + CONFIG_CAVIUM_ERRATUM_27456));
3110 +}
3111 +
3112 static int asids_init(void)
3113 {
3114 asid_bits = get_cpu_asid_bits();
3115 @@ -250,8 +262,6 @@ static int asids_init(void)
3116 panic("Failed to allocate bitmap for %lu ASIDs\n",
3117 NUM_USER_ASIDS);
3118
3119 - set_reserved_asid_bits();
3120 -
3121 pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
3122 return 0;
3123 }
3124 diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
3125 index b64958b23a7f..5edb706aacb0 100644
3126 --- a/arch/arm64/mm/fault.c
3127 +++ b/arch/arm64/mm/fault.c
3128 @@ -242,7 +242,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
3129 if (fsc_type == ESR_ELx_FSC_PERM)
3130 return true;
3131
3132 - if (addr < USER_DS && system_uses_ttbr0_pan())
3133 + if (addr < TASK_SIZE && system_uses_ttbr0_pan())
3134 return fsc_type == ESR_ELx_FSC_FAULT &&
3135 (regs->pstate & PSR_PAN_BIT);
3136
3137 @@ -426,7 +426,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
3138 mm_flags |= FAULT_FLAG_WRITE;
3139 }
3140
3141 - if (addr < USER_DS && is_permission_fault(esr, regs, addr)) {
3142 + if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) {
3143 /* regs->orig_addr_limit may be 0 if we entered from EL0 */
3144 if (regs->orig_addr_limit == KERNEL_DS)
3145 die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
3146 @@ -751,6 +751,29 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
3147 arm64_notify_die("", regs, &info, esr);
3148 }
3149
3150 +asmlinkage void __exception do_el0_irq_bp_hardening(void)
3151 +{
3152 + /* PC has already been checked in entry.S */
3153 + arm64_apply_bp_hardening();
3154 +}
3155 +
3156 +asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
3157 + unsigned int esr,
3158 + struct pt_regs *regs)
3159 +{
3160 + /*
3161 + * We've taken an instruction abort from userspace and not yet
3162 + * re-enabled IRQs. If the address is a kernel address, apply
3163 + * BP hardening prior to enabling IRQs and pre-emption.
3164 + */
3165 + if (addr > TASK_SIZE)
3166 + arm64_apply_bp_hardening();
3167 +
3168 + local_irq_enable();
3169 + do_mem_abort(addr, esr, regs);
3170 +}
3171 +
3172 +
3173 /*
3174 * Handle stack alignment exceptions.
3175 */
3176 @@ -761,6 +784,12 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
3177 struct siginfo info;
3178 struct task_struct *tsk = current;
3179
3180 + if (user_mode(regs)) {
3181 + if (instruction_pointer(regs) > TASK_SIZE)
3182 + arm64_apply_bp_hardening();
3183 + local_irq_enable();
3184 + }
3185 +
3186 if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
3187 pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
3188 tsk->comm, task_pid_nr(tsk),
3189 @@ -820,6 +849,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
3190 if (interrupts_enabled(regs))
3191 trace_hardirqs_off();
3192
3193 + if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
3194 + arm64_apply_bp_hardening();
3195 +
3196 if (!inf->fn(addr, esr, regs)) {
3197 rv = 1;
3198 } else {
3199 diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
3200 index f1eb15e0e864..fa20124c19d5 100644
3201 --- a/arch/arm64/mm/mmu.c
3202 +++ b/arch/arm64/mm/mmu.c
3203 @@ -117,6 +117,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
3204 if ((old | new) & PTE_CONT)
3205 return false;
3206
3207 + /* Transitioning from Global to Non-Global is safe */
3208 + if (((old ^ new) == PTE_NG) && (new & PTE_NG))
3209 + return true;
3210 +
3211 return ((old ^ new) & ~mask) == 0;
3212 }
3213
3214 @@ -525,6 +529,37 @@ static int __init parse_rodata(char *arg)
3215 }
3216 early_param("rodata", parse_rodata);
3217
3218 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
3219 +static int __init map_entry_trampoline(void)
3220 +{
3221 + extern char __entry_tramp_text_start[];
3222 +
3223 + pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
3224 + phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
3225 +
3226 + /* The trampoline is always mapped and can therefore be global */
3227 + pgprot_val(prot) &= ~PTE_NG;
3228 +
3229 + /* Map only the text into the trampoline page table */
3230 + memset(tramp_pg_dir, 0, PGD_SIZE);
3231 + __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
3232 + prot, pgd_pgtable_alloc, 0);
3233 +
3234 + /* Map both the text and data into the kernel page table */
3235 + __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
3236 + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
3237 + extern char __entry_tramp_data_start[];
3238 +
3239 + __set_fixmap(FIX_ENTRY_TRAMP_DATA,
3240 + __pa_symbol(__entry_tramp_data_start),
3241 + PAGE_KERNEL_RO);
3242 + }
3243 +
3244 + return 0;
3245 +}
3246 +core_initcall(map_entry_trampoline);
3247 +#endif
3248 +
3249 /*
3250 * Create fine-grained mappings for the kernel.
3251 */
3252 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
3253 index 877d42fb0df6..27058f3fd132 100644
3254 --- a/arch/arm64/mm/proc.S
3255 +++ b/arch/arm64/mm/proc.S
3256 @@ -86,7 +86,7 @@ ENDPROC(cpu_do_suspend)
3257 *
3258 * x0: Address of context pointer
3259 */
3260 - .pushsection ".idmap.text", "ax"
3261 + .pushsection ".idmap.text", "awx"
3262 ENTRY(cpu_do_resume)
3263 ldp x2, x3, [x0]
3264 ldp x4, x5, [x0, #16]
3265 @@ -138,16 +138,30 @@ ENDPROC(cpu_do_resume)
3266 * - pgd_phys - physical address of new TTB
3267 */
3268 ENTRY(cpu_do_switch_mm)
3269 - pre_ttbr0_update_workaround x0, x2, x3
3270 + mrs x2, ttbr1_el1
3271 mmid x1, x1 // get mm->context.id
3272 - bfi x0, x1, #48, #16 // set the ASID
3273 - msr ttbr0_el1, x0 // set TTBR0
3274 +#ifdef CONFIG_ARM64_SW_TTBR0_PAN
3275 + bfi x0, x1, #48, #16 // set the ASID field in TTBR0
3276 +#endif
3277 + bfi x2, x1, #48, #16 // set the ASID
3278 + msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set)
3279 isb
3280 - post_ttbr0_update_workaround
3281 - ret
3282 + msr ttbr0_el1, x0 // now update TTBR0
3283 + isb
3284 + b post_ttbr_update_workaround // Back to C code...
3285 ENDPROC(cpu_do_switch_mm)
3286
3287 - .pushsection ".idmap.text", "ax"
3288 + .pushsection ".idmap.text", "awx"
3289 +
3290 +.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
3291 + adrp \tmp1, empty_zero_page
3292 + msr ttbr1_el1, \tmp2
3293 + isb
3294 + tlbi vmalle1
3295 + dsb nsh
3296 + isb
3297 +.endm
3298 +
3299 /*
3300 * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
3301 *
3302 @@ -158,13 +172,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
3303 mrs x2, daif
3304 msr daifset, #0xf
3305
3306 - adrp x1, empty_zero_page
3307 - msr ttbr1_el1, x1
3308 - isb
3309 -
3310 - tlbi vmalle1
3311 - dsb nsh
3312 - isb
3313 + __idmap_cpu_set_reserved_ttbr1 x1, x3
3314
3315 msr ttbr1_el1, x0
3316 isb
3317 @@ -175,13 +183,196 @@ ENTRY(idmap_cpu_replace_ttbr1)
3318 ENDPROC(idmap_cpu_replace_ttbr1)
3319 .popsection
3320
3321 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
3322 + .pushsection ".idmap.text", "awx"
3323 +
3324 + .macro __idmap_kpti_get_pgtable_ent, type
3325 + dc cvac, cur_\()\type\()p // Ensure any existing dirty
3326 + dmb sy // lines are written back before
3327 + ldr \type, [cur_\()\type\()p] // loading the entry
3328 + tbz \type, #0, next_\()\type // Skip invalid entries
3329 + .endm
3330 +
3331 + .macro __idmap_kpti_put_pgtable_ent_ng, type
3332 + orr \type, \type, #PTE_NG // Same bit for blocks and pages
3333 + str \type, [cur_\()\type\()p] // Update the entry and ensure it
3334 + dc civac, cur_\()\type\()p // is visible to all CPUs.
3335 + .endm
3336 +
3337 +/*
3338 + * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
3339 + *
3340 + * Called exactly once from stop_machine context by each CPU found during boot.
3341 + */
3342 +__idmap_kpti_flag:
3343 + .long 1
3344 +ENTRY(idmap_kpti_install_ng_mappings)
3345 + cpu .req w0
3346 + num_cpus .req w1
3347 + swapper_pa .req x2
3348 + swapper_ttb .req x3
3349 + flag_ptr .req x4
3350 + cur_pgdp .req x5
3351 + end_pgdp .req x6
3352 + pgd .req x7
3353 + cur_pudp .req x8
3354 + end_pudp .req x9
3355 + pud .req x10
3356 + cur_pmdp .req x11
3357 + end_pmdp .req x12
3358 + pmd .req x13
3359 + cur_ptep .req x14
3360 + end_ptep .req x15
3361 + pte .req x16
3362 +
3363 + mrs swapper_ttb, ttbr1_el1
3364 + adr flag_ptr, __idmap_kpti_flag
3365 +
3366 + cbnz cpu, __idmap_kpti_secondary
3367 +
3368 + /* We're the boot CPU. Wait for the others to catch up */
3369 + sevl
3370 +1: wfe
3371 + ldaxr w18, [flag_ptr]
3372 + eor w18, w18, num_cpus
3373 + cbnz w18, 1b
3374 +
3375 + /* We need to walk swapper, so turn off the MMU. */
3376 + mrs x18, sctlr_el1
3377 + bic x18, x18, #SCTLR_ELx_M
3378 + msr sctlr_el1, x18
3379 + isb
3380 +
3381 + /* Everybody is enjoying the idmap, so we can rewrite swapper. */
3382 + /* PGD */
3383 + mov cur_pgdp, swapper_pa
3384 + add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
3385 +do_pgd: __idmap_kpti_get_pgtable_ent pgd
3386 + tbnz pgd, #1, walk_puds
3387 + __idmap_kpti_put_pgtable_ent_ng pgd
3388 +next_pgd:
3389 + add cur_pgdp, cur_pgdp, #8
3390 + cmp cur_pgdp, end_pgdp
3391 + b.ne do_pgd
3392 +
3393 + /* Publish the updated tables and nuke all the TLBs */
3394 + dsb sy
3395 + tlbi vmalle1is
3396 + dsb ish
3397 + isb
3398 +
3399 + /* We're done: fire up the MMU again */
3400 + mrs x18, sctlr_el1
3401 + orr x18, x18, #SCTLR_ELx_M
3402 + msr sctlr_el1, x18
3403 + isb
3404 +
3405 + /* Set the flag to zero to indicate that we're all done */
3406 + str wzr, [flag_ptr]
3407 + ret
3408 +
3409 + /* PUD */
3410 +walk_puds:
3411 + .if CONFIG_PGTABLE_LEVELS > 3
3412 + pte_to_phys cur_pudp, pgd
3413 + add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
3414 +do_pud: __idmap_kpti_get_pgtable_ent pud
3415 + tbnz pud, #1, walk_pmds
3416 + __idmap_kpti_put_pgtable_ent_ng pud
3417 +next_pud:
3418 + add cur_pudp, cur_pudp, 8
3419 + cmp cur_pudp, end_pudp
3420 + b.ne do_pud
3421 + b next_pgd
3422 + .else /* CONFIG_PGTABLE_LEVELS <= 3 */
3423 + mov pud, pgd
3424 + b walk_pmds
3425 +next_pud:
3426 + b next_pgd
3427 + .endif
3428 +
3429 + /* PMD */
3430 +walk_pmds:
3431 + .if CONFIG_PGTABLE_LEVELS > 2
3432 + pte_to_phys cur_pmdp, pud
3433 + add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
3434 +do_pmd: __idmap_kpti_get_pgtable_ent pmd
3435 + tbnz pmd, #1, walk_ptes
3436 + __idmap_kpti_put_pgtable_ent_ng pmd
3437 +next_pmd:
3438 + add cur_pmdp, cur_pmdp, #8
3439 + cmp cur_pmdp, end_pmdp
3440 + b.ne do_pmd
3441 + b next_pud
3442 + .else /* CONFIG_PGTABLE_LEVELS <= 2 */
3443 + mov pmd, pud
3444 + b walk_ptes
3445 +next_pmd:
3446 + b next_pud
3447 + .endif
3448 +
3449 + /* PTE */
3450 +walk_ptes:
3451 + pte_to_phys cur_ptep, pmd
3452 + add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
3453 +do_pte: __idmap_kpti_get_pgtable_ent pte
3454 + __idmap_kpti_put_pgtable_ent_ng pte
3455 +next_pte:
3456 + add cur_ptep, cur_ptep, #8
3457 + cmp cur_ptep, end_ptep
3458 + b.ne do_pte
3459 + b next_pmd
3460 +
3461 + /* Secondary CPUs end up here */
3462 +__idmap_kpti_secondary:
3463 + /* Uninstall swapper before surgery begins */
3464 + __idmap_cpu_set_reserved_ttbr1 x18, x17
3465 +
3466 + /* Increment the flag to let the boot CPU we're ready */
3467 +1: ldxr w18, [flag_ptr]
3468 + add w18, w18, #1
3469 + stxr w17, w18, [flag_ptr]
3470 + cbnz w17, 1b
3471 +
3472 + /* Wait for the boot CPU to finish messing around with swapper */
3473 + sevl
3474 +1: wfe
3475 + ldxr w18, [flag_ptr]
3476 + cbnz w18, 1b
3477 +
3478 + /* All done, act like nothing happened */
3479 + msr ttbr1_el1, swapper_ttb
3480 + isb
3481 + ret
3482 +
3483 + .unreq cpu
3484 + .unreq num_cpus
3485 + .unreq swapper_pa
3486 + .unreq swapper_ttb
3487 + .unreq flag_ptr
3488 + .unreq cur_pgdp
3489 + .unreq end_pgdp
3490 + .unreq pgd
3491 + .unreq cur_pudp
3492 + .unreq end_pudp
3493 + .unreq pud
3494 + .unreq cur_pmdp
3495 + .unreq end_pmdp
3496 + .unreq pmd
3497 + .unreq cur_ptep
3498 + .unreq end_ptep
3499 + .unreq pte
3500 +ENDPROC(idmap_kpti_install_ng_mappings)
3501 + .popsection
3502 +#endif
3503 +
3504 /*
3505 * __cpu_setup
3506 *
3507 * Initialise the processor for turning the MMU on. Return in x0 the
3508 * value of the SCTLR_EL1 register.
3509 */
3510 - .pushsection ".idmap.text", "ax"
3511 + .pushsection ".idmap.text", "awx"
3512 ENTRY(__cpu_setup)
3513 tlbi vmalle1 // Invalidate local TLB
3514 dsb nsh
3515 @@ -225,7 +416,7 @@ ENTRY(__cpu_setup)
3516 * both user and kernel.
3517 */
3518 ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
3519 - TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
3520 + TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
3521 tcr_set_idmap_t0sz x10, x9
3522
3523 /*
3524 diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
3525 index 401ceb71540c..c5f05c4a4d00 100644
3526 --- a/arch/arm64/xen/hypercall.S
3527 +++ b/arch/arm64/xen/hypercall.S
3528 @@ -101,12 +101,12 @@ ENTRY(privcmd_call)
3529 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
3530 * is enabled (it implies that hardware UAO and PAN disabled).
3531 */
3532 - uaccess_ttbr0_enable x6, x7
3533 + uaccess_ttbr0_enable x6, x7, x8
3534 hvc XEN_IMM
3535
3536 /*
3537 * Disable userspace access from kernel once the hyp call completed.
3538 */
3539 - uaccess_ttbr0_disable x6
3540 + uaccess_ttbr0_disable x6, x7
3541 ret
3542 ENDPROC(privcmd_call);
3543 diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
3544 index b39a388825ae..8ace89617c1c 100644
3545 --- a/arch/mn10300/mm/misalignment.c
3546 +++ b/arch/mn10300/mm/misalignment.c
3547 @@ -437,7 +437,7 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code)
3548
3549 info.si_signo = SIGSEGV;
3550 info.si_errno = 0;
3551 - info.si_code = 0;
3552 + info.si_code = SEGV_MAPERR;
3553 info.si_addr = (void *) regs->pc;
3554 force_sig_info(SIGSEGV, &info, current);
3555 return;
3556 diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
3557 index 803e9e756f77..8d8437169b5e 100644
3558 --- a/arch/openrisc/kernel/traps.c
3559 +++ b/arch/openrisc/kernel/traps.c
3560 @@ -306,12 +306,12 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
3561 siginfo_t info;
3562
3563 if (user_mode(regs)) {
3564 - /* Send a SIGSEGV */
3565 - info.si_signo = SIGSEGV;
3566 + /* Send a SIGBUS */
3567 + info.si_signo = SIGBUS;
3568 info.si_errno = 0;
3569 - /* info.si_code has been set above */
3570 - info.si_addr = (void *)address;
3571 - force_sig_info(SIGSEGV, &info, current);
3572 + info.si_code = BUS_ADRALN;
3573 + info.si_addr = (void __user *)address;
3574 + force_sig_info(SIGBUS, &info, current);
3575 } else {
3576 printk("KERNEL: Unaligned Access 0x%.8lx\n", address);
3577 show_registers(regs);
3578 diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
3579 index f058e0c3e4d4..fd1d6c83f0c0 100644
3580 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
3581 +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
3582 @@ -141,6 +141,7 @@ static struct shash_alg alg = {
3583 .cra_name = "crc32c",
3584 .cra_driver_name = "crc32c-vpmsum",
3585 .cra_priority = 200,
3586 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3587 .cra_blocksize = CHKSUM_BLOCK_SIZE,
3588 .cra_ctxsize = sizeof(u32),
3589 .cra_module = THIS_MODULE,
3590 diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
3591 index f0461618bf7b..eca3f9c68907 100644
3592 --- a/arch/powerpc/include/asm/hvcall.h
3593 +++ b/arch/powerpc/include/asm/hvcall.h
3594 @@ -353,6 +353,7 @@
3595 #define PROC_TABLE_GTSE 0x01
3596
3597 #ifndef __ASSEMBLY__
3598 +#include <linux/types.h>
3599
3600 /**
3601 * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
3602 diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
3603 index b12b8eb39c29..648160334abf 100644
3604 --- a/arch/powerpc/kvm/Kconfig
3605 +++ b/arch/powerpc/kvm/Kconfig
3606 @@ -68,7 +68,7 @@ config KVM_BOOK3S_64
3607 select KVM_BOOK3S_64_HANDLER
3608 select KVM
3609 select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
3610 - select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV)
3611 + select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV)
3612 ---help---
3613 Support running unmodified book3s_64 and book3s_32 guest kernels
3614 in virtual machines on book3s_64 host processors.
3615 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
3616 index 8d43cf205d34..f48e3379a18a 100644
3617 --- a/arch/powerpc/kvm/book3s_hv.c
3618 +++ b/arch/powerpc/kvm/book3s_hv.c
3619 @@ -999,8 +999,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
3620 struct kvm *kvm = vcpu->kvm;
3621 struct kvm_vcpu *tvcpu;
3622
3623 - if (!cpu_has_feature(CPU_FTR_ARCH_300))
3624 - return EMULATE_FAIL;
3625 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
3626 return RESUME_GUEST;
3627 if (get_op(inst) != 31)
3628 @@ -1050,6 +1048,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
3629 return RESUME_GUEST;
3630 }
3631
3632 +/* Called with vcpu->arch.vcore->lock held */
3633 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
3634 struct task_struct *tsk)
3635 {
3636 @@ -1169,7 +1168,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
3637 swab32(vcpu->arch.emul_inst) :
3638 vcpu->arch.emul_inst;
3639 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
3640 + /* Need vcore unlocked to call kvmppc_get_last_inst */
3641 + spin_unlock(&vcpu->arch.vcore->lock);
3642 r = kvmppc_emulate_debug_inst(run, vcpu);
3643 + spin_lock(&vcpu->arch.vcore->lock);
3644 } else {
3645 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
3646 r = RESUME_GUEST;
3647 @@ -1184,8 +1186,13 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
3648 */
3649 case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
3650 r = EMULATE_FAIL;
3651 - if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG)
3652 + if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
3653 + cpu_has_feature(CPU_FTR_ARCH_300)) {
3654 + /* Need vcore unlocked to call kvmppc_get_last_inst */
3655 + spin_unlock(&vcpu->arch.vcore->lock);
3656 r = kvmppc_emulate_doorbell_instr(vcpu);
3657 + spin_lock(&vcpu->arch.vcore->lock);
3658 + }
3659 if (r == EMULATE_FAIL) {
3660 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
3661 r = RESUME_GUEST;
3662 @@ -2889,13 +2896,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
3663 /* make sure updates to secondary vcpu structs are visible now */
3664 smp_mb();
3665
3666 + preempt_enable();
3667 +
3668 for (sub = 0; sub < core_info.n_subcores; ++sub) {
3669 pvc = core_info.vc[sub];
3670 post_guest_process(pvc, pvc == vc);
3671 }
3672
3673 spin_lock(&vc->lock);
3674 - preempt_enable();
3675
3676 out:
3677 vc->vcore_state = VCORE_INACTIVE;
3678 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
3679 index c85ac5c83bd4..2b3194b9608f 100644
3680 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
3681 +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
3682 @@ -1387,6 +1387,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
3683 blt deliver_guest_interrupt
3684
3685 guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
3686 + /* Save more register state */
3687 + mfdar r6
3688 + mfdsisr r7
3689 + std r6, VCPU_DAR(r9)
3690 + stw r7, VCPU_DSISR(r9)
3691 + /* don't overwrite fault_dar/fault_dsisr if HDSI */
3692 + cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
3693 + beq mc_cont
3694 + std r6, VCPU_FAULT_DAR(r9)
3695 + stw r7, VCPU_FAULT_DSISR(r9)
3696 +
3697 + /* See if it is a machine check */
3698 + cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
3699 + beq machine_check_realmode
3700 +mc_cont:
3701 +#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3702 + addi r3, r9, VCPU_TB_RMEXIT
3703 + mr r4, r9
3704 + bl kvmhv_accumulate_time
3705 +#endif
3706 #ifdef CONFIG_KVM_XICS
3707 /* We are exiting, pull the VP from the XIVE */
3708 lwz r0, VCPU_XIVE_PUSHED(r9)
3709 @@ -1424,26 +1444,6 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
3710 eieio
3711 1:
3712 #endif /* CONFIG_KVM_XICS */
3713 - /* Save more register state */
3714 - mfdar r6
3715 - mfdsisr r7
3716 - std r6, VCPU_DAR(r9)
3717 - stw r7, VCPU_DSISR(r9)
3718 - /* don't overwrite fault_dar/fault_dsisr if HDSI */
3719 - cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
3720 - beq mc_cont
3721 - std r6, VCPU_FAULT_DAR(r9)
3722 - stw r7, VCPU_FAULT_DSISR(r9)
3723 -
3724 - /* See if it is a machine check */
3725 - cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
3726 - beq machine_check_realmode
3727 -mc_cont:
3728 -#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3729 - addi r3, r9, VCPU_TB_RMEXIT
3730 - mr r4, r9
3731 - bl kvmhv_accumulate_time
3732 -#endif
3733
3734 mr r3, r12
3735 /* Increment exit count, poke other threads to exit */
3736 diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
3737 index 992e630c227b..6f4985f357c6 100644
3738 --- a/arch/s390/crypto/crc32-vx.c
3739 +++ b/arch/s390/crypto/crc32-vx.c
3740 @@ -238,6 +238,7 @@ static struct shash_alg crc32_vx_algs[] = {
3741 .cra_name = "crc32",
3742 .cra_driver_name = "crc32-vx",
3743 .cra_priority = 200,
3744 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3745 .cra_blocksize = CRC32_BLOCK_SIZE,
3746 .cra_ctxsize = sizeof(struct crc_ctx),
3747 .cra_module = THIS_MODULE,
3748 @@ -258,6 +259,7 @@ static struct shash_alg crc32_vx_algs[] = {
3749 .cra_name = "crc32be",
3750 .cra_driver_name = "crc32be-vx",
3751 .cra_priority = 200,
3752 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3753 .cra_blocksize = CRC32_BLOCK_SIZE,
3754 .cra_ctxsize = sizeof(struct crc_ctx),
3755 .cra_module = THIS_MODULE,
3756 @@ -278,6 +280,7 @@ static struct shash_alg crc32_vx_algs[] = {
3757 .cra_name = "crc32c",
3758 .cra_driver_name = "crc32c-vx",
3759 .cra_priority = 200,
3760 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3761 .cra_blocksize = CRC32_BLOCK_SIZE,
3762 .cra_ctxsize = sizeof(struct crc_ctx),
3763 .cra_module = THIS_MODULE,
3764 diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
3765 index 57cff00cad17..b3770bb26211 100644
3766 --- a/arch/sh/kernel/traps_32.c
3767 +++ b/arch/sh/kernel/traps_32.c
3768 @@ -609,7 +609,8 @@ asmlinkage void do_divide_error(unsigned long r4)
3769 break;
3770 }
3771
3772 - force_sig_info(SIGFPE, &info, current);
3773 + info.si_signo = SIGFPE;
3774 + force_sig_info(info.si_signo, &info, current);
3775 }
3776 #endif
3777
3778 diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
3779 index d1064e46efe8..8aa664638c3c 100644
3780 --- a/arch/sparc/crypto/crc32c_glue.c
3781 +++ b/arch/sparc/crypto/crc32c_glue.c
3782 @@ -133,6 +133,7 @@ static struct shash_alg alg = {
3783 .cra_name = "crc32c",
3784 .cra_driver_name = "crc32c-sparc64",
3785 .cra_priority = SPARC_CR_OPCODE_PRIORITY,
3786 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3787 .cra_blocksize = CHKSUM_BLOCK_SIZE,
3788 .cra_ctxsize = sizeof(u32),
3789 .cra_alignmask = 7,
3790 diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
3791 index 27226df3f7d8..c8d9cdacbf10 100644
3792 --- a/arch/x86/crypto/crc32-pclmul_glue.c
3793 +++ b/arch/x86/crypto/crc32-pclmul_glue.c
3794 @@ -162,6 +162,7 @@ static struct shash_alg alg = {
3795 .cra_name = "crc32",
3796 .cra_driver_name = "crc32-pclmul",
3797 .cra_priority = 200,
3798 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3799 .cra_blocksize = CHKSUM_BLOCK_SIZE,
3800 .cra_ctxsize = sizeof(u32),
3801 .cra_module = THIS_MODULE,
3802 diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
3803 index c194d5717ae5..5773e1161072 100644
3804 --- a/arch/x86/crypto/crc32c-intel_glue.c
3805 +++ b/arch/x86/crypto/crc32c-intel_glue.c
3806 @@ -226,6 +226,7 @@ static struct shash_alg alg = {
3807 .cra_name = "crc32c",
3808 .cra_driver_name = "crc32c-intel",
3809 .cra_priority = 200,
3810 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
3811 .cra_blocksize = CHKSUM_BLOCK_SIZE,
3812 .cra_ctxsize = sizeof(u32),
3813 .cra_module = THIS_MODULE,
3814 diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
3815 index e32142bc071d..28c372003e44 100644
3816 --- a/arch/x86/crypto/poly1305_glue.c
3817 +++ b/arch/x86/crypto/poly1305_glue.c
3818 @@ -164,7 +164,6 @@ static struct shash_alg alg = {
3819 .init = poly1305_simd_init,
3820 .update = poly1305_simd_update,
3821 .final = crypto_poly1305_final,
3822 - .setkey = crypto_poly1305_setkey,
3823 .descsize = sizeof(struct poly1305_simd_desc_ctx),
3824 .base = {
3825 .cra_name = "poly1305",
3826 diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
3827 index 36870b26067a..d08805032f01 100644
3828 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
3829 +++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
3830 @@ -57,10 +57,12 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
3831 {
3832 unsigned int j;
3833
3834 - state->lens[0] = 0;
3835 - state->lens[1] = 1;
3836 - state->lens[2] = 2;
3837 - state->lens[3] = 3;
3838 + /* initially all lanes are unused */
3839 + state->lens[0] = 0xFFFFFFFF00000000;
3840 + state->lens[1] = 0xFFFFFFFF00000001;
3841 + state->lens[2] = 0xFFFFFFFF00000002;
3842 + state->lens[3] = 0xFFFFFFFF00000003;
3843 +
3844 state->unused_lanes = 0xFF03020100;
3845 for (j = 0; j < 4; j++)
3846 state->ldata[j].job_in_lane = NULL;
3847 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
3848 index 0fce8d73403c..beb7f8795bc1 100644
3849 --- a/arch/x86/kvm/mmu.c
3850 +++ b/arch/x86/kvm/mmu.c
3851 @@ -3784,7 +3784,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
3852 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
3853 {
3854 if (unlikely(!lapic_in_kernel(vcpu) ||
3855 - kvm_event_needs_reinjection(vcpu)))
3856 + kvm_event_needs_reinjection(vcpu) ||
3857 + vcpu->arch.exception.pending))
3858 return false;
3859
3860 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
3861 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
3862 index 0ae4b1a86168..0ea909ca45c2 100644
3863 --- a/arch/x86/kvm/vmx.c
3864 +++ b/arch/x86/kvm/vmx.c
3865 @@ -5322,14 +5322,15 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
3866
3867 if (is_guest_mode(vcpu) &&
3868 vector == vmx->nested.posted_intr_nv) {
3869 - /* the PIR and ON have been set by L1. */
3870 - kvm_vcpu_trigger_posted_interrupt(vcpu, true);
3871 /*
3872 * If a posted intr is not recognized by hardware,
3873 * we will accomplish it in the next vmentry.
3874 */
3875 vmx->nested.pi_pending = true;
3876 kvm_make_request(KVM_REQ_EVENT, vcpu);
3877 + /* the PIR and ON have been set by L1. */
3878 + if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true))
3879 + kvm_vcpu_kick(vcpu);
3880 return 0;
3881 }
3882 return -1;
3883 @@ -11245,7 +11246,6 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
3884 if (block_nested_events)
3885 return -EBUSY;
3886 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3887 - vcpu->arch.exception.pending = false;
3888 return 0;
3889 }
3890
3891 diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
3892 index d0b95b7a90b4..6d112d8f799c 100644
3893 --- a/arch/x86/kvm/x86.h
3894 +++ b/arch/x86/kvm/x86.h
3895 @@ -12,6 +12,7 @@
3896
3897 static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
3898 {
3899 + vcpu->arch.exception.pending = false;
3900 vcpu->arch.exception.injected = false;
3901 }
3902
3903 diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
3904 index eaaf1ebcc7a4..5bfbc1c401d4 100644
3905 --- a/arch/xtensa/include/asm/futex.h
3906 +++ b/arch/xtensa/include/asm/futex.h
3907 @@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
3908 u32 oldval, u32 newval)
3909 {
3910 int ret = 0;
3911 - u32 prev;
3912
3913 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
3914 return -EFAULT;
3915 @@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
3916
3917 __asm__ __volatile__ (
3918 " # futex_atomic_cmpxchg_inatomic\n"
3919 - "1: l32i %1, %3, 0\n"
3920 - " mov %0, %5\n"
3921 - " wsr %1, scompare1\n"
3922 - "2: s32c1i %0, %3, 0\n"
3923 - "3:\n"
3924 + " wsr %5, scompare1\n"
3925 + "1: s32c1i %1, %4, 0\n"
3926 + " s32i %1, %6, 0\n"
3927 + "2:\n"
3928 " .section .fixup,\"ax\"\n"
3929 " .align 4\n"
3930 - "4: .long 3b\n"
3931 - "5: l32r %1, 4b\n"
3932 - " movi %0, %6\n"
3933 + "3: .long 2b\n"
3934 + "4: l32r %1, 3b\n"
3935 + " movi %0, %7\n"
3936 " jx %1\n"
3937 " .previous\n"
3938 " .section __ex_table,\"a\"\n"
3939 - " .long 1b,5b,2b,5b\n"
3940 + " .long 1b,4b\n"
3941 " .previous\n"
3942 - : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
3943 - : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT)
3944 + : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
3945 + : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
3946 : "memory");
3947
3948 - *uval = prev;
3949 return ret;
3950 }
3951
3952 diff --git a/block/blk-core.c b/block/blk-core.c
3953 index 7b30bf10b1d4..f3750389e351 100644
3954 --- a/block/blk-core.c
3955 +++ b/block/blk-core.c
3956 @@ -660,6 +660,15 @@ void blk_cleanup_queue(struct request_queue *q)
3957 queue_flag_set(QUEUE_FLAG_DEAD, q);
3958 spin_unlock_irq(lock);
3959
3960 + /*
3961 + * make sure all in-progress dispatch are completed because
3962 + * blk_freeze_queue() can only complete all requests, and
3963 + * dispatch may still be in-progress since we dispatch requests
3964 + * from more than one contexts
3965 + */
3966 + if (q->mq_ops)
3967 + blk_mq_quiesce_queue(q);
3968 +
3969 /* for synchronous bio-based driver finish in-flight integrity i/o */
3970 blk_flush_integrity();
3971
3972 diff --git a/crypto/ahash.c b/crypto/ahash.c
3973 index 5e8666e6ccae..f75b5c1f7152 100644
3974 --- a/crypto/ahash.c
3975 +++ b/crypto/ahash.c
3976 @@ -193,11 +193,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
3977 unsigned int keylen)
3978 {
3979 unsigned long alignmask = crypto_ahash_alignmask(tfm);
3980 + int err;
3981
3982 if ((unsigned long)key & alignmask)
3983 - return ahash_setkey_unaligned(tfm, key, keylen);
3984 + err = ahash_setkey_unaligned(tfm, key, keylen);
3985 + else
3986 + err = tfm->setkey(tfm, key, keylen);
3987 +
3988 + if (err)
3989 + return err;
3990
3991 - return tfm->setkey(tfm, key, keylen);
3992 + crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
3993 + return 0;
3994 }
3995 EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
3996
3997 @@ -370,7 +377,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup);
3998
3999 int crypto_ahash_digest(struct ahash_request *req)
4000 {
4001 - return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
4002 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
4003 +
4004 + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
4005 + return -ENOKEY;
4006 +
4007 + return crypto_ahash_op(req, tfm->digest);
4008 }
4009 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
4010
4011 @@ -456,7 +468,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
4012 struct ahash_alg *alg = crypto_ahash_alg(hash);
4013
4014 hash->setkey = ahash_nosetkey;
4015 - hash->has_setkey = false;
4016 hash->export = ahash_no_export;
4017 hash->import = ahash_no_import;
4018
4019 @@ -471,7 +482,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
4020
4021 if (alg->setkey) {
4022 hash->setkey = alg->setkey;
4023 - hash->has_setkey = true;
4024 + if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
4025 + crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
4026 }
4027 if (alg->export)
4028 hash->export = alg->export;
4029 @@ -655,5 +667,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
4030 }
4031 EXPORT_SYMBOL_GPL(ahash_attr_alg);
4032
4033 +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
4034 +{
4035 + struct crypto_alg *alg = &halg->base;
4036 +
4037 + if (alg->cra_type != &crypto_ahash_type)
4038 + return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
4039 +
4040 + return __crypto_ahash_alg(alg)->setkey != NULL;
4041 +}
4042 +EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
4043 +
4044 MODULE_LICENSE("GPL");
4045 MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
4046 diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
4047 index 5e92bd275ef3..39cebd3256bf 100644
4048 --- a/crypto/algif_hash.c
4049 +++ b/crypto/algif_hash.c
4050 @@ -34,11 +34,6 @@ struct hash_ctx {
4051 struct ahash_request req;
4052 };
4053
4054 -struct algif_hash_tfm {
4055 - struct crypto_ahash *hash;
4056 - bool has_key;
4057 -};
4058 -
4059 static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
4060 {
4061 unsigned ds;
4062 @@ -309,7 +304,7 @@ static int hash_check_key(struct socket *sock)
4063 int err = 0;
4064 struct sock *psk;
4065 struct alg_sock *pask;
4066 - struct algif_hash_tfm *tfm;
4067 + struct crypto_ahash *tfm;
4068 struct sock *sk = sock->sk;
4069 struct alg_sock *ask = alg_sk(sk);
4070
4071 @@ -323,7 +318,7 @@ static int hash_check_key(struct socket *sock)
4072
4073 err = -ENOKEY;
4074 lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
4075 - if (!tfm->has_key)
4076 + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
4077 goto unlock;
4078
4079 if (!pask->refcnt++)
4080 @@ -414,41 +409,17 @@ static struct proto_ops algif_hash_ops_nokey = {
4081
4082 static void *hash_bind(const char *name, u32 type, u32 mask)
4083 {
4084 - struct algif_hash_tfm *tfm;
4085 - struct crypto_ahash *hash;
4086 -
4087 - tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
4088 - if (!tfm)
4089 - return ERR_PTR(-ENOMEM);
4090 -
4091 - hash = crypto_alloc_ahash(name, type, mask);
4092 - if (IS_ERR(hash)) {
4093 - kfree(tfm);
4094 - return ERR_CAST(hash);
4095 - }
4096 -
4097 - tfm->hash = hash;
4098 -
4099 - return tfm;
4100 + return crypto_alloc_ahash(name, type, mask);
4101 }
4102
4103 static void hash_release(void *private)
4104 {
4105 - struct algif_hash_tfm *tfm = private;
4106 -
4107 - crypto_free_ahash(tfm->hash);
4108 - kfree(tfm);
4109 + crypto_free_ahash(private);
4110 }
4111
4112 static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
4113 {
4114 - struct algif_hash_tfm *tfm = private;
4115 - int err;
4116 -
4117 - err = crypto_ahash_setkey(tfm->hash, key, keylen);
4118 - tfm->has_key = !err;
4119 -
4120 - return err;
4121 + return crypto_ahash_setkey(private, key, keylen);
4122 }
4123
4124 static void hash_sock_destruct(struct sock *sk)
4125 @@ -463,11 +434,10 @@ static void hash_sock_destruct(struct sock *sk)
4126
4127 static int hash_accept_parent_nokey(void *private, struct sock *sk)
4128 {
4129 - struct hash_ctx *ctx;
4130 + struct crypto_ahash *tfm = private;
4131 struct alg_sock *ask = alg_sk(sk);
4132 - struct algif_hash_tfm *tfm = private;
4133 - struct crypto_ahash *hash = tfm->hash;
4134 - unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
4135 + struct hash_ctx *ctx;
4136 + unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm);
4137
4138 ctx = sock_kmalloc(sk, len, GFP_KERNEL);
4139 if (!ctx)
4140 @@ -480,7 +450,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
4141
4142 ask->private = ctx;
4143
4144 - ahash_request_set_tfm(&ctx->req, hash);
4145 + ahash_request_set_tfm(&ctx->req, tfm);
4146 ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
4147 af_alg_complete, &ctx->completion);
4148
4149 @@ -491,9 +461,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
4150
4151 static int hash_accept_parent(void *private, struct sock *sk)
4152 {
4153 - struct algif_hash_tfm *tfm = private;
4154 + struct crypto_ahash *tfm = private;
4155
4156 - if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
4157 + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
4158 return -ENOKEY;
4159
4160 return hash_accept_parent_nokey(private, sk);
4161 diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
4162 index aa2a25fc7482..718cbce8d169 100644
4163 --- a/crypto/crc32_generic.c
4164 +++ b/crypto/crc32_generic.c
4165 @@ -133,6 +133,7 @@ static struct shash_alg alg = {
4166 .cra_name = "crc32",
4167 .cra_driver_name = "crc32-generic",
4168 .cra_priority = 100,
4169 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
4170 .cra_blocksize = CHKSUM_BLOCK_SIZE,
4171 .cra_ctxsize = sizeof(u32),
4172 .cra_module = THIS_MODULE,
4173 diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
4174 index 4c0a0e271876..372320399622 100644
4175 --- a/crypto/crc32c_generic.c
4176 +++ b/crypto/crc32c_generic.c
4177 @@ -146,6 +146,7 @@ static struct shash_alg alg = {
4178 .cra_name = "crc32c",
4179 .cra_driver_name = "crc32c-generic",
4180 .cra_priority = 100,
4181 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
4182 .cra_blocksize = CHKSUM_BLOCK_SIZE,
4183 .cra_alignmask = 3,
4184 .cra_ctxsize = sizeof(struct chksum_ctx),
4185 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
4186 index 0508c48a45c4..248f6ba41688 100644
4187 --- a/crypto/cryptd.c
4188 +++ b/crypto/cryptd.c
4189 @@ -895,10 +895,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
4190 if (err)
4191 goto out_free_inst;
4192
4193 - type = CRYPTO_ALG_ASYNC;
4194 - if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
4195 - type |= CRYPTO_ALG_INTERNAL;
4196 - inst->alg.halg.base.cra_flags = type;
4197 + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
4198 + (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
4199 + CRYPTO_ALG_OPTIONAL_KEY));
4200
4201 inst->alg.halg.digestsize = salg->digestsize;
4202 inst->alg.halg.statesize = salg->statesize;
4203 @@ -913,7 +912,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
4204 inst->alg.finup = cryptd_hash_finup_enqueue;
4205 inst->alg.export = cryptd_hash_export;
4206 inst->alg.import = cryptd_hash_import;
4207 - inst->alg.setkey = cryptd_hash_setkey;
4208 + if (crypto_shash_alg_has_setkey(salg))
4209 + inst->alg.setkey = cryptd_hash_setkey;
4210 inst->alg.digest = cryptd_hash_digest_enqueue;
4211
4212 err = ahash_register_instance(tmpl, inst);
4213 diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
4214 index eca04d3729b3..e0732d979e3b 100644
4215 --- a/crypto/mcryptd.c
4216 +++ b/crypto/mcryptd.c
4217 @@ -517,10 +517,9 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
4218 if (err)
4219 goto out_free_inst;
4220
4221 - type = CRYPTO_ALG_ASYNC;
4222 - if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
4223 - type |= CRYPTO_ALG_INTERNAL;
4224 - inst->alg.halg.base.cra_flags = type;
4225 + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
4226 + (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
4227 + CRYPTO_ALG_OPTIONAL_KEY));
4228
4229 inst->alg.halg.digestsize = halg->digestsize;
4230 inst->alg.halg.statesize = halg->statesize;
4231 @@ -535,7 +534,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
4232 inst->alg.finup = mcryptd_hash_finup_enqueue;
4233 inst->alg.export = mcryptd_hash_export;
4234 inst->alg.import = mcryptd_hash_import;
4235 - inst->alg.setkey = mcryptd_hash_setkey;
4236 + if (crypto_hash_alg_has_setkey(halg))
4237 + inst->alg.setkey = mcryptd_hash_setkey;
4238 inst->alg.digest = mcryptd_hash_digest_enqueue;
4239
4240 err = ahash_register_instance(tmpl, inst);
4241 diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
4242 index b1c2d57dc734..ba39eb308c79 100644
4243 --- a/crypto/poly1305_generic.c
4244 +++ b/crypto/poly1305_generic.c
4245 @@ -47,17 +47,6 @@ int crypto_poly1305_init(struct shash_desc *desc)
4246 }
4247 EXPORT_SYMBOL_GPL(crypto_poly1305_init);
4248
4249 -int crypto_poly1305_setkey(struct crypto_shash *tfm,
4250 - const u8 *key, unsigned int keylen)
4251 -{
4252 - /* Poly1305 requires a unique key for each tag, which implies that
4253 - * we can't set it on the tfm that gets accessed by multiple users
4254 - * simultaneously. Instead we expect the key as the first 32 bytes in
4255 - * the update() call. */
4256 - return -ENOTSUPP;
4257 -}
4258 -EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
4259 -
4260 static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
4261 {
4262 /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
4263 @@ -76,6 +65,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
4264 dctx->s[3] = get_unaligned_le32(key + 12);
4265 }
4266
4267 +/*
4268 + * Poly1305 requires a unique key for each tag, which implies that we can't set
4269 + * it on the tfm that gets accessed by multiple users simultaneously. Instead we
4270 + * expect the key as the first 32 bytes in the update() call.
4271 + */
4272 unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
4273 const u8 *src, unsigned int srclen)
4274 {
4275 @@ -281,7 +275,6 @@ static struct shash_alg poly1305_alg = {
4276 .init = crypto_poly1305_init,
4277 .update = crypto_poly1305_update,
4278 .final = crypto_poly1305_final,
4279 - .setkey = crypto_poly1305_setkey,
4280 .descsize = sizeof(struct poly1305_desc_ctx),
4281 .base = {
4282 .cra_name = "poly1305",
4283 diff --git a/crypto/shash.c b/crypto/shash.c
4284 index e849d3ee2e27..5d732c6bb4b2 100644
4285 --- a/crypto/shash.c
4286 +++ b/crypto/shash.c
4287 @@ -58,11 +58,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
4288 {
4289 struct shash_alg *shash = crypto_shash_alg(tfm);
4290 unsigned long alignmask = crypto_shash_alignmask(tfm);
4291 + int err;
4292
4293 if ((unsigned long)key & alignmask)
4294 - return shash_setkey_unaligned(tfm, key, keylen);
4295 + err = shash_setkey_unaligned(tfm, key, keylen);
4296 + else
4297 + err = shash->setkey(tfm, key, keylen);
4298 +
4299 + if (err)
4300 + return err;
4301
4302 - return shash->setkey(tfm, key, keylen);
4303 + crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
4304 + return 0;
4305 }
4306 EXPORT_SYMBOL_GPL(crypto_shash_setkey);
4307
4308 @@ -181,6 +188,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
4309 struct shash_alg *shash = crypto_shash_alg(tfm);
4310 unsigned long alignmask = crypto_shash_alignmask(tfm);
4311
4312 + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
4313 + return -ENOKEY;
4314 +
4315 if (((unsigned long)data | (unsigned long)out) & alignmask)
4316 return shash_digest_unaligned(desc, data, len, out);
4317
4318 @@ -360,7 +370,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
4319 crt->digest = shash_async_digest;
4320 crt->setkey = shash_async_setkey;
4321
4322 - crt->has_setkey = alg->setkey != shash_no_setkey;
4323 + crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
4324 + CRYPTO_TFM_NEED_KEY);
4325
4326 if (alg->export)
4327 crt->export = shash_async_export;
4328 @@ -375,8 +386,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
4329 static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
4330 {
4331 struct crypto_shash *hash = __crypto_shash_cast(tfm);
4332 + struct shash_alg *alg = crypto_shash_alg(hash);
4333 +
4334 + hash->descsize = alg->descsize;
4335 +
4336 + if (crypto_shash_alg_has_setkey(alg) &&
4337 + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
4338 + crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
4339
4340 - hash->descsize = crypto_shash_alg(hash)->descsize;
4341 return 0;
4342 }
4343
4344 diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
4345 index dea0fb3d6f64..f14b4326e855 100644
4346 --- a/drivers/acpi/nfit/core.c
4347 +++ b/drivers/acpi/nfit/core.c
4348 @@ -1618,6 +1618,9 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
4349 struct kernfs_node *nfit_kernfs;
4350
4351 nvdimm = nfit_mem->nvdimm;
4352 + if (!nvdimm)
4353 + continue;
4354 +
4355 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
4356 if (nfit_kernfs)
4357 nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
4358 diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
4359 index 2fa8304171e0..7a3431018e0a 100644
4360 --- a/drivers/acpi/sbshc.c
4361 +++ b/drivers/acpi/sbshc.c
4362 @@ -275,8 +275,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
4363 device->driver_data = hc;
4364
4365 acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc);
4366 - printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n",
4367 - hc->ec, hc->offset, hc->query_bit);
4368 + dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n",
4369 + hc->offset, hc->query_bit);
4370
4371 return 0;
4372 }
4373 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
4374 index 9f78bb03bb76..bc013f757d5d 100644
4375 --- a/drivers/ata/ahci.c
4376 +++ b/drivers/ata/ahci.c
4377 @@ -267,9 +267,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
4378 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
4379 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
4380 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
4381 - { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
4382 + { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH M AHCI */
4383 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
4384 - { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
4385 + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH M RAID */
4386 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
4387 { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
4388 { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
4389 @@ -292,9 +292,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
4390 { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
4391 { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
4392 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
4393 - { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
4394 + { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT M AHCI */
4395 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
4396 - { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
4397 + { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT M RAID */
4398 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
4399 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
4400 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
4401 @@ -303,20 +303,20 @@ static const struct pci_device_id ahci_pci_tbl[] = {
4402 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
4403 { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
4404 { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
4405 - { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */
4406 + { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point M AHCI */
4407 { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
4408 { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
4409 { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
4410 - { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
4411 + { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point M RAID */
4412 { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
4413 { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
4414 - { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */
4415 + { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point M AHCI */
4416 { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
4417 - { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */
4418 + { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point M RAID */
4419 { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
4420 - { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
4421 + { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point M RAID */
4422 { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
4423 - { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
4424 + { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point M RAID */
4425 { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
4426 { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
4427 { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
4428 @@ -357,21 +357,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
4429 { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
4430 { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
4431 { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
4432 - { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
4433 + { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series M AHCI */
4434 { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
4435 - { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
4436 + { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series M RAID */
4437 { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
4438 - { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
4439 + { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series M RAID */
4440 { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
4441 - { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
4442 + { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series M RAID */
4443 { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
4444 { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
4445 { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
4446 { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
4447 - { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
4448 + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H M AHCI */
4449 { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
4450 { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
4451 - { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
4452 + { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H M RAID */
4453 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
4454 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
4455 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
4456 @@ -385,6 +385,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
4457 { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
4458 { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
4459 { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
4460 + { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
4461 + { PCI_VDEVICE(INTEL, 0x0f22), board_ahci }, /* Bay Trail AHCI */
4462 + { PCI_VDEVICE(INTEL, 0x0f23), board_ahci }, /* Bay Trail AHCI */
4463 + { PCI_VDEVICE(INTEL, 0x22a3), board_ahci }, /* Cherry Trail AHCI */
4464 + { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci }, /* Apollo Lake AHCI */
4465
4466 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
4467 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
4468 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
4469 index 67974796c350..531a0915066b 100644
4470 --- a/drivers/block/pktcdvd.c
4471 +++ b/drivers/block/pktcdvd.c
4472 @@ -2579,14 +2579,14 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
4473 bdev = bdget(dev);
4474 if (!bdev)
4475 return -ENOMEM;
4476 + ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
4477 + if (ret)
4478 + return ret;
4479 if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
4480 WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
4481 - bdput(bdev);
4482 + blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
4483 return -EINVAL;
4484 }
4485 - ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
4486 - if (ret)
4487 - return ret;
4488
4489 /* This is safe, since we have a reference from open(). */
4490 __module_get(THIS_MODULE);
4491 @@ -2745,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
4492 pd->pkt_dev = MKDEV(pktdev_major, idx);
4493 ret = pkt_new_dev(pd, dev);
4494 if (ret)
4495 - goto out_new_dev;
4496 + goto out_mem2;
4497
4498 /* inherit events of the host device */
4499 disk->events = pd->bdev->bd_disk->events;
4500 @@ -2763,8 +2763,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
4501 mutex_unlock(&ctl_mutex);
4502 return 0;
4503
4504 -out_new_dev:
4505 - blk_cleanup_queue(disk->queue);
4506 out_mem2:
4507 put_disk(disk);
4508 out_mem:
4509 diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
4510 index c8e945d19ffe..20142bc77554 100644
4511 --- a/drivers/bluetooth/btsdio.c
4512 +++ b/drivers/bluetooth/btsdio.c
4513 @@ -31,6 +31,7 @@
4514 #include <linux/errno.h>
4515 #include <linux/skbuff.h>
4516
4517 +#include <linux/mmc/host.h>
4518 #include <linux/mmc/sdio_ids.h>
4519 #include <linux/mmc/sdio_func.h>
4520
4521 @@ -292,6 +293,14 @@ static int btsdio_probe(struct sdio_func *func,
4522 tuple = tuple->next;
4523 }
4524
4525 + /* BCM43341 devices soldered onto the PCB (non-removable) use an
4526 + * uart connection for bluetooth, ignore the BT SDIO interface.
4527 + */
4528 + if (func->vendor == SDIO_VENDOR_ID_BROADCOM &&
4529 + func->device == SDIO_DEVICE_ID_BROADCOM_43341 &&
4530 + !mmc_card_is_removable(func->card->host))
4531 + return -ENODEV;
4532 +
4533 data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
4534 if (!data)
4535 return -ENOMEM;
4536 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
4537 index 513a7a59d421..d54c3f6f728c 100644
4538 --- a/drivers/bluetooth/btusb.c
4539 +++ b/drivers/bluetooth/btusb.c
4540 @@ -23,6 +23,7 @@
4541
4542 #include <linux/module.h>
4543 #include <linux/usb.h>
4544 +#include <linux/usb/quirks.h>
4545 #include <linux/firmware.h>
4546 #include <linux/of_device.h>
4547 #include <linux/of_irq.h>
4548 @@ -392,9 +393,8 @@ static const struct usb_device_id blacklist_table[] = {
4549 #define BTUSB_FIRMWARE_LOADED 7
4550 #define BTUSB_FIRMWARE_FAILED 8
4551 #define BTUSB_BOOTING 9
4552 -#define BTUSB_RESET_RESUME 10
4553 -#define BTUSB_DIAG_RUNNING 11
4554 -#define BTUSB_OOB_WAKE_ENABLED 12
4555 +#define BTUSB_DIAG_RUNNING 10
4556 +#define BTUSB_OOB_WAKE_ENABLED 11
4557
4558 struct btusb_data {
4559 struct hci_dev *hdev;
4560 @@ -3102,9 +3102,9 @@ static int btusb_probe(struct usb_interface *intf,
4561
4562 /* QCA Rome devices lose their updated firmware over suspend,
4563 * but the USB hub doesn't notice any status change.
4564 - * Explicitly request a device reset on resume.
4565 + * explicitly request a device reset on resume.
4566 */
4567 - set_bit(BTUSB_RESET_RESUME, &data->flags);
4568 + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
4569 }
4570
4571 #ifdef CONFIG_BT_HCIBTUSB_RTL
4572 @@ -3115,7 +3115,7 @@ static int btusb_probe(struct usb_interface *intf,
4573 * but the USB hub doesn't notice any status change.
4574 * Explicitly request a device reset on resume.
4575 */
4576 - set_bit(BTUSB_RESET_RESUME, &data->flags);
4577 + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
4578 }
4579 #endif
4580
4581 @@ -3280,14 +3280,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
4582 enable_irq(data->oob_wake_irq);
4583 }
4584
4585 - /* Optionally request a device reset on resume, but only when
4586 - * wakeups are disabled. If wakeups are enabled we assume the
4587 - * device will stay powered up throughout suspend.
4588 - */
4589 - if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
4590 - !device_may_wakeup(&data->udev->dev))
4591 - data->udev->reset_resume = 1;
4592 -
4593 return 0;
4594 }
4595
4596 diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
4597 index 2059f79d669a..c3a23ec3e76f 100644
4598 --- a/drivers/char/ipmi/ipmi_dmi.c
4599 +++ b/drivers/char/ipmi/ipmi_dmi.c
4600 @@ -81,7 +81,10 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
4601 pr_err("ipmi:dmi: Error allocation IPMI platform device");
4602 return;
4603 }
4604 - pdev->driver_override = override;
4605 + pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
4606 + override);
4607 + if (!pdev->driver_override)
4608 + goto err;
4609
4610 if (type == IPMI_DMI_TYPE_SSIF)
4611 goto add_properties;
4612 diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
4613 index 8f2423789ba9..4bfeb9929ab2 100644
4614 --- a/drivers/clocksource/timer-stm32.c
4615 +++ b/drivers/clocksource/timer-stm32.c
4616 @@ -106,6 +106,10 @@ static int __init stm32_clockevent_init(struct device_node *np)
4617 unsigned long rate, max_delta;
4618 int irq, ret, bits, prescaler = 1;
4619
4620 + data = kmemdup(&clock_event_ddata, sizeof(*data), GFP_KERNEL);
4621 + if (!data)
4622 + return -ENOMEM;
4623 +
4624 clk = of_clk_get(np, 0);
4625 if (IS_ERR(clk)) {
4626 ret = PTR_ERR(clk);
4627 @@ -156,8 +160,8 @@ static int __init stm32_clockevent_init(struct device_node *np)
4628
4629 writel_relaxed(prescaler - 1, data->base + TIM_PSC);
4630 writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR);
4631 - writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER);
4632 writel_relaxed(0, data->base + TIM_SR);
4633 + writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER);
4634
4635 data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ);
4636
4637 @@ -184,6 +188,7 @@ static int __init stm32_clockevent_init(struct device_node *np)
4638 err_clk_enable:
4639 clk_put(clk);
4640 err_clk_get:
4641 + kfree(data);
4642 return ret;
4643 }
4644
4645 diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
4646 index a753c50e9e41..9e0aa767bbbe 100644
4647 --- a/drivers/cpufreq/cpufreq-dt-platdev.c
4648 +++ b/drivers/cpufreq/cpufreq-dt-platdev.c
4649 @@ -111,6 +111,14 @@ static const struct of_device_id blacklist[] __initconst = {
4650
4651 { .compatible = "marvell,armadaxp", },
4652
4653 + { .compatible = "mediatek,mt2701", },
4654 + { .compatible = "mediatek,mt2712", },
4655 + { .compatible = "mediatek,mt7622", },
4656 + { .compatible = "mediatek,mt7623", },
4657 + { .compatible = "mediatek,mt817x", },
4658 + { .compatible = "mediatek,mt8173", },
4659 + { .compatible = "mediatek,mt8176", },
4660 +
4661 { .compatible = "nvidia,tegra124", },
4662
4663 { .compatible = "st,stih407", },
4664 diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
4665 index a118b9bed669..bfbf8bf77f03 100644
4666 --- a/drivers/crypto/bfin_crc.c
4667 +++ b/drivers/crypto/bfin_crc.c
4668 @@ -494,7 +494,8 @@ static struct ahash_alg algs = {
4669 .cra_driver_name = DRIVER_NAME,
4670 .cra_priority = 100,
4671 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
4672 - CRYPTO_ALG_ASYNC,
4673 + CRYPTO_ALG_ASYNC |
4674 + CRYPTO_ALG_OPTIONAL_KEY,
4675 .cra_blocksize = CHKSUM_BLOCK_SIZE,
4676 .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
4677 .cra_alignmask = 3,
4678 diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
4679 index 027e121c6f70..e1d4ae1153c4 100644
4680 --- a/drivers/crypto/caam/ctrl.c
4681 +++ b/drivers/crypto/caam/ctrl.c
4682 @@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
4683 * without any error (HW optimizations for later
4684 * CAAM eras), then try again.
4685 */
4686 + if (ret)
4687 + break;
4688 +
4689 rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
4690 if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
4691 - !(rdsta_val & (1 << sh_idx)))
4692 + !(rdsta_val & (1 << sh_idx))) {
4693 ret = -EAGAIN;
4694 - if (ret)
4695 break;
4696 + }
4697 +
4698 dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
4699 /* Clear the contents before recreating the descriptor */
4700 memset(desc, 0x00, CAAM_CMD_SZ * 7);
4701 diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
4702 index 090582baecfe..8f09b8430893 100644
4703 --- a/drivers/crypto/stm32/stm32_crc32.c
4704 +++ b/drivers/crypto/stm32/stm32_crc32.c
4705 @@ -208,6 +208,7 @@ static struct shash_alg algs[] = {
4706 .cra_name = "crc32",
4707 .cra_driver_name = DRIVER_NAME,
4708 .cra_priority = 200,
4709 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
4710 .cra_blocksize = CHKSUM_BLOCK_SIZE,
4711 .cra_alignmask = 3,
4712 .cra_ctxsize = sizeof(struct stm32_crc_ctx),
4713 @@ -229,6 +230,7 @@ static struct shash_alg algs[] = {
4714 .cra_name = "crc32c",
4715 .cra_driver_name = DRIVER_NAME,
4716 .cra_priority = 200,
4717 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
4718 .cra_blocksize = CHKSUM_BLOCK_SIZE,
4719 .cra_alignmask = 3,
4720 .cra_ctxsize = sizeof(struct stm32_crc_ctx),
4721 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
4722 index ec5f9d2bc820..80cc2be6483c 100644
4723 --- a/drivers/dma/dmatest.c
4724 +++ b/drivers/dma/dmatest.c
4725 @@ -355,7 +355,7 @@ static void dmatest_callback(void *arg)
4726 {
4727 struct dmatest_done *done = arg;
4728 struct dmatest_thread *thread =
4729 - container_of(arg, struct dmatest_thread, done_wait);
4730 + container_of(done, struct dmatest_thread, test_done);
4731 if (!thread->done) {
4732 done->done = true;
4733 wake_up_all(done->wait);
4734 diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
4735 index 9c1ffe3e912b..aeb222ca3ed1 100644
4736 --- a/drivers/edac/octeon_edac-lmc.c
4737 +++ b/drivers/edac/octeon_edac-lmc.c
4738 @@ -78,6 +78,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
4739 if (!pvt->inject)
4740 int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
4741 else {
4742 + int_reg.u64 = 0;
4743 if (pvt->error_type == 1)
4744 int_reg.s.sec_err = 1;
4745 if (pvt->error_type == 2)
4746 diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
4747 index d687ca3d5049..c80ec1d03274 100644
4748 --- a/drivers/firmware/psci.c
4749 +++ b/drivers/firmware/psci.c
4750 @@ -59,7 +59,10 @@ bool psci_tos_resident_on(int cpu)
4751 return cpu == resident_cpu;
4752 }
4753
4754 -struct psci_operations psci_ops;
4755 +struct psci_operations psci_ops = {
4756 + .conduit = PSCI_CONDUIT_NONE,
4757 + .smccc_version = SMCCC_VERSION_1_0,
4758 +};
4759
4760 typedef unsigned long (psci_fn)(unsigned long, unsigned long,
4761 unsigned long, unsigned long);
4762 @@ -210,6 +213,22 @@ static unsigned long psci_migrate_info_up_cpu(void)
4763 0, 0, 0);
4764 }
4765
4766 +static void set_conduit(enum psci_conduit conduit)
4767 +{
4768 + switch (conduit) {
4769 + case PSCI_CONDUIT_HVC:
4770 + invoke_psci_fn = __invoke_psci_fn_hvc;
4771 + break;
4772 + case PSCI_CONDUIT_SMC:
4773 + invoke_psci_fn = __invoke_psci_fn_smc;
4774 + break;
4775 + default:
4776 + WARN(1, "Unexpected PSCI conduit %d\n", conduit);
4777 + }
4778 +
4779 + psci_ops.conduit = conduit;
4780 +}
4781 +
4782 static int get_set_conduit_method(struct device_node *np)
4783 {
4784 const char *method;
4785 @@ -222,9 +241,9 @@ static int get_set_conduit_method(struct device_node *np)
4786 }
4787
4788 if (!strcmp("hvc", method)) {
4789 - invoke_psci_fn = __invoke_psci_fn_hvc;
4790 + set_conduit(PSCI_CONDUIT_HVC);
4791 } else if (!strcmp("smc", method)) {
4792 - invoke_psci_fn = __invoke_psci_fn_smc;
4793 + set_conduit(PSCI_CONDUIT_SMC);
4794 } else {
4795 pr_warn("invalid \"method\" property: %s\n", method);
4796 return -EINVAL;
4797 @@ -493,9 +512,36 @@ static void __init psci_init_migrate(void)
4798 pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
4799 }
4800
4801 +static void __init psci_init_smccc(void)
4802 +{
4803 + u32 ver = ARM_SMCCC_VERSION_1_0;
4804 + int feature;
4805 +
4806 + feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
4807 +
4808 + if (feature != PSCI_RET_NOT_SUPPORTED) {
4809 + u32 ret;
4810 + ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
4811 + if (ret == ARM_SMCCC_VERSION_1_1) {
4812 + psci_ops.smccc_version = SMCCC_VERSION_1_1;
4813 + ver = ret;
4814 + }
4815 + }
4816 +
4817 + /*
4818 + * Conveniently, the SMCCC and PSCI versions are encoded the
4819 + * same way. No, this isn't accidental.
4820 + */
4821 + pr_info("SMC Calling Convention v%d.%d\n",
4822 + PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
4823 +
4824 +}
4825 +
4826 static void __init psci_0_2_set_functions(void)
4827 {
4828 pr_info("Using standard PSCI v0.2 function IDs\n");
4829 + psci_ops.get_version = psci_get_version;
4830 +
4831 psci_function_id[PSCI_FN_CPU_SUSPEND] =
4832 PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
4833 psci_ops.cpu_suspend = psci_cpu_suspend;
4834 @@ -539,6 +585,7 @@ static int __init psci_probe(void)
4835 psci_init_migrate();
4836
4837 if (PSCI_VERSION_MAJOR(ver) >= 1) {
4838 + psci_init_smccc();
4839 psci_init_cpu_suspend();
4840 psci_init_system_suspend();
4841 }
4842 @@ -652,9 +699,9 @@ int __init psci_acpi_init(void)
4843 pr_info("probing for conduit method from ACPI.\n");
4844
4845 if (acpi_psci_use_hvc())
4846 - invoke_psci_fn = __invoke_psci_fn_hvc;
4847 + set_conduit(PSCI_CONDUIT_HVC);
4848 else
4849 - invoke_psci_fn = __invoke_psci_fn_smc;
4850 + set_conduit(PSCI_CONDUIT_SMC);
4851
4852 return psci_probe();
4853 }
4854 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
4855 index 095a2240af4f..46485692db48 100644
4856 --- a/drivers/gpu/drm/i915/intel_display.c
4857 +++ b/drivers/gpu/drm/i915/intel_display.c
4858 @@ -1211,23 +1211,6 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
4859 pipe_name(pipe));
4860 }
4861
4862 -static void assert_cursor(struct drm_i915_private *dev_priv,
4863 - enum pipe pipe, bool state)
4864 -{
4865 - bool cur_state;
4866 -
4867 - if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
4868 - cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
4869 - else
4870 - cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
4871 -
4872 - I915_STATE_WARN(cur_state != state,
4873 - "cursor on pipe %c assertion failure (expected %s, current %s)\n",
4874 - pipe_name(pipe), onoff(state), onoff(cur_state));
4875 -}
4876 -#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
4877 -#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
4878 -
4879 void assert_pipe(struct drm_i915_private *dev_priv,
4880 enum pipe pipe, bool state)
4881 {
4882 @@ -1255,77 +1238,25 @@ void assert_pipe(struct drm_i915_private *dev_priv,
4883 pipe_name(pipe), onoff(state), onoff(cur_state));
4884 }
4885
4886 -static void assert_plane(struct drm_i915_private *dev_priv,
4887 - enum plane plane, bool state)
4888 +static void assert_plane(struct intel_plane *plane, bool state)
4889 {
4890 - u32 val;
4891 - bool cur_state;
4892 + bool cur_state = plane->get_hw_state(plane);
4893
4894 - val = I915_READ(DSPCNTR(plane));
4895 - cur_state = !!(val & DISPLAY_PLANE_ENABLE);
4896 I915_STATE_WARN(cur_state != state,
4897 - "plane %c assertion failure (expected %s, current %s)\n",
4898 - plane_name(plane), onoff(state), onoff(cur_state));
4899 + "%s assertion failure (expected %s, current %s)\n",
4900 + plane->base.name, onoff(state), onoff(cur_state));
4901 }
4902
4903 -#define assert_plane_enabled(d, p) assert_plane(d, p, true)
4904 -#define assert_plane_disabled(d, p) assert_plane(d, p, false)
4905 +#define assert_plane_enabled(p) assert_plane(p, true)
4906 +#define assert_plane_disabled(p) assert_plane(p, false)
4907
4908 -static void assert_planes_disabled(struct drm_i915_private *dev_priv,
4909 - enum pipe pipe)
4910 +static void assert_planes_disabled(struct intel_crtc *crtc)
4911 {
4912 - int i;
4913 -
4914 - /* Primary planes are fixed to pipes on gen4+ */
4915 - if (INTEL_GEN(dev_priv) >= 4) {
4916 - u32 val = I915_READ(DSPCNTR(pipe));
4917 - I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
4918 - "plane %c assertion failure, should be disabled but not\n",
4919 - plane_name(pipe));
4920 - return;
4921 - }
4922 -
4923 - /* Need to check both planes against the pipe */
4924 - for_each_pipe(dev_priv, i) {
4925 - u32 val = I915_READ(DSPCNTR(i));
4926 - enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
4927 - DISPPLANE_SEL_PIPE_SHIFT;
4928 - I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
4929 - "plane %c assertion failure, should be off on pipe %c but is still active\n",
4930 - plane_name(i), pipe_name(pipe));
4931 - }
4932 -}
4933 -
4934 -static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
4935 - enum pipe pipe)
4936 -{
4937 - int sprite;
4938 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4939 + struct intel_plane *plane;
4940
4941 - if (INTEL_GEN(dev_priv) >= 9) {
4942 - for_each_sprite(dev_priv, pipe, sprite) {
4943 - u32 val = I915_READ(PLANE_CTL(pipe, sprite));
4944 - I915_STATE_WARN(val & PLANE_CTL_ENABLE,
4945 - "plane %d assertion failure, should be off on pipe %c but is still active\n",
4946 - sprite, pipe_name(pipe));
4947 - }
4948 - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4949 - for_each_sprite(dev_priv, pipe, sprite) {
4950 - u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
4951 - I915_STATE_WARN(val & SP_ENABLE,
4952 - "sprite %c assertion failure, should be off on pipe %c but is still active\n",
4953 - sprite_name(pipe, sprite), pipe_name(pipe));
4954 - }
4955 - } else if (INTEL_GEN(dev_priv) >= 7) {
4956 - u32 val = I915_READ(SPRCTL(pipe));
4957 - I915_STATE_WARN(val & SPRITE_ENABLE,
4958 - "sprite %c assertion failure, should be off on pipe %c but is still active\n",
4959 - plane_name(pipe), pipe_name(pipe));
4960 - } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
4961 - u32 val = I915_READ(DVSCNTR(pipe));
4962 - I915_STATE_WARN(val & DVS_ENABLE,
4963 - "sprite %c assertion failure, should be off on pipe %c but is still active\n",
4964 - plane_name(pipe), pipe_name(pipe));
4965 - }
4966 + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
4967 + assert_plane_disabled(plane);
4968 }
4969
4970 static void assert_vblank_disabled(struct drm_crtc *crtc)
4971 @@ -1926,9 +1857,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
4972
4973 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
4974
4975 - assert_planes_disabled(dev_priv, pipe);
4976 - assert_cursor_disabled(dev_priv, pipe);
4977 - assert_sprites_disabled(dev_priv, pipe);
4978 + assert_planes_disabled(crtc);
4979
4980 /*
4981 * A pipe without a PLL won't actually be able to drive bits from
4982 @@ -1997,9 +1926,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
4983 * Make sure planes won't keep trying to pump pixels to us,
4984 * or we might hang the display.
4985 */
4986 - assert_planes_disabled(dev_priv, pipe);
4987 - assert_cursor_disabled(dev_priv, pipe);
4988 - assert_sprites_disabled(dev_priv, pipe);
4989 + assert_planes_disabled(crtc);
4990
4991 reg = PIPECONF(cpu_transcoder);
4992 val = I915_READ(reg);
4993 @@ -2829,6 +2756,23 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
4994 crtc_state->active_planes);
4995 }
4996
4997 +static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
4998 + struct intel_plane *plane)
4999 +{
5000 + struct intel_crtc_state *crtc_state =
5001 + to_intel_crtc_state(crtc->base.state);
5002 + struct intel_plane_state *plane_state =
5003 + to_intel_plane_state(plane->base.state);
5004 +
5005 + intel_set_plane_visible(crtc_state, plane_state, false);
5006 +
5007 + if (plane->id == PLANE_PRIMARY)
5008 + intel_pre_disable_primary_noatomic(&crtc->base);
5009 +
5010 + trace_intel_disable_plane(&plane->base, crtc);
5011 + plane->disable_plane(plane, crtc);
5012 +}
5013 +
5014 static void
5015 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
5016 struct intel_initial_plane_config *plane_config)
5017 @@ -2886,12 +2830,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
5018 * simplest solution is to just disable the primary plane now and
5019 * pretend the BIOS never had it enabled.
5020 */
5021 - intel_set_plane_visible(to_intel_crtc_state(crtc_state),
5022 - to_intel_plane_state(plane_state),
5023 - false);
5024 - intel_pre_disable_primary_noatomic(&intel_crtc->base);
5025 - trace_intel_disable_plane(primary, intel_crtc);
5026 - intel_plane->disable_plane(intel_plane, intel_crtc);
5027 + intel_plane_disable_noatomic(intel_crtc, intel_plane);
5028
5029 return;
5030
5031 @@ -3397,6 +3336,31 @@ static void i9xx_disable_primary_plane(struct intel_plane *primary,
5032 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
5033 }
5034
5035 +static bool i9xx_plane_get_hw_state(struct intel_plane *primary)
5036 +{
5037 +
5038 + struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
5039 + enum intel_display_power_domain power_domain;
5040 + enum plane plane = primary->plane;
5041 + enum pipe pipe = primary->pipe;
5042 + bool ret;
5043 +
5044 + /*
5045 + * Not 100% correct for planes that can move between pipes,
5046 + * but that's only the case for gen2-4 which don't have any
5047 + * display power wells.
5048 + */
5049 + power_domain = POWER_DOMAIN_PIPE(pipe);
5050 + if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
5051 + return false;
5052 +
5053 + ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE;
5054 +
5055 + intel_display_power_put(dev_priv, power_domain);
5056 +
5057 + return ret;
5058 +}
5059 +
5060 static u32
5061 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
5062 {
5063 @@ -4973,7 +4937,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
5064 * a vblank wait.
5065 */
5066
5067 - assert_plane_enabled(dev_priv, crtc->plane);
5068 + assert_plane_enabled(to_intel_plane(crtc->base.primary));
5069 +
5070 if (IS_BROADWELL(dev_priv)) {
5071 mutex_lock(&dev_priv->rps.hw_lock);
5072 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
5073 @@ -5005,7 +4970,8 @@ void hsw_disable_ips(struct intel_crtc *crtc)
5074 if (!crtc->config->ips_enabled)
5075 return;
5076
5077 - assert_plane_enabled(dev_priv, crtc->plane);
5078 + assert_plane_enabled(to_intel_plane(crtc->base.primary));
5079 +
5080 if (IS_BROADWELL(dev_priv)) {
5081 mutex_lock(&dev_priv->rps.hw_lock);
5082 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5083 @@ -6000,6 +5966,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
5084 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5085 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5086 enum intel_display_power_domain domain;
5087 + struct intel_plane *plane;
5088 u64 domains;
5089 struct drm_atomic_state *state;
5090 struct intel_crtc_state *crtc_state;
5091 @@ -6008,11 +5975,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
5092 if (!intel_crtc->active)
5093 return;
5094
5095 - if (crtc->primary->state->visible) {
5096 - intel_pre_disable_primary_noatomic(crtc);
5097 + for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
5098 + const struct intel_plane_state *plane_state =
5099 + to_intel_plane_state(plane->base.state);
5100
5101 - intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
5102 - crtc->primary->state->visible = false;
5103 + if (plane_state->base.visible)
5104 + intel_plane_disable_noatomic(intel_crtc, plane);
5105 }
5106
5107 state = drm_atomic_state_alloc(crtc->dev);
5108 @@ -9577,6 +9545,23 @@ static void i845_disable_cursor(struct intel_plane *plane,
5109 i845_update_cursor(plane, NULL, NULL);
5110 }
5111
5112 +static bool i845_cursor_get_hw_state(struct intel_plane *plane)
5113 +{
5114 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5115 + enum intel_display_power_domain power_domain;
5116 + bool ret;
5117 +
5118 + power_domain = POWER_DOMAIN_PIPE(PIPE_A);
5119 + if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
5120 + return false;
5121 +
5122 + ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
5123 +
5124 + intel_display_power_put(dev_priv, power_domain);
5125 +
5126 + return ret;
5127 +}
5128 +
5129 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
5130 const struct intel_plane_state *plane_state)
5131 {
5132 @@ -9770,6 +9755,28 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
5133 i9xx_update_cursor(plane, NULL, NULL);
5134 }
5135
5136 +static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
5137 +{
5138 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5139 + enum intel_display_power_domain power_domain;
5140 + enum pipe pipe = plane->pipe;
5141 + bool ret;
5142 +
5143 + /*
5144 + * Not 100% correct for planes that can move between pipes,
5145 + * but that's only the case for gen2-3 which don't have any
5146 + * display power wells.
5147 + */
5148 + power_domain = POWER_DOMAIN_PIPE(pipe);
5149 + if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
5150 + return false;
5151 +
5152 + ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
5153 +
5154 + intel_display_power_put(dev_priv, power_domain);
5155 +
5156 + return ret;
5157 +}
5158
5159 /* VESA 640x480x72Hz mode to set on the pipe */
5160 static struct drm_display_mode load_detect_mode = {
5161 @@ -13240,6 +13247,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
5162
5163 primary->update_plane = skylake_update_primary_plane;
5164 primary->disable_plane = skylake_disable_primary_plane;
5165 + primary->get_hw_state = skl_plane_get_hw_state;
5166 } else if (INTEL_GEN(dev_priv) >= 9) {
5167 intel_primary_formats = skl_primary_formats;
5168 num_formats = ARRAY_SIZE(skl_primary_formats);
5169 @@ -13250,6 +13258,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
5170
5171 primary->update_plane = skylake_update_primary_plane;
5172 primary->disable_plane = skylake_disable_primary_plane;
5173 + primary->get_hw_state = skl_plane_get_hw_state;
5174 } else if (INTEL_GEN(dev_priv) >= 4) {
5175 intel_primary_formats = i965_primary_formats;
5176 num_formats = ARRAY_SIZE(i965_primary_formats);
5177 @@ -13257,6 +13266,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
5178
5179 primary->update_plane = i9xx_update_primary_plane;
5180 primary->disable_plane = i9xx_disable_primary_plane;
5181 + primary->get_hw_state = i9xx_plane_get_hw_state;
5182 } else {
5183 intel_primary_formats = i8xx_primary_formats;
5184 num_formats = ARRAY_SIZE(i8xx_primary_formats);
5185 @@ -13264,6 +13274,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
5186
5187 primary->update_plane = i9xx_update_primary_plane;
5188 primary->disable_plane = i9xx_disable_primary_plane;
5189 + primary->get_hw_state = i9xx_plane_get_hw_state;
5190 }
5191
5192 if (INTEL_GEN(dev_priv) >= 9)
5193 @@ -13353,10 +13364,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
5194 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
5195 cursor->update_plane = i845_update_cursor;
5196 cursor->disable_plane = i845_disable_cursor;
5197 + cursor->get_hw_state = i845_cursor_get_hw_state;
5198 cursor->check_plane = i845_check_cursor;
5199 } else {
5200 cursor->update_plane = i9xx_update_cursor;
5201 cursor->disable_plane = i9xx_disable_cursor;
5202 + cursor->get_hw_state = i9xx_cursor_get_hw_state;
5203 cursor->check_plane = i9xx_check_cursor;
5204 }
5205
5206 @@ -14704,8 +14717,11 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
5207 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
5208 pipe_name(pipe));
5209
5210 - assert_plane_disabled(dev_priv, PLANE_A);
5211 - assert_plane_disabled(dev_priv, PLANE_B);
5212 + WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
5213 + WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
5214 + WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
5215 + WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
5216 + WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
5217
5218 I915_WRITE(PIPECONF(pipe), 0);
5219 POSTING_READ(PIPECONF(pipe));
5220 @@ -14716,22 +14732,36 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
5221 POSTING_READ(DPLL(pipe));
5222 }
5223
5224 -static bool
5225 -intel_check_plane_mapping(struct intel_crtc *crtc)
5226 +static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
5227 + struct intel_plane *primary)
5228 {
5229 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5230 - u32 val;
5231 + enum plane plane = primary->plane;
5232 + u32 val = I915_READ(DSPCNTR(plane));
5233
5234 - if (INTEL_INFO(dev_priv)->num_pipes == 1)
5235 - return true;
5236 + return (val & DISPLAY_PLANE_ENABLE) == 0 ||
5237 + (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
5238 +}
5239
5240 - val = I915_READ(DSPCNTR(!crtc->plane));
5241 +static void
5242 +intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
5243 +{
5244 + struct intel_crtc *crtc;
5245
5246 - if ((val & DISPLAY_PLANE_ENABLE) &&
5247 - (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
5248 - return false;
5249 + if (INTEL_GEN(dev_priv) >= 4)
5250 + return;
5251
5252 - return true;
5253 + for_each_intel_crtc(&dev_priv->drm, crtc) {
5254 + struct intel_plane *plane =
5255 + to_intel_plane(crtc->base.primary);
5256 +
5257 + if (intel_plane_mapping_ok(crtc, plane))
5258 + continue;
5259 +
5260 + DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
5261 + plane->base.name);
5262 + intel_plane_disable_noatomic(crtc, plane);
5263 + }
5264 }
5265
5266 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
5267 @@ -14787,33 +14817,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
5268
5269 /* Disable everything but the primary plane */
5270 for_each_intel_plane_on_crtc(dev, crtc, plane) {
5271 - if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
5272 - continue;
5273 + const struct intel_plane_state *plane_state =
5274 + to_intel_plane_state(plane->base.state);
5275
5276 - trace_intel_disable_plane(&plane->base, crtc);
5277 - plane->disable_plane(plane, crtc);
5278 + if (plane_state->base.visible &&
5279 + plane->base.type != DRM_PLANE_TYPE_PRIMARY)
5280 + intel_plane_disable_noatomic(crtc, plane);
5281 }
5282 }
5283
5284 - /* We need to sanitize the plane -> pipe mapping first because this will
5285 - * disable the crtc (and hence change the state) if it is wrong. Note
5286 - * that gen4+ has a fixed plane -> pipe mapping. */
5287 - if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
5288 - bool plane;
5289 -
5290 - DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
5291 - crtc->base.base.id, crtc->base.name);
5292 -
5293 - /* Pipe has the wrong plane attached and the plane is active.
5294 - * Temporarily change the plane mapping and disable everything
5295 - * ... */
5296 - plane = crtc->plane;
5297 - crtc->base.primary->state->visible = true;
5298 - crtc->plane = !plane;
5299 - intel_crtc_disable_noatomic(&crtc->base, ctx);
5300 - crtc->plane = plane;
5301 - }
5302 -
5303 /* Adjust the state of the output pipe according to whether we
5304 * have active connectors/encoders. */
5305 if (crtc->active && !intel_crtc_has_encoders(crtc))
5306 @@ -14918,24 +14930,21 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv)
5307 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
5308 }
5309
5310 -static bool primary_get_hw_state(struct intel_plane *plane)
5311 -{
5312 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5313 -
5314 - return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
5315 -}
5316 -
5317 /* FIXME read out full plane state for all planes */
5318 static void readout_plane_state(struct intel_crtc *crtc)
5319 {
5320 - struct intel_plane *primary = to_intel_plane(crtc->base.primary);
5321 - bool visible;
5322 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5323 + struct intel_crtc_state *crtc_state =
5324 + to_intel_crtc_state(crtc->base.state);
5325 + struct intel_plane *plane;
5326
5327 - visible = crtc->active && primary_get_hw_state(primary);
5328 + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5329 + struct intel_plane_state *plane_state =
5330 + to_intel_plane_state(plane->base.state);
5331 + bool visible = plane->get_hw_state(plane);
5332
5333 - intel_set_plane_visible(to_intel_crtc_state(crtc->base.state),
5334 - to_intel_plane_state(primary->base.state),
5335 - visible);
5336 + intel_set_plane_visible(crtc_state, plane_state, visible);
5337 + }
5338 }
5339
5340 static void intel_modeset_readout_hw_state(struct drm_device *dev)
5341 @@ -15137,6 +15146,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
5342 /* HW state is read out, now we need to sanitize this mess. */
5343 get_encoder_power_domains(dev_priv);
5344
5345 + intel_sanitize_plane_mapping(dev_priv);
5346 +
5347 for_each_intel_encoder(dev, encoder) {
5348 intel_sanitize_encoder(encoder);
5349 }
5350 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
5351 index 09f274419eea..76cf68745870 100644
5352 --- a/drivers/gpu/drm/i915/intel_dp.c
5353 +++ b/drivers/gpu/drm/i915/intel_dp.c
5354 @@ -5340,6 +5340,12 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5355 */
5356 final->t8 = 1;
5357 final->t9 = 1;
5358 +
5359 + /*
5360 + * HW has only a 100msec granularity for t11_t12 so round it up
5361 + * accordingly.
5362 + */
5363 + final->t11_t12 = roundup(final->t11_t12, 100 * 10);
5364 }
5365
5366 static void
5367 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
5368 index 79fbaf78f604..10ae9681f02d 100644
5369 --- a/drivers/gpu/drm/i915/intel_drv.h
5370 +++ b/drivers/gpu/drm/i915/intel_drv.h
5371 @@ -863,6 +863,7 @@ struct intel_plane {
5372 const struct intel_plane_state *plane_state);
5373 void (*disable_plane)(struct intel_plane *plane,
5374 struct intel_crtc *crtc);
5375 + bool (*get_hw_state)(struct intel_plane *plane);
5376 int (*check_plane)(struct intel_plane *plane,
5377 struct intel_crtc_state *crtc_state,
5378 struct intel_plane_state *state);
5379 @@ -1885,6 +1886,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
5380 struct drm_file *file_priv);
5381 void intel_pipe_update_start(struct intel_crtc *crtc);
5382 void intel_pipe_update_end(struct intel_crtc *crtc);
5383 +bool skl_plane_get_hw_state(struct intel_plane *plane);
5384
5385 /* intel_tv.c */
5386 void intel_tv_init(struct drm_i915_private *dev_priv);
5387 diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
5388 index 524933b01483..f8ebeb5ffb96 100644
5389 --- a/drivers/gpu/drm/i915/intel_sprite.c
5390 +++ b/drivers/gpu/drm/i915/intel_sprite.c
5391 @@ -324,6 +324,26 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
5392 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
5393 }
5394
5395 +bool
5396 +skl_plane_get_hw_state(struct intel_plane *plane)
5397 +{
5398 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5399 + enum intel_display_power_domain power_domain;
5400 + enum plane_id plane_id = plane->id;
5401 + enum pipe pipe = plane->pipe;
5402 + bool ret;
5403 +
5404 + power_domain = POWER_DOMAIN_PIPE(pipe);
5405 + if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
5406 + return false;
5407 +
5408 + ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
5409 +
5410 + intel_display_power_put(dev_priv, power_domain);
5411 +
5412 + return ret;
5413 +}
5414 +
5415 static void
5416 chv_update_csc(struct intel_plane *plane, uint32_t format)
5417 {
5418 @@ -501,6 +521,26 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
5419 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
5420 }
5421
5422 +static bool
5423 +vlv_plane_get_hw_state(struct intel_plane *plane)
5424 +{
5425 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5426 + enum intel_display_power_domain power_domain;
5427 + enum plane_id plane_id = plane->id;
5428 + enum pipe pipe = plane->pipe;
5429 + bool ret;
5430 +
5431 + power_domain = POWER_DOMAIN_PIPE(pipe);
5432 + if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
5433 + return false;
5434 +
5435 + ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
5436 +
5437 + intel_display_power_put(dev_priv, power_domain);
5438 +
5439 + return ret;
5440 +}
5441 +
5442 static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
5443 const struct intel_plane_state *plane_state)
5444 {
5445 @@ -641,6 +681,25 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
5446 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
5447 }
5448
5449 +static bool
5450 +ivb_plane_get_hw_state(struct intel_plane *plane)
5451 +{
5452 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5453 + enum intel_display_power_domain power_domain;
5454 + enum pipe pipe = plane->pipe;
5455 + bool ret;
5456 +
5457 + power_domain = POWER_DOMAIN_PIPE(pipe);
5458 + if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
5459 + return false;
5460 +
5461 + ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
5462 +
5463 + intel_display_power_put(dev_priv, power_domain);
5464 +
5465 + return ret;
5466 +}
5467 +
5468 static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
5469 const struct intel_plane_state *plane_state)
5470 {
5471 @@ -772,6 +831,25 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
5472 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
5473 }
5474
5475 +static bool
5476 +g4x_plane_get_hw_state(struct intel_plane *plane)
5477 +{
5478 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5479 + enum intel_display_power_domain power_domain;
5480 + enum pipe pipe = plane->pipe;
5481 + bool ret;
5482 +
5483 + power_domain = POWER_DOMAIN_PIPE(pipe);
5484 + if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
5485 + return false;
5486 +
5487 + ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
5488 +
5489 + intel_display_power_put(dev_priv, power_domain);
5490 +
5491 + return ret;
5492 +}
5493 +
5494 static int
5495 intel_check_sprite_plane(struct intel_plane *plane,
5496 struct intel_crtc_state *crtc_state,
5497 @@ -1227,6 +1305,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
5498
5499 intel_plane->update_plane = skl_update_plane;
5500 intel_plane->disable_plane = skl_disable_plane;
5501 + intel_plane->get_hw_state = skl_plane_get_hw_state;
5502
5503 plane_formats = skl_plane_formats;
5504 num_plane_formats = ARRAY_SIZE(skl_plane_formats);
5505 @@ -1237,6 +1316,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
5506
5507 intel_plane->update_plane = skl_update_plane;
5508 intel_plane->disable_plane = skl_disable_plane;
5509 + intel_plane->get_hw_state = skl_plane_get_hw_state;
5510
5511 plane_formats = skl_plane_formats;
5512 num_plane_formats = ARRAY_SIZE(skl_plane_formats);
5513 @@ -1247,6 +1327,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
5514
5515 intel_plane->update_plane = vlv_update_plane;
5516 intel_plane->disable_plane = vlv_disable_plane;
5517 + intel_plane->get_hw_state = vlv_plane_get_hw_state;
5518
5519 plane_formats = vlv_plane_formats;
5520 num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
5521 @@ -1262,6 +1343,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
5522
5523 intel_plane->update_plane = ivb_update_plane;
5524 intel_plane->disable_plane = ivb_disable_plane;
5525 + intel_plane->get_hw_state = ivb_plane_get_hw_state;
5526
5527 plane_formats = snb_plane_formats;
5528 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
5529 @@ -1272,6 +1354,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
5530
5531 intel_plane->update_plane = g4x_update_plane;
5532 intel_plane->disable_plane = g4x_disable_plane;
5533 + intel_plane->get_hw_state = g4x_plane_get_hw_state;
5534
5535 modifiers = i9xx_plane_format_modifiers;
5536 if (IS_GEN6(dev_priv)) {
5537 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
5538 index 330ca983828b..5744eb729d24 100644
5539 --- a/drivers/hid/hid-core.c
5540 +++ b/drivers/hid/hid-core.c
5541 @@ -2638,7 +2638,6 @@ static const struct hid_device_id hid_ignore_list[] = {
5542 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
5543 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
5544 { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
5545 - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
5546 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
5547 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
5548 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
5549 @@ -2908,6 +2907,17 @@ bool hid_ignore(struct hid_device *hdev)
5550 strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
5551 return true;
5552 break;
5553 + case USB_VENDOR_ID_ELAN:
5554 + /*
5555 + * Many Elan devices have a product id of 0x0401 and are handled
5556 + * by the elan_i2c input driver. But the ACPI HID ELAN0800 dev
5557 + * is not (and cannot be) handled by that driver ->
5558 + * Ignore all 0x0401 devs except for the ELAN0800 dev.
5559 + */
5560 + if (hdev->product == 0x0401 &&
5561 + strncmp(hdev->name, "ELAN0800", 8) != 0)
5562 + return true;
5563 + break;
5564 }
5565
5566 if (hdev->type == HID_TYPE_USBMOUSE &&
5567 diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
5568 index 0ee0df53b91b..79d5d89bc95e 100644
5569 --- a/drivers/media/dvb-frontends/ascot2e.c
5570 +++ b/drivers/media/dvb-frontends/ascot2e.c
5571 @@ -155,7 +155,9 @@ static int ascot2e_write_regs(struct ascot2e_priv *priv,
5572
5573 static int ascot2e_write_reg(struct ascot2e_priv *priv, u8 reg, u8 val)
5574 {
5575 - return ascot2e_write_regs(priv, reg, &val, 1);
5576 + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5577 +
5578 + return ascot2e_write_regs(priv, reg, &tmp, 1);
5579 }
5580
5581 static int ascot2e_read_regs(struct ascot2e_priv *priv,
5582 diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
5583 index 48ee9bc00c06..ccbd84fd6428 100644
5584 --- a/drivers/media/dvb-frontends/cxd2841er.c
5585 +++ b/drivers/media/dvb-frontends/cxd2841er.c
5586 @@ -257,7 +257,9 @@ static int cxd2841er_write_regs(struct cxd2841er_priv *priv,
5587 static int cxd2841er_write_reg(struct cxd2841er_priv *priv,
5588 u8 addr, u8 reg, u8 val)
5589 {
5590 - return cxd2841er_write_regs(priv, addr, reg, &val, 1);
5591 + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5592 +
5593 + return cxd2841er_write_regs(priv, addr, reg, &tmp, 1);
5594 }
5595
5596 static int cxd2841er_read_regs(struct cxd2841er_priv *priv,
5597 diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
5598 index 4bf5a551ba40..2ab8d83e5576 100644
5599 --- a/drivers/media/dvb-frontends/helene.c
5600 +++ b/drivers/media/dvb-frontends/helene.c
5601 @@ -331,7 +331,9 @@ static int helene_write_regs(struct helene_priv *priv,
5602
5603 static int helene_write_reg(struct helene_priv *priv, u8 reg, u8 val)
5604 {
5605 - return helene_write_regs(priv, reg, &val, 1);
5606 + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5607 +
5608 + return helene_write_regs(priv, reg, &tmp, 1);
5609 }
5610
5611 static int helene_read_regs(struct helene_priv *priv,
5612 diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
5613 index 68d759c4c52e..5c8b405f2ddc 100644
5614 --- a/drivers/media/dvb-frontends/horus3a.c
5615 +++ b/drivers/media/dvb-frontends/horus3a.c
5616 @@ -89,7 +89,9 @@ static int horus3a_write_regs(struct horus3a_priv *priv,
5617
5618 static int horus3a_write_reg(struct horus3a_priv *priv, u8 reg, u8 val)
5619 {
5620 - return horus3a_write_regs(priv, reg, &val, 1);
5621 + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5622 +
5623 + return horus3a_write_regs(priv, reg, &tmp, 1);
5624 }
5625
5626 static int horus3a_enter_power_save(struct horus3a_priv *priv)
5627 diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
5628 index 5bb1e73a10b4..ce7c443d3eac 100644
5629 --- a/drivers/media/dvb-frontends/itd1000.c
5630 +++ b/drivers/media/dvb-frontends/itd1000.c
5631 @@ -95,8 +95,9 @@ static int itd1000_read_reg(struct itd1000_state *state, u8 reg)
5632
5633 static inline int itd1000_write_reg(struct itd1000_state *state, u8 r, u8 v)
5634 {
5635 - int ret = itd1000_write_regs(state, r, &v, 1);
5636 - state->shadow[r] = v;
5637 + u8 tmp = v; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5638 + int ret = itd1000_write_regs(state, r, &tmp, 1);
5639 + state->shadow[r] = tmp;
5640 return ret;
5641 }
5642
5643 diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
5644 index 961b9a2508e0..0b23cbc021b8 100644
5645 --- a/drivers/media/dvb-frontends/mt312.c
5646 +++ b/drivers/media/dvb-frontends/mt312.c
5647 @@ -142,7 +142,10 @@ static inline int mt312_readreg(struct mt312_state *state,
5648 static inline int mt312_writereg(struct mt312_state *state,
5649 const enum mt312_reg_addr reg, const u8 val)
5650 {
5651 - return mt312_write(state, reg, &val, 1);
5652 + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5653 +
5654 +
5655 + return mt312_write(state, reg, &tmp, 1);
5656 }
5657
5658 static inline u32 mt312_div(u32 a, u32 b)
5659 diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
5660 index 02347598277a..db5dde3215f0 100644
5661 --- a/drivers/media/dvb-frontends/stb0899_drv.c
5662 +++ b/drivers/media/dvb-frontends/stb0899_drv.c
5663 @@ -539,7 +539,8 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data,
5664
5665 int stb0899_write_reg(struct stb0899_state *state, unsigned int reg, u8 data)
5666 {
5667 - return stb0899_write_regs(state, reg, &data, 1);
5668 + u8 tmp = data;
5669 + return stb0899_write_regs(state, reg, &tmp, 1);
5670 }
5671
5672 /*
5673 diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
5674 index 17a955d0031b..75509bec66e4 100644
5675 --- a/drivers/media/dvb-frontends/stb6100.c
5676 +++ b/drivers/media/dvb-frontends/stb6100.c
5677 @@ -226,12 +226,14 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st
5678
5679 static int stb6100_write_reg(struct stb6100_state *state, u8 reg, u8 data)
5680 {
5681 + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5682 +
5683 if (unlikely(reg >= STB6100_NUMREGS)) {
5684 dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg);
5685 return -EREMOTEIO;
5686 }
5687 - data = (data & stb6100_template[reg].mask) | stb6100_template[reg].set;
5688 - return stb6100_write_reg_range(state, &data, reg, 1);
5689 + tmp = (tmp & stb6100_template[reg].mask) | stb6100_template[reg].set;
5690 + return stb6100_write_reg_range(state, &tmp, reg, 1);
5691 }
5692
5693
5694 diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
5695 index f3529df8211d..1a726196c126 100644
5696 --- a/drivers/media/dvb-frontends/stv0367.c
5697 +++ b/drivers/media/dvb-frontends/stv0367.c
5698 @@ -166,7 +166,9 @@ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
5699
5700 static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
5701 {
5702 - return stv0367_writeregs(state, reg, &data, 1);
5703 + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5704 +
5705 + return stv0367_writeregs(state, reg, &tmp, 1);
5706 }
5707
5708 static u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
5709 diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
5710 index 7ef469c0c866..2695e1eb6d9c 100644
5711 --- a/drivers/media/dvb-frontends/stv090x.c
5712 +++ b/drivers/media/dvb-frontends/stv090x.c
5713 @@ -755,7 +755,9 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8
5714
5715 static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 data)
5716 {
5717 - return stv090x_write_regs(state, reg, &data, 1);
5718 + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5719 +
5720 + return stv090x_write_regs(state, reg, &tmp, 1);
5721 }
5722
5723 static int stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable)
5724 diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
5725 index 66eba38f1014..7e8e01389c55 100644
5726 --- a/drivers/media/dvb-frontends/stv6110x.c
5727 +++ b/drivers/media/dvb-frontends/stv6110x.c
5728 @@ -97,7 +97,9 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da
5729
5730 static int stv6110x_write_reg(struct stv6110x_state *stv6110x, u8 reg, u8 data)
5731 {
5732 - return stv6110x_write_regs(stv6110x, reg, &data, 1);
5733 + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5734 +
5735 + return stv6110x_write_regs(stv6110x, reg, &tmp, 1);
5736 }
5737
5738 static int stv6110x_init(struct dvb_frontend *fe)
5739 diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
5740 index 931e5c98da8a..b879e1571469 100644
5741 --- a/drivers/media/dvb-frontends/ts2020.c
5742 +++ b/drivers/media/dvb-frontends/ts2020.c
5743 @@ -368,7 +368,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc,
5744 gain2 = clamp_t(long, gain2, 0, 13);
5745 v_agc = clamp_t(long, v_agc, 400, 1100);
5746
5747 - *_gain = -(gain1 * 2330 +
5748 + *_gain = -((__s64)gain1 * 2330 +
5749 gain2 * 3500 +
5750 v_agc * 24 / 10 * 10 +
5751 10000);
5752 @@ -386,7 +386,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc,
5753 gain3 = clamp_t(long, gain3, 0, 6);
5754 v_agc = clamp_t(long, v_agc, 600, 1600);
5755
5756 - *_gain = -(gain1 * 2650 +
5757 + *_gain = -((__s64)gain1 * 2650 +
5758 gain2 * 3380 +
5759 gain3 * 2850 +
5760 v_agc * 176 / 100 * 10 -
5761 diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
5762 index 623355fc2666..3208b866d1cb 100644
5763 --- a/drivers/media/dvb-frontends/zl10039.c
5764 +++ b/drivers/media/dvb-frontends/zl10039.c
5765 @@ -134,7 +134,9 @@ static inline int zl10039_writereg(struct zl10039_state *state,
5766 const enum zl10039_reg_addr reg,
5767 const u8 val)
5768 {
5769 - return zl10039_write(state, reg, &val, 1);
5770 + const u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
5771 +
5772 + return zl10039_write(state, reg, &tmp, 1);
5773 }
5774
5775 static int zl10039_init(struct dvb_frontend *fe)
5776 diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
5777 index 5e320fa4a795..be26c029546b 100644
5778 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
5779 +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
5780 @@ -494,18 +494,23 @@ static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
5781
5782 static int lme2510_return_status(struct dvb_usb_device *d)
5783 {
5784 - int ret = 0;
5785 + int ret;
5786 u8 *data;
5787
5788 - data = kzalloc(10, GFP_KERNEL);
5789 + data = kzalloc(6, GFP_KERNEL);
5790 if (!data)
5791 return -ENOMEM;
5792
5793 - ret |= usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
5794 - 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200);
5795 - info("Firmware Status: %x (%x)", ret , data[2]);
5796 + ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
5797 + 0x06, 0x80, 0x0302, 0x00,
5798 + data, 0x6, 200);
5799 + if (ret != 6)
5800 + ret = -EINVAL;
5801 + else
5802 + ret = data[2];
5803 +
5804 + info("Firmware Status: %6ph", data);
5805
5806 - ret = (ret < 0) ? -ENODEV : data[2];
5807 kfree(data);
5808 return ret;
5809 }
5810 @@ -1071,8 +1076,6 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
5811
5812 if (adap->fe[0]) {
5813 info("FE Found M88RS2000");
5814 - dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config,
5815 - &d->i2c_adap);
5816 st->i2c_tuner_gate_w = 5;
5817 st->i2c_tuner_gate_r = 5;
5818 st->i2c_tuner_addr = 0x60;
5819 @@ -1138,17 +1141,18 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
5820 ret = st->tuner_config;
5821 break;
5822 case TUNER_RS2000:
5823 - ret = st->tuner_config;
5824 + if (dvb_attach(ts2020_attach, adap->fe[0],
5825 + &ts2020_config, &d->i2c_adap))
5826 + ret = st->tuner_config;
5827 break;
5828 default:
5829 break;
5830 }
5831
5832 - if (ret)
5833 + if (ret) {
5834 info("TUN Found %s tuner", tun_msg[ret]);
5835 - else {
5836 - info("TUN No tuner found --- resetting device");
5837 - lme_coldreset(d);
5838 + } else {
5839 + info("TUN No tuner found");
5840 return -ENODEV;
5841 }
5842
5843 @@ -1189,6 +1193,7 @@ static int lme2510_get_adapter_count(struct dvb_usb_device *d)
5844 static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
5845 {
5846 struct lme2510_state *st = d->priv;
5847 + int status;
5848
5849 usb_reset_configuration(d->udev);
5850
5851 @@ -1197,12 +1202,16 @@ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
5852
5853 st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware;
5854
5855 - if (lme2510_return_status(d) == 0x44) {
5856 + status = lme2510_return_status(d);
5857 + if (status == 0x44) {
5858 *name = lme_firmware_switch(d, 0);
5859 return COLD;
5860 }
5861
5862 - return 0;
5863 + if (status != 0x47)
5864 + return -EINVAL;
5865 +
5866 + return WARM;
5867 }
5868
5869 static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
5870 diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
5871 index 37dea0adc695..cfe86b4864b3 100644
5872 --- a/drivers/media/usb/dvb-usb/cxusb.c
5873 +++ b/drivers/media/usb/dvb-usb/cxusb.c
5874 @@ -677,6 +677,8 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component,
5875 case XC2028_RESET_CLK:
5876 deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg);
5877 break;
5878 + case XC2028_I2C_FLUSH:
5879 + break;
5880 default:
5881 deb_info("%s: unknown command %d, arg %d\n", __func__,
5882 command, arg);
5883 diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
5884 index 92098c1b78e5..9be1e658ef47 100644
5885 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c
5886 +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
5887 @@ -430,6 +430,7 @@ static int stk7700ph_xc3028_callback(void *ptr, int component,
5888 state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
5889 break;
5890 case XC2028_RESET_CLK:
5891 + case XC2028_I2C_FLUSH:
5892 break;
5893 default:
5894 err("%s: unknown command %d, arg %d\n", __func__,
5895 diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
5896 index dbe29c6c4d8b..1e8cbaf36896 100644
5897 --- a/drivers/media/usb/hdpvr/hdpvr-core.c
5898 +++ b/drivers/media/usb/hdpvr/hdpvr-core.c
5899 @@ -292,7 +292,7 @@ static int hdpvr_probe(struct usb_interface *interface,
5900 /* register v4l2_device early so it can be used for printks */
5901 if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
5902 dev_err(&interface->dev, "v4l2_device_register failed\n");
5903 - goto error;
5904 + goto error_free_dev;
5905 }
5906
5907 mutex_init(&dev->io_mutex);
5908 @@ -301,7 +301,7 @@ static int hdpvr_probe(struct usb_interface *interface,
5909 dev->usbc_buf = kmalloc(64, GFP_KERNEL);
5910 if (!dev->usbc_buf) {
5911 v4l2_err(&dev->v4l2_dev, "Out of memory\n");
5912 - goto error;
5913 + goto error_v4l2_unregister;
5914 }
5915
5916 init_waitqueue_head(&dev->wait_buffer);
5917 @@ -339,13 +339,13 @@ static int hdpvr_probe(struct usb_interface *interface,
5918 }
5919 if (!dev->bulk_in_endpointAddr) {
5920 v4l2_err(&dev->v4l2_dev, "Could not find bulk-in endpoint\n");
5921 - goto error;
5922 + goto error_put_usb;
5923 }
5924
5925 /* init the device */
5926 if (hdpvr_device_init(dev)) {
5927 v4l2_err(&dev->v4l2_dev, "device init failed\n");
5928 - goto error;
5929 + goto error_put_usb;
5930 }
5931
5932 mutex_lock(&dev->io_mutex);
5933 @@ -353,7 +353,7 @@ static int hdpvr_probe(struct usb_interface *interface,
5934 mutex_unlock(&dev->io_mutex);
5935 v4l2_err(&dev->v4l2_dev,
5936 "allocating transfer buffers failed\n");
5937 - goto error;
5938 + goto error_put_usb;
5939 }
5940 mutex_unlock(&dev->io_mutex);
5941
5942 @@ -361,7 +361,7 @@ static int hdpvr_probe(struct usb_interface *interface,
5943 retval = hdpvr_register_i2c_adapter(dev);
5944 if (retval < 0) {
5945 v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
5946 - goto error;
5947 + goto error_free_buffers;
5948 }
5949
5950 client = hdpvr_register_ir_rx_i2c(dev);
5951 @@ -394,13 +394,17 @@ static int hdpvr_probe(struct usb_interface *interface,
5952 reg_fail:
5953 #if IS_ENABLED(CONFIG_I2C)
5954 i2c_del_adapter(&dev->i2c_adapter);
5955 +error_free_buffers:
5956 #endif
5957 + hdpvr_free_buffers(dev);
5958 +error_put_usb:
5959 + usb_put_dev(dev->udev);
5960 + kfree(dev->usbc_buf);
5961 +error_v4l2_unregister:
5962 + v4l2_device_unregister(&dev->v4l2_dev);
5963 +error_free_dev:
5964 + kfree(dev);
5965 error:
5966 - if (dev) {
5967 - flush_work(&dev->worker);
5968 - /* this frees allocated memory */
5969 - hdpvr_delete(dev);
5970 - }
5971 return retval;
5972 }
5973
5974 diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
5975 index 821f2aa299ae..cbeea8343a5c 100644
5976 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
5977 +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
5978 @@ -18,8 +18,18 @@
5979 #include <linux/videodev2.h>
5980 #include <linux/v4l2-subdev.h>
5981 #include <media/v4l2-dev.h>
5982 +#include <media/v4l2-fh.h>
5983 +#include <media/v4l2-ctrls.h>
5984 #include <media/v4l2-ioctl.h>
5985
5986 +/* Use the same argument order as copy_in_user */
5987 +#define assign_in_user(to, from) \
5988 +({ \
5989 + typeof(*from) __assign_tmp; \
5990 + \
5991 + get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \
5992 +})
5993 +
5994 static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5995 {
5996 long ret = -ENOIOCTLCMD;
5997 @@ -46,135 +56,75 @@ struct v4l2_window32 {
5998 __u8 global_alpha;
5999 };
6000
6001 -static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
6002 -{
6003 - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) ||
6004 - copy_from_user(&kp->w, &up->w, sizeof(up->w)) ||
6005 - get_user(kp->field, &up->field) ||
6006 - get_user(kp->chromakey, &up->chromakey) ||
6007 - get_user(kp->clipcount, &up->clipcount) ||
6008 - get_user(kp->global_alpha, &up->global_alpha))
6009 - return -EFAULT;
6010 - if (kp->clipcount > 2048)
6011 - return -EINVAL;
6012 - if (kp->clipcount) {
6013 - struct v4l2_clip32 __user *uclips;
6014 - struct v4l2_clip __user *kclips;
6015 - int n = kp->clipcount;
6016 - compat_caddr_t p;
6017 -
6018 - if (get_user(p, &up->clips))
6019 - return -EFAULT;
6020 - uclips = compat_ptr(p);
6021 - kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip));
6022 - kp->clips = kclips;
6023 - while (--n >= 0) {
6024 - if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
6025 - return -EFAULT;
6026 - if (put_user(n ? kclips + 1 : NULL, &kclips->next))
6027 - return -EFAULT;
6028 - uclips += 1;
6029 - kclips += 1;
6030 - }
6031 - } else
6032 - kp->clips = NULL;
6033 - return 0;
6034 -}
6035 -
6036 -static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
6037 -{
6038 - if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) ||
6039 - put_user(kp->field, &up->field) ||
6040 - put_user(kp->chromakey, &up->chromakey) ||
6041 - put_user(kp->clipcount, &up->clipcount) ||
6042 - put_user(kp->global_alpha, &up->global_alpha))
6043 - return -EFAULT;
6044 - return 0;
6045 -}
6046 -
6047 -static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
6048 -{
6049 - if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
6050 - return -EFAULT;
6051 - return 0;
6052 -}
6053 -
6054 -static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
6055 - struct v4l2_pix_format_mplane __user *up)
6056 -{
6057 - if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
6058 - return -EFAULT;
6059 - return 0;
6060 -}
6061 -
6062 -static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
6063 +static int get_v4l2_window32(struct v4l2_window __user *kp,
6064 + struct v4l2_window32 __user *up,
6065 + void __user *aux_buf, u32 aux_space)
6066 {
6067 - if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
6068 - return -EFAULT;
6069 - return 0;
6070 -}
6071 -
6072 -static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
6073 - struct v4l2_pix_format_mplane __user *up)
6074 -{
6075 - if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
6076 - return -EFAULT;
6077 - return 0;
6078 -}
6079 -
6080 -static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
6081 -{
6082 - if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
6083 - return -EFAULT;
6084 - return 0;
6085 -}
6086 -
6087 -static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
6088 -{
6089 - if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format)))
6090 + struct v4l2_clip32 __user *uclips;
6091 + struct v4l2_clip __user *kclips;
6092 + compat_caddr_t p;
6093 + u32 clipcount;
6094 +
6095 + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6096 + copy_in_user(&kp->w, &up->w, sizeof(up->w)) ||
6097 + assign_in_user(&kp->field, &up->field) ||
6098 + assign_in_user(&kp->chromakey, &up->chromakey) ||
6099 + assign_in_user(&kp->global_alpha, &up->global_alpha) ||
6100 + get_user(clipcount, &up->clipcount) ||
6101 + put_user(clipcount, &kp->clipcount))
6102 return -EFAULT;
6103 - return 0;
6104 -}
6105 + if (clipcount > 2048)
6106 + return -EINVAL;
6107 + if (!clipcount)
6108 + return put_user(NULL, &kp->clips);
6109
6110 -static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
6111 -{
6112 - if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
6113 + if (get_user(p, &up->clips))
6114 return -EFAULT;
6115 - return 0;
6116 -}
6117 -
6118 -static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
6119 -{
6120 - if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
6121 + uclips = compat_ptr(p);
6122 + if (aux_space < clipcount * sizeof(*kclips))
6123 return -EFAULT;
6124 - return 0;
6125 -}
6126 -
6127 -static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
6128 -{
6129 - if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format)))
6130 + kclips = aux_buf;
6131 + if (put_user(kclips, &kp->clips))
6132 return -EFAULT;
6133 - return 0;
6134 -}
6135
6136 -static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
6137 -{
6138 - if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format)))
6139 - return -EFAULT;
6140 + while (clipcount--) {
6141 + if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
6142 + return -EFAULT;
6143 + if (put_user(clipcount ? kclips + 1 : NULL, &kclips->next))
6144 + return -EFAULT;
6145 + uclips++;
6146 + kclips++;
6147 + }
6148 return 0;
6149 }
6150
6151 -static inline int get_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up)
6152 +static int put_v4l2_window32(struct v4l2_window __user *kp,
6153 + struct v4l2_window32 __user *up)
6154 {
6155 - if (copy_from_user(kp, up, sizeof(struct v4l2_meta_format)))
6156 + struct v4l2_clip __user *kclips = kp->clips;
6157 + struct v4l2_clip32 __user *uclips;
6158 + compat_caddr_t p;
6159 + u32 clipcount;
6160 +
6161 + if (copy_in_user(&up->w, &kp->w, sizeof(kp->w)) ||
6162 + assign_in_user(&up->field, &kp->field) ||
6163 + assign_in_user(&up->chromakey, &kp->chromakey) ||
6164 + assign_in_user(&up->global_alpha, &kp->global_alpha) ||
6165 + get_user(clipcount, &kp->clipcount) ||
6166 + put_user(clipcount, &up->clipcount))
6167 return -EFAULT;
6168 - return 0;
6169 -}
6170 + if (!clipcount)
6171 + return 0;
6172
6173 -static inline int put_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up)
6174 -{
6175 - if (copy_to_user(up, kp, sizeof(struct v4l2_meta_format)))
6176 + if (get_user(p, &up->clips))
6177 return -EFAULT;
6178 + uclips = compat_ptr(p);
6179 + while (clipcount--) {
6180 + if (copy_in_user(&uclips->c, &kclips->c, sizeof(uclips->c)))
6181 + return -EFAULT;
6182 + uclips++;
6183 + kclips++;
6184 + }
6185 return 0;
6186 }
6187
6188 @@ -209,101 +159,164 @@ struct v4l2_create_buffers32 {
6189 __u32 reserved[8];
6190 };
6191
6192 -static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
6193 +static int __bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size)
6194 {
6195 - if (get_user(kp->type, &up->type))
6196 + u32 type;
6197 +
6198 + if (get_user(type, &up->type))
6199 return -EFAULT;
6200
6201 - switch (kp->type) {
6202 + switch (type) {
6203 + case V4L2_BUF_TYPE_VIDEO_OVERLAY:
6204 + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: {
6205 + u32 clipcount;
6206 +
6207 + if (get_user(clipcount, &up->fmt.win.clipcount))
6208 + return -EFAULT;
6209 + if (clipcount > 2048)
6210 + return -EINVAL;
6211 + *size = clipcount * sizeof(struct v4l2_clip);
6212 + return 0;
6213 + }
6214 + default:
6215 + *size = 0;
6216 + return 0;
6217 + }
6218 +}
6219 +
6220 +static int bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size)
6221 +{
6222 + if (!access_ok(VERIFY_READ, up, sizeof(*up)))
6223 + return -EFAULT;
6224 + return __bufsize_v4l2_format(up, size);
6225 +}
6226 +
6227 +static int __get_v4l2_format32(struct v4l2_format __user *kp,
6228 + struct v4l2_format32 __user *up,
6229 + void __user *aux_buf, u32 aux_space)
6230 +{
6231 + u32 type;
6232 +
6233 + if (get_user(type, &up->type) || put_user(type, &kp->type))
6234 + return -EFAULT;
6235 +
6236 + switch (type) {
6237 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
6238 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
6239 - return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
6240 + return copy_in_user(&kp->fmt.pix, &up->fmt.pix,
6241 + sizeof(kp->fmt.pix)) ? -EFAULT : 0;
6242 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
6243 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
6244 - return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
6245 - &up->fmt.pix_mp);
6246 + return copy_in_user(&kp->fmt.pix_mp, &up->fmt.pix_mp,
6247 + sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0;
6248 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
6249 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
6250 - return get_v4l2_window32(&kp->fmt.win, &up->fmt.win);
6251 + return get_v4l2_window32(&kp->fmt.win, &up->fmt.win,
6252 + aux_buf, aux_space);
6253 case V4L2_BUF_TYPE_VBI_CAPTURE:
6254 case V4L2_BUF_TYPE_VBI_OUTPUT:
6255 - return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
6256 + return copy_in_user(&kp->fmt.vbi, &up->fmt.vbi,
6257 + sizeof(kp->fmt.vbi)) ? -EFAULT : 0;
6258 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
6259 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
6260 - return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
6261 + return copy_in_user(&kp->fmt.sliced, &up->fmt.sliced,
6262 + sizeof(kp->fmt.sliced)) ? -EFAULT : 0;
6263 case V4L2_BUF_TYPE_SDR_CAPTURE:
6264 case V4L2_BUF_TYPE_SDR_OUTPUT:
6265 - return get_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr);
6266 + return copy_in_user(&kp->fmt.sdr, &up->fmt.sdr,
6267 + sizeof(kp->fmt.sdr)) ? -EFAULT : 0;
6268 case V4L2_BUF_TYPE_META_CAPTURE:
6269 - return get_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta);
6270 + return copy_in_user(&kp->fmt.meta, &up->fmt.meta,
6271 + sizeof(kp->fmt.meta)) ? -EFAULT : 0;
6272 default:
6273 - pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
6274 - kp->type);
6275 return -EINVAL;
6276 }
6277 }
6278
6279 -static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
6280 +static int get_v4l2_format32(struct v4l2_format __user *kp,
6281 + struct v4l2_format32 __user *up,
6282 + void __user *aux_buf, u32 aux_space)
6283 +{
6284 + if (!access_ok(VERIFY_READ, up, sizeof(*up)))
6285 + return -EFAULT;
6286 + return __get_v4l2_format32(kp, up, aux_buf, aux_space);
6287 +}
6288 +
6289 +static int bufsize_v4l2_create(struct v4l2_create_buffers32 __user *up,
6290 + u32 *size)
6291 {
6292 - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
6293 + if (!access_ok(VERIFY_READ, up, sizeof(*up)))
6294 return -EFAULT;
6295 - return __get_v4l2_format32(kp, up);
6296 + return __bufsize_v4l2_format(&up->format, size);
6297 }
6298
6299 -static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
6300 +static int get_v4l2_create32(struct v4l2_create_buffers __user *kp,
6301 + struct v4l2_create_buffers32 __user *up,
6302 + void __user *aux_buf, u32 aux_space)
6303 {
6304 - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
6305 - copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
6306 + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6307 + copy_in_user(kp, up,
6308 + offsetof(struct v4l2_create_buffers32, format)))
6309 return -EFAULT;
6310 - return __get_v4l2_format32(&kp->format, &up->format);
6311 + return __get_v4l2_format32(&kp->format, &up->format,
6312 + aux_buf, aux_space);
6313 }
6314
6315 -static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
6316 +static int __put_v4l2_format32(struct v4l2_format __user *kp,
6317 + struct v4l2_format32 __user *up)
6318 {
6319 - if (put_user(kp->type, &up->type))
6320 + u32 type;
6321 +
6322 + if (get_user(type, &kp->type))
6323 return -EFAULT;
6324
6325 - switch (kp->type) {
6326 + switch (type) {
6327 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
6328 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
6329 - return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
6330 + return copy_in_user(&up->fmt.pix, &kp->fmt.pix,
6331 + sizeof(kp->fmt.pix)) ? -EFAULT : 0;
6332 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
6333 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
6334 - return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
6335 - &up->fmt.pix_mp);
6336 + return copy_in_user(&up->fmt.pix_mp, &kp->fmt.pix_mp,
6337 + sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0;
6338 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
6339 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
6340 return put_v4l2_window32(&kp->fmt.win, &up->fmt.win);
6341 case V4L2_BUF_TYPE_VBI_CAPTURE:
6342 case V4L2_BUF_TYPE_VBI_OUTPUT:
6343 - return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
6344 + return copy_in_user(&up->fmt.vbi, &kp->fmt.vbi,
6345 + sizeof(kp->fmt.vbi)) ? -EFAULT : 0;
6346 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
6347 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
6348 - return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
6349 + return copy_in_user(&up->fmt.sliced, &kp->fmt.sliced,
6350 + sizeof(kp->fmt.sliced)) ? -EFAULT : 0;
6351 case V4L2_BUF_TYPE_SDR_CAPTURE:
6352 case V4L2_BUF_TYPE_SDR_OUTPUT:
6353 - return put_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr);
6354 + return copy_in_user(&up->fmt.sdr, &kp->fmt.sdr,
6355 + sizeof(kp->fmt.sdr)) ? -EFAULT : 0;
6356 case V4L2_BUF_TYPE_META_CAPTURE:
6357 - return put_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta);
6358 + return copy_in_user(&up->fmt.meta, &kp->fmt.meta,
6359 + sizeof(kp->fmt.meta)) ? -EFAULT : 0;
6360 default:
6361 - pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
6362 - kp->type);
6363 return -EINVAL;
6364 }
6365 }
6366
6367 -static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
6368 +static int put_v4l2_format32(struct v4l2_format __user *kp,
6369 + struct v4l2_format32 __user *up)
6370 {
6371 - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)))
6372 + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
6373 return -EFAULT;
6374 return __put_v4l2_format32(kp, up);
6375 }
6376
6377 -static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
6378 +static int put_v4l2_create32(struct v4l2_create_buffers __user *kp,
6379 + struct v4l2_create_buffers32 __user *up)
6380 {
6381 - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
6382 - copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) ||
6383 - copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
6384 + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
6385 + copy_in_user(up, kp,
6386 + offsetof(struct v4l2_create_buffers32, format)) ||
6387 + copy_in_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
6388 return -EFAULT;
6389 return __put_v4l2_format32(&kp->format, &up->format);
6390 }
6391 @@ -317,25 +330,28 @@ struct v4l2_standard32 {
6392 __u32 reserved[4];
6393 };
6394
6395 -static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
6396 +static int get_v4l2_standard32(struct v4l2_standard __user *kp,
6397 + struct v4l2_standard32 __user *up)
6398 {
6399 /* other fields are not set by the user, nor used by the driver */
6400 - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) ||
6401 - get_user(kp->index, &up->index))
6402 + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6403 + assign_in_user(&kp->index, &up->index))
6404 return -EFAULT;
6405 return 0;
6406 }
6407
6408 -static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
6409 +static int put_v4l2_standard32(struct v4l2_standard __user *kp,
6410 + struct v4l2_standard32 __user *up)
6411 {
6412 - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
6413 - put_user(kp->index, &up->index) ||
6414 - put_user(kp->id, &up->id) ||
6415 - copy_to_user(up->name, kp->name, 24) ||
6416 - copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
6417 - put_user(kp->framelines, &up->framelines) ||
6418 - copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
6419 - return -EFAULT;
6420 + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
6421 + assign_in_user(&up->index, &kp->index) ||
6422 + assign_in_user(&up->id, &kp->id) ||
6423 + copy_in_user(up->name, kp->name, sizeof(up->name)) ||
6424 + copy_in_user(&up->frameperiod, &kp->frameperiod,
6425 + sizeof(up->frameperiod)) ||
6426 + assign_in_user(&up->framelines, &kp->framelines) ||
6427 + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
6428 + return -EFAULT;
6429 return 0;
6430 }
6431
6432 @@ -374,136 +390,186 @@ struct v4l2_buffer32 {
6433 __u32 reserved;
6434 };
6435
6436 -static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
6437 - enum v4l2_memory memory)
6438 +static int get_v4l2_plane32(struct v4l2_plane __user *up,
6439 + struct v4l2_plane32 __user *up32,
6440 + enum v4l2_memory memory)
6441 {
6442 - void __user *up_pln;
6443 - compat_long_t p;
6444 + compat_ulong_t p;
6445
6446 if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
6447 - copy_in_user(&up->data_offset, &up32->data_offset,
6448 - sizeof(__u32)))
6449 + copy_in_user(&up->data_offset, &up32->data_offset,
6450 + sizeof(up->data_offset)))
6451 return -EFAULT;
6452
6453 - if (memory == V4L2_MEMORY_USERPTR) {
6454 - if (get_user(p, &up32->m.userptr))
6455 - return -EFAULT;
6456 - up_pln = compat_ptr(p);
6457 - if (put_user((unsigned long)up_pln, &up->m.userptr))
6458 + switch (memory) {
6459 + case V4L2_MEMORY_MMAP:
6460 + case V4L2_MEMORY_OVERLAY:
6461 + if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
6462 + sizeof(up32->m.mem_offset)))
6463 return -EFAULT;
6464 - } else if (memory == V4L2_MEMORY_DMABUF) {
6465 - if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int)))
6466 + break;
6467 + case V4L2_MEMORY_USERPTR:
6468 + if (get_user(p, &up32->m.userptr) ||
6469 + put_user((unsigned long)compat_ptr(p), &up->m.userptr))
6470 return -EFAULT;
6471 - } else {
6472 - if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
6473 - sizeof(__u32)))
6474 + break;
6475 + case V4L2_MEMORY_DMABUF:
6476 + if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(up32->m.fd)))
6477 return -EFAULT;
6478 + break;
6479 }
6480
6481 return 0;
6482 }
6483
6484 -static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
6485 - enum v4l2_memory memory)
6486 +static int put_v4l2_plane32(struct v4l2_plane __user *up,
6487 + struct v4l2_plane32 __user *up32,
6488 + enum v4l2_memory memory)
6489 {
6490 + unsigned long p;
6491 +
6492 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
6493 - copy_in_user(&up32->data_offset, &up->data_offset,
6494 - sizeof(__u32)))
6495 + copy_in_user(&up32->data_offset, &up->data_offset,
6496 + sizeof(up->data_offset)))
6497 return -EFAULT;
6498
6499 - /* For MMAP, driver might've set up the offset, so copy it back.
6500 - * USERPTR stays the same (was userspace-provided), so no copying. */
6501 - if (memory == V4L2_MEMORY_MMAP)
6502 + switch (memory) {
6503 + case V4L2_MEMORY_MMAP:
6504 + case V4L2_MEMORY_OVERLAY:
6505 if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
6506 - sizeof(__u32)))
6507 + sizeof(up->m.mem_offset)))
6508 + return -EFAULT;
6509 + break;
6510 + case V4L2_MEMORY_USERPTR:
6511 + if (get_user(p, &up->m.userptr) ||
6512 + put_user((compat_ulong_t)ptr_to_compat((__force void *)p),
6513 + &up32->m.userptr))
6514 return -EFAULT;
6515 - /* For DMABUF, driver might've set up the fd, so copy it back. */
6516 - if (memory == V4L2_MEMORY_DMABUF)
6517 - if (copy_in_user(&up32->m.fd, &up->m.fd,
6518 - sizeof(int)))
6519 + break;
6520 + case V4L2_MEMORY_DMABUF:
6521 + if (copy_in_user(&up32->m.fd, &up->m.fd, sizeof(up->m.fd)))
6522 return -EFAULT;
6523 + break;
6524 + }
6525 +
6526 + return 0;
6527 +}
6528 +
6529 +static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *up, u32 *size)
6530 +{
6531 + u32 type;
6532 + u32 length;
6533 +
6534 + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6535 + get_user(type, &up->type) ||
6536 + get_user(length, &up->length))
6537 + return -EFAULT;
6538
6539 + if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
6540 + if (length > VIDEO_MAX_PLANES)
6541 + return -EINVAL;
6542 +
6543 + /*
6544 + * We don't really care if userspace decides to kill itself
6545 + * by passing a very big length value
6546 + */
6547 + *size = length * sizeof(struct v4l2_plane);
6548 + } else {
6549 + *size = 0;
6550 + }
6551 return 0;
6552 }
6553
6554 -static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
6555 +static int get_v4l2_buffer32(struct v4l2_buffer __user *kp,
6556 + struct v4l2_buffer32 __user *up,
6557 + void __user *aux_buf, u32 aux_space)
6558 {
6559 + u32 type;
6560 + u32 length;
6561 + enum v4l2_memory memory;
6562 struct v4l2_plane32 __user *uplane32;
6563 struct v4l2_plane __user *uplane;
6564 compat_caddr_t p;
6565 int ret;
6566
6567 - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
6568 - get_user(kp->index, &up->index) ||
6569 - get_user(kp->type, &up->type) ||
6570 - get_user(kp->flags, &up->flags) ||
6571 - get_user(kp->memory, &up->memory) ||
6572 - get_user(kp->length, &up->length))
6573 - return -EFAULT;
6574 + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6575 + assign_in_user(&kp->index, &up->index) ||
6576 + get_user(type, &up->type) ||
6577 + put_user(type, &kp->type) ||
6578 + assign_in_user(&kp->flags, &up->flags) ||
6579 + get_user(memory, &up->memory) ||
6580 + put_user(memory, &kp->memory) ||
6581 + get_user(length, &up->length) ||
6582 + put_user(length, &kp->length))
6583 + return -EFAULT;
6584
6585 - if (V4L2_TYPE_IS_OUTPUT(kp->type))
6586 - if (get_user(kp->bytesused, &up->bytesused) ||
6587 - get_user(kp->field, &up->field) ||
6588 - get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
6589 - get_user(kp->timestamp.tv_usec,
6590 - &up->timestamp.tv_usec))
6591 + if (V4L2_TYPE_IS_OUTPUT(type))
6592 + if (assign_in_user(&kp->bytesused, &up->bytesused) ||
6593 + assign_in_user(&kp->field, &up->field) ||
6594 + assign_in_user(&kp->timestamp.tv_sec,
6595 + &up->timestamp.tv_sec) ||
6596 + assign_in_user(&kp->timestamp.tv_usec,
6597 + &up->timestamp.tv_usec))
6598 return -EFAULT;
6599
6600 - if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
6601 - unsigned int num_planes;
6602 + if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
6603 + u32 num_planes = length;
6604
6605 - if (kp->length == 0) {
6606 - kp->m.planes = NULL;
6607 - /* num_planes == 0 is legal, e.g. when userspace doesn't
6608 - * need planes array on DQBUF*/
6609 - return 0;
6610 - } else if (kp->length > VIDEO_MAX_PLANES) {
6611 - return -EINVAL;
6612 + if (num_planes == 0) {
6613 + /*
6614 + * num_planes == 0 is legal, e.g. when userspace doesn't
6615 + * need planes array on DQBUF
6616 + */
6617 + return put_user(NULL, &kp->m.planes);
6618 }
6619 + if (num_planes > VIDEO_MAX_PLANES)
6620 + return -EINVAL;
6621
6622 if (get_user(p, &up->m.planes))
6623 return -EFAULT;
6624
6625 uplane32 = compat_ptr(p);
6626 if (!access_ok(VERIFY_READ, uplane32,
6627 - kp->length * sizeof(struct v4l2_plane32)))
6628 + num_planes * sizeof(*uplane32)))
6629 return -EFAULT;
6630
6631 - /* We don't really care if userspace decides to kill itself
6632 - * by passing a very big num_planes value */
6633 - uplane = compat_alloc_user_space(kp->length *
6634 - sizeof(struct v4l2_plane));
6635 - kp->m.planes = (__force struct v4l2_plane *)uplane;
6636 + /*
6637 + * We don't really care if userspace decides to kill itself
6638 + * by passing a very big num_planes value
6639 + */
6640 + if (aux_space < num_planes * sizeof(*uplane))
6641 + return -EFAULT;
6642
6643 - for (num_planes = 0; num_planes < kp->length; num_planes++) {
6644 - ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
6645 + uplane = aux_buf;
6646 + if (put_user((__force struct v4l2_plane *)uplane,
6647 + &kp->m.planes))
6648 + return -EFAULT;
6649 +
6650 + while (num_planes--) {
6651 + ret = get_v4l2_plane32(uplane, uplane32, memory);
6652 if (ret)
6653 return ret;
6654 - ++uplane;
6655 - ++uplane32;
6656 + uplane++;
6657 + uplane32++;
6658 }
6659 } else {
6660 - switch (kp->memory) {
6661 + switch (memory) {
6662 case V4L2_MEMORY_MMAP:
6663 - if (get_user(kp->m.offset, &up->m.offset))
6664 + case V4L2_MEMORY_OVERLAY:
6665 + if (assign_in_user(&kp->m.offset, &up->m.offset))
6666 return -EFAULT;
6667 break;
6668 - case V4L2_MEMORY_USERPTR:
6669 - {
6670 - compat_long_t tmp;
6671 + case V4L2_MEMORY_USERPTR: {
6672 + compat_ulong_t userptr;
6673
6674 - if (get_user(tmp, &up->m.userptr))
6675 - return -EFAULT;
6676 -
6677 - kp->m.userptr = (unsigned long)compat_ptr(tmp);
6678 - }
6679 - break;
6680 - case V4L2_MEMORY_OVERLAY:
6681 - if (get_user(kp->m.offset, &up->m.offset))
6682 + if (get_user(userptr, &up->m.userptr) ||
6683 + put_user((unsigned long)compat_ptr(userptr),
6684 + &kp->m.userptr))
6685 return -EFAULT;
6686 break;
6687 + }
6688 case V4L2_MEMORY_DMABUF:
6689 - if (get_user(kp->m.fd, &up->m.fd))
6690 + if (assign_in_user(&kp->m.fd, &up->m.fd))
6691 return -EFAULT;
6692 break;
6693 }
6694 @@ -512,65 +578,70 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
6695 return 0;
6696 }
6697
6698 -static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
6699 +static int put_v4l2_buffer32(struct v4l2_buffer __user *kp,
6700 + struct v4l2_buffer32 __user *up)
6701 {
6702 + u32 type;
6703 + u32 length;
6704 + enum v4l2_memory memory;
6705 struct v4l2_plane32 __user *uplane32;
6706 struct v4l2_plane __user *uplane;
6707 compat_caddr_t p;
6708 - int num_planes;
6709 int ret;
6710
6711 - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) ||
6712 - put_user(kp->index, &up->index) ||
6713 - put_user(kp->type, &up->type) ||
6714 - put_user(kp->flags, &up->flags) ||
6715 - put_user(kp->memory, &up->memory))
6716 - return -EFAULT;
6717 + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
6718 + assign_in_user(&up->index, &kp->index) ||
6719 + get_user(type, &kp->type) ||
6720 + put_user(type, &up->type) ||
6721 + assign_in_user(&up->flags, &kp->flags) ||
6722 + get_user(memory, &kp->memory) ||
6723 + put_user(memory, &up->memory))
6724 + return -EFAULT;
6725
6726 - if (put_user(kp->bytesused, &up->bytesused) ||
6727 - put_user(kp->field, &up->field) ||
6728 - put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
6729 - put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
6730 - copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
6731 - put_user(kp->sequence, &up->sequence) ||
6732 - put_user(kp->reserved2, &up->reserved2) ||
6733 - put_user(kp->reserved, &up->reserved) ||
6734 - put_user(kp->length, &up->length))
6735 - return -EFAULT;
6736 + if (assign_in_user(&up->bytesused, &kp->bytesused) ||
6737 + assign_in_user(&up->field, &kp->field) ||
6738 + assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) ||
6739 + assign_in_user(&up->timestamp.tv_usec, &kp->timestamp.tv_usec) ||
6740 + copy_in_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) ||
6741 + assign_in_user(&up->sequence, &kp->sequence) ||
6742 + assign_in_user(&up->reserved2, &kp->reserved2) ||
6743 + assign_in_user(&up->reserved, &kp->reserved) ||
6744 + get_user(length, &kp->length) ||
6745 + put_user(length, &up->length))
6746 + return -EFAULT;
6747 +
6748 + if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
6749 + u32 num_planes = length;
6750
6751 - if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
6752 - num_planes = kp->length;
6753 if (num_planes == 0)
6754 return 0;
6755
6756 - uplane = (__force struct v4l2_plane __user *)kp->m.planes;
6757 + if (get_user(uplane, ((__force struct v4l2_plane __user **)&kp->m.planes)))
6758 + return -EFAULT;
6759 if (get_user(p, &up->m.planes))
6760 return -EFAULT;
6761 uplane32 = compat_ptr(p);
6762
6763 - while (--num_planes >= 0) {
6764 - ret = put_v4l2_plane32(uplane, uplane32, kp->memory);
6765 + while (num_planes--) {
6766 + ret = put_v4l2_plane32(uplane, uplane32, memory);
6767 if (ret)
6768 return ret;
6769 ++uplane;
6770 ++uplane32;
6771 }
6772 } else {
6773 - switch (kp->memory) {
6774 + switch (memory) {
6775 case V4L2_MEMORY_MMAP:
6776 - if (put_user(kp->m.offset, &up->m.offset))
6777 + case V4L2_MEMORY_OVERLAY:
6778 + if (assign_in_user(&up->m.offset, &kp->m.offset))
6779 return -EFAULT;
6780 break;
6781 case V4L2_MEMORY_USERPTR:
6782 - if (put_user(kp->m.userptr, &up->m.userptr))
6783 - return -EFAULT;
6784 - break;
6785 - case V4L2_MEMORY_OVERLAY:
6786 - if (put_user(kp->m.offset, &up->m.offset))
6787 + if (assign_in_user(&up->m.userptr, &kp->m.userptr))
6788 return -EFAULT;
6789 break;
6790 case V4L2_MEMORY_DMABUF:
6791 - if (put_user(kp->m.fd, &up->m.fd))
6792 + if (assign_in_user(&up->m.fd, &kp->m.fd))
6793 return -EFAULT;
6794 break;
6795 }
6796 @@ -595,30 +666,33 @@ struct v4l2_framebuffer32 {
6797 } fmt;
6798 };
6799
6800 -static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
6801 +static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
6802 + struct v4l2_framebuffer32 __user *up)
6803 {
6804 - u32 tmp;
6805 -
6806 - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) ||
6807 - get_user(tmp, &up->base) ||
6808 - get_user(kp->capability, &up->capability) ||
6809 - get_user(kp->flags, &up->flags) ||
6810 - copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
6811 - return -EFAULT;
6812 - kp->base = (__force void *)compat_ptr(tmp);
6813 + compat_caddr_t tmp;
6814 +
6815 + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6816 + get_user(tmp, &up->base) ||
6817 + put_user((__force void *)compat_ptr(tmp), &kp->base) ||
6818 + assign_in_user(&kp->capability, &up->capability) ||
6819 + assign_in_user(&kp->flags, &up->flags) ||
6820 + copy_in_user(&kp->fmt, &up->fmt, sizeof(kp->fmt)))
6821 + return -EFAULT;
6822 return 0;
6823 }
6824
6825 -static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
6826 +static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
6827 + struct v4l2_framebuffer32 __user *up)
6828 {
6829 - u32 tmp = (u32)((unsigned long)kp->base);
6830 -
6831 - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) ||
6832 - put_user(tmp, &up->base) ||
6833 - put_user(kp->capability, &up->capability) ||
6834 - put_user(kp->flags, &up->flags) ||
6835 - copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt)))
6836 - return -EFAULT;
6837 + void *base;
6838 +
6839 + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
6840 + get_user(base, &kp->base) ||
6841 + put_user(ptr_to_compat(base), &up->base) ||
6842 + assign_in_user(&up->capability, &kp->capability) ||
6843 + assign_in_user(&up->flags, &kp->flags) ||
6844 + copy_in_user(&up->fmt, &kp->fmt, sizeof(kp->fmt)))
6845 + return -EFAULT;
6846 return 0;
6847 }
6848
6849 @@ -634,18 +708,22 @@ struct v4l2_input32 {
6850 __u32 reserved[3];
6851 };
6852
6853 -/* The 64-bit v4l2_input struct has extra padding at the end of the struct.
6854 - Otherwise it is identical to the 32-bit version. */
6855 -static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
6856 +/*
6857 + * The 64-bit v4l2_input struct has extra padding at the end of the struct.
6858 + * Otherwise it is identical to the 32-bit version.
6859 + */
6860 +static inline int get_v4l2_input32(struct v4l2_input __user *kp,
6861 + struct v4l2_input32 __user *up)
6862 {
6863 - if (copy_from_user(kp, up, sizeof(struct v4l2_input32)))
6864 + if (copy_in_user(kp, up, sizeof(*up)))
6865 return -EFAULT;
6866 return 0;
6867 }
6868
6869 -static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
6870 +static inline int put_v4l2_input32(struct v4l2_input __user *kp,
6871 + struct v4l2_input32 __user *up)
6872 {
6873 - if (copy_to_user(up, kp, sizeof(struct v4l2_input32)))
6874 + if (copy_in_user(up, kp, sizeof(*up)))
6875 return -EFAULT;
6876 return 0;
6877 }
6878 @@ -669,60 +747,95 @@ struct v4l2_ext_control32 {
6879 };
6880 } __attribute__ ((packed));
6881
6882 -/* The following function really belong in v4l2-common, but that causes
6883 - a circular dependency between modules. We need to think about this, but
6884 - for now this will do. */
6885 -
6886 -/* Return non-zero if this control is a pointer type. Currently only
6887 - type STRING is a pointer type. */
6888 -static inline int ctrl_is_pointer(u32 id)
6889 +/* Return true if this control is a pointer type. */
6890 +static inline bool ctrl_is_pointer(struct file *file, u32 id)
6891 {
6892 - switch (id) {
6893 - case V4L2_CID_RDS_TX_PS_NAME:
6894 - case V4L2_CID_RDS_TX_RADIO_TEXT:
6895 - return 1;
6896 - default:
6897 - return 0;
6898 + struct video_device *vdev = video_devdata(file);
6899 + struct v4l2_fh *fh = NULL;
6900 + struct v4l2_ctrl_handler *hdl = NULL;
6901 + struct v4l2_query_ext_ctrl qec = { id };
6902 + const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
6903 +
6904 + if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
6905 + fh = file->private_data;
6906 +
6907 + if (fh && fh->ctrl_handler)
6908 + hdl = fh->ctrl_handler;
6909 + else if (vdev->ctrl_handler)
6910 + hdl = vdev->ctrl_handler;
6911 +
6912 + if (hdl) {
6913 + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id);
6914 +
6915 + return ctrl && ctrl->is_ptr;
6916 }
6917 +
6918 + if (!ops || !ops->vidioc_query_ext_ctrl)
6919 + return false;
6920 +
6921 + return !ops->vidioc_query_ext_ctrl(file, fh, &qec) &&
6922 + (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD);
6923 +}
6924 +
6925 +static int bufsize_v4l2_ext_controls(struct v4l2_ext_controls32 __user *up,
6926 + u32 *size)
6927 +{
6928 + u32 count;
6929 +
6930 + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6931 + get_user(count, &up->count))
6932 + return -EFAULT;
6933 + if (count > V4L2_CID_MAX_CTRLS)
6934 + return -EINVAL;
6935 + *size = count * sizeof(struct v4l2_ext_control);
6936 + return 0;
6937 }
6938
6939 -static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
6940 +static int get_v4l2_ext_controls32(struct file *file,
6941 + struct v4l2_ext_controls __user *kp,
6942 + struct v4l2_ext_controls32 __user *up,
6943 + void __user *aux_buf, u32 aux_space)
6944 {
6945 struct v4l2_ext_control32 __user *ucontrols;
6946 struct v4l2_ext_control __user *kcontrols;
6947 - unsigned int n;
6948 + u32 count;
6949 + u32 n;
6950 compat_caddr_t p;
6951
6952 - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
6953 - get_user(kp->which, &up->which) ||
6954 - get_user(kp->count, &up->count) ||
6955 - get_user(kp->error_idx, &up->error_idx) ||
6956 - copy_from_user(kp->reserved, up->reserved,
6957 - sizeof(kp->reserved)))
6958 - return -EFAULT;
6959 - if (kp->count == 0) {
6960 - kp->controls = NULL;
6961 - return 0;
6962 - } else if (kp->count > V4L2_CID_MAX_CTRLS) {
6963 + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
6964 + assign_in_user(&kp->which, &up->which) ||
6965 + get_user(count, &up->count) ||
6966 + put_user(count, &kp->count) ||
6967 + assign_in_user(&kp->error_idx, &up->error_idx) ||
6968 + copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
6969 + return -EFAULT;
6970 +
6971 + if (count == 0)
6972 + return put_user(NULL, &kp->controls);
6973 + if (count > V4L2_CID_MAX_CTRLS)
6974 return -EINVAL;
6975 - }
6976 if (get_user(p, &up->controls))
6977 return -EFAULT;
6978 ucontrols = compat_ptr(p);
6979 - if (!access_ok(VERIFY_READ, ucontrols,
6980 - kp->count * sizeof(struct v4l2_ext_control32)))
6981 + if (!access_ok(VERIFY_READ, ucontrols, count * sizeof(*ucontrols)))
6982 return -EFAULT;
6983 - kcontrols = compat_alloc_user_space(kp->count *
6984 - sizeof(struct v4l2_ext_control));
6985 - kp->controls = (__force struct v4l2_ext_control *)kcontrols;
6986 - for (n = 0; n < kp->count; n++) {
6987 + if (aux_space < count * sizeof(*kcontrols))
6988 + return -EFAULT;
6989 + kcontrols = aux_buf;
6990 + if (put_user((__force struct v4l2_ext_control *)kcontrols,
6991 + &kp->controls))
6992 + return -EFAULT;
6993 +
6994 + for (n = 0; n < count; n++) {
6995 u32 id;
6996
6997 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
6998 return -EFAULT;
6999 +
7000 if (get_user(id, &kcontrols->id))
7001 return -EFAULT;
7002 - if (ctrl_is_pointer(id)) {
7003 +
7004 + if (ctrl_is_pointer(file, id)) {
7005 void __user *s;
7006
7007 if (get_user(p, &ucontrols->string))
7008 @@ -737,43 +850,55 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
7009 return 0;
7010 }
7011
7012 -static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
7013 +static int put_v4l2_ext_controls32(struct file *file,
7014 + struct v4l2_ext_controls __user *kp,
7015 + struct v4l2_ext_controls32 __user *up)
7016 {
7017 struct v4l2_ext_control32 __user *ucontrols;
7018 - struct v4l2_ext_control __user *kcontrols =
7019 - (__force struct v4l2_ext_control __user *)kp->controls;
7020 - int n = kp->count;
7021 + struct v4l2_ext_control __user *kcontrols;
7022 + u32 count;
7023 + u32 n;
7024 compat_caddr_t p;
7025
7026 - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) ||
7027 - put_user(kp->which, &up->which) ||
7028 - put_user(kp->count, &up->count) ||
7029 - put_user(kp->error_idx, &up->error_idx) ||
7030 - copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
7031 - return -EFAULT;
7032 - if (!kp->count)
7033 - return 0;
7034 + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
7035 + assign_in_user(&up->which, &kp->which) ||
7036 + get_user(count, &kp->count) ||
7037 + put_user(count, &up->count) ||
7038 + assign_in_user(&up->error_idx, &kp->error_idx) ||
7039 + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)) ||
7040 + get_user(kcontrols, &kp->controls))
7041 + return -EFAULT;
7042
7043 + if (!count)
7044 + return 0;
7045 if (get_user(p, &up->controls))
7046 return -EFAULT;
7047 ucontrols = compat_ptr(p);
7048 - if (!access_ok(VERIFY_WRITE, ucontrols,
7049 - n * sizeof(struct v4l2_ext_control32)))
7050 + if (!access_ok(VERIFY_WRITE, ucontrols, count * sizeof(*ucontrols)))
7051 return -EFAULT;
7052
7053 - while (--n >= 0) {
7054 - unsigned size = sizeof(*ucontrols);
7055 + for (n = 0; n < count; n++) {
7056 + unsigned int size = sizeof(*ucontrols);
7057 u32 id;
7058
7059 - if (get_user(id, &kcontrols->id))
7060 + if (get_user(id, &kcontrols->id) ||
7061 + put_user(id, &ucontrols->id) ||
7062 + assign_in_user(&ucontrols->size, &kcontrols->size) ||
7063 + copy_in_user(&ucontrols->reserved2, &kcontrols->reserved2,
7064 + sizeof(ucontrols->reserved2)))
7065 return -EFAULT;
7066 - /* Do not modify the pointer when copying a pointer control.
7067 - The contents of the pointer was changed, not the pointer
7068 - itself. */
7069 - if (ctrl_is_pointer(id))
7070 +
7071 + /*
7072 + * Do not modify the pointer when copying a pointer control.
7073 + * The contents of the pointer was changed, not the pointer
7074 + * itself.
7075 + */
7076 + if (ctrl_is_pointer(file, id))
7077 size -= sizeof(ucontrols->value64);
7078 +
7079 if (copy_in_user(ucontrols, kcontrols, size))
7080 return -EFAULT;
7081 +
7082 ucontrols++;
7083 kcontrols++;
7084 }
7085 @@ -793,18 +918,19 @@ struct v4l2_event32 {
7086 __u32 reserved[8];
7087 };
7088
7089 -static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
7090 +static int put_v4l2_event32(struct v4l2_event __user *kp,
7091 + struct v4l2_event32 __user *up)
7092 {
7093 - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
7094 - put_user(kp->type, &up->type) ||
7095 - copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
7096 - put_user(kp->pending, &up->pending) ||
7097 - put_user(kp->sequence, &up->sequence) ||
7098 - put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
7099 - put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
7100 - put_user(kp->id, &up->id) ||
7101 - copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
7102 - return -EFAULT;
7103 + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
7104 + assign_in_user(&up->type, &kp->type) ||
7105 + copy_in_user(&up->u, &kp->u, sizeof(kp->u)) ||
7106 + assign_in_user(&up->pending, &kp->pending) ||
7107 + assign_in_user(&up->sequence, &kp->sequence) ||
7108 + assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) ||
7109 + assign_in_user(&up->timestamp.tv_nsec, &kp->timestamp.tv_nsec) ||
7110 + assign_in_user(&up->id, &kp->id) ||
7111 + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
7112 + return -EFAULT;
7113 return 0;
7114 }
7115
7116 @@ -816,32 +942,35 @@ struct v4l2_edid32 {
7117 compat_caddr_t edid;
7118 };
7119
7120 -static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
7121 +static int get_v4l2_edid32(struct v4l2_edid __user *kp,
7122 + struct v4l2_edid32 __user *up)
7123 {
7124 - u32 tmp;
7125 -
7126 - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) ||
7127 - get_user(kp->pad, &up->pad) ||
7128 - get_user(kp->start_block, &up->start_block) ||
7129 - get_user(kp->blocks, &up->blocks) ||
7130 - get_user(tmp, &up->edid) ||
7131 - copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
7132 - return -EFAULT;
7133 - kp->edid = (__force u8 *)compat_ptr(tmp);
7134 + compat_uptr_t tmp;
7135 +
7136 + if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
7137 + assign_in_user(&kp->pad, &up->pad) ||
7138 + assign_in_user(&kp->start_block, &up->start_block) ||
7139 + assign_in_user(&kp->blocks, &up->blocks) ||
7140 + get_user(tmp, &up->edid) ||
7141 + put_user(compat_ptr(tmp), &kp->edid) ||
7142 + copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
7143 + return -EFAULT;
7144 return 0;
7145 }
7146
7147 -static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
7148 +static int put_v4l2_edid32(struct v4l2_edid __user *kp,
7149 + struct v4l2_edid32 __user *up)
7150 {
7151 - u32 tmp = (u32)((unsigned long)kp->edid);
7152 -
7153 - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) ||
7154 - put_user(kp->pad, &up->pad) ||
7155 - put_user(kp->start_block, &up->start_block) ||
7156 - put_user(kp->blocks, &up->blocks) ||
7157 - put_user(tmp, &up->edid) ||
7158 - copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
7159 - return -EFAULT;
7160 + void *edid;
7161 +
7162 + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
7163 + assign_in_user(&up->pad, &kp->pad) ||
7164 + assign_in_user(&up->start_block, &kp->start_block) ||
7165 + assign_in_user(&up->blocks, &kp->blocks) ||
7166 + get_user(edid, &kp->edid) ||
7167 + put_user(ptr_to_compat(edid), &up->edid) ||
7168 + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
7169 + return -EFAULT;
7170 return 0;
7171 }
7172
7173 @@ -873,22 +1002,23 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
7174 #define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32)
7175 #define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32)
7176
7177 +static int alloc_userspace(unsigned int size, u32 aux_space,
7178 + void __user **up_native)
7179 +{
7180 + *up_native = compat_alloc_user_space(size + aux_space);
7181 + if (!*up_native)
7182 + return -ENOMEM;
7183 + if (clear_user(*up_native, size))
7184 + return -EFAULT;
7185 + return 0;
7186 +}
7187 +
7188 static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7189 {
7190 - union {
7191 - struct v4l2_format v2f;
7192 - struct v4l2_buffer v2b;
7193 - struct v4l2_framebuffer v2fb;
7194 - struct v4l2_input v2i;
7195 - struct v4l2_standard v2s;
7196 - struct v4l2_ext_controls v2ecs;
7197 - struct v4l2_event v2ev;
7198 - struct v4l2_create_buffers v2crt;
7199 - struct v4l2_edid v2edid;
7200 - unsigned long vx;
7201 - int vi;
7202 - } karg;
7203 void __user *up = compat_ptr(arg);
7204 + void __user *up_native = NULL;
7205 + void __user *aux_buf;
7206 + u32 aux_space;
7207 int compatible_arg = 1;
7208 long err = 0;
7209
7210 @@ -927,30 +1057,52 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
7211 case VIDIOC_STREAMOFF:
7212 case VIDIOC_S_INPUT:
7213 case VIDIOC_S_OUTPUT:
7214 - err = get_user(karg.vi, (s32 __user *)up);
7215 + err = alloc_userspace(sizeof(unsigned int), 0, &up_native);
7216 + if (!err && assign_in_user((unsigned int __user *)up_native,
7217 + (compat_uint_t __user *)up))
7218 + err = -EFAULT;
7219 compatible_arg = 0;
7220 break;
7221
7222 case VIDIOC_G_INPUT:
7223 case VIDIOC_G_OUTPUT:
7224 + err = alloc_userspace(sizeof(unsigned int), 0, &up_native);
7225 compatible_arg = 0;
7226 break;
7227
7228 case VIDIOC_G_EDID:
7229 case VIDIOC_S_EDID:
7230 - err = get_v4l2_edid32(&karg.v2edid, up);
7231 + err = alloc_userspace(sizeof(struct v4l2_edid), 0, &up_native);
7232 + if (!err)
7233 + err = get_v4l2_edid32(up_native, up);
7234 compatible_arg = 0;
7235 break;
7236
7237 case VIDIOC_G_FMT:
7238 case VIDIOC_S_FMT:
7239 case VIDIOC_TRY_FMT:
7240 - err = get_v4l2_format32(&karg.v2f, up);
7241 + err = bufsize_v4l2_format(up, &aux_space);
7242 + if (!err)
7243 + err = alloc_userspace(sizeof(struct v4l2_format),
7244 + aux_space, &up_native);
7245 + if (!err) {
7246 + aux_buf = up_native + sizeof(struct v4l2_format);
7247 + err = get_v4l2_format32(up_native, up,
7248 + aux_buf, aux_space);
7249 + }
7250 compatible_arg = 0;
7251 break;
7252
7253 case VIDIOC_CREATE_BUFS:
7254 - err = get_v4l2_create32(&karg.v2crt, up);
7255 + err = bufsize_v4l2_create(up, &aux_space);
7256 + if (!err)
7257 + err = alloc_userspace(sizeof(struct v4l2_create_buffers),
7258 + aux_space, &up_native);
7259 + if (!err) {
7260 + aux_buf = up_native + sizeof(struct v4l2_create_buffers);
7261 + err = get_v4l2_create32(up_native, up,
7262 + aux_buf, aux_space);
7263 + }
7264 compatible_arg = 0;
7265 break;
7266
7267 @@ -958,36 +1110,63 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
7268 case VIDIOC_QUERYBUF:
7269 case VIDIOC_QBUF:
7270 case VIDIOC_DQBUF:
7271 - err = get_v4l2_buffer32(&karg.v2b, up);
7272 + err = bufsize_v4l2_buffer(up, &aux_space);
7273 + if (!err)
7274 + err = alloc_userspace(sizeof(struct v4l2_buffer),
7275 + aux_space, &up_native);
7276 + if (!err) {
7277 + aux_buf = up_native + sizeof(struct v4l2_buffer);
7278 + err = get_v4l2_buffer32(up_native, up,
7279 + aux_buf, aux_space);
7280 + }
7281 compatible_arg = 0;
7282 break;
7283
7284 case VIDIOC_S_FBUF:
7285 - err = get_v4l2_framebuffer32(&karg.v2fb, up);
7286 + err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
7287 + &up_native);
7288 + if (!err)
7289 + err = get_v4l2_framebuffer32(up_native, up);
7290 compatible_arg = 0;
7291 break;
7292
7293 case VIDIOC_G_FBUF:
7294 + err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
7295 + &up_native);
7296 compatible_arg = 0;
7297 break;
7298
7299 case VIDIOC_ENUMSTD:
7300 - err = get_v4l2_standard32(&karg.v2s, up);
7301 + err = alloc_userspace(sizeof(struct v4l2_standard), 0,
7302 + &up_native);
7303 + if (!err)
7304 + err = get_v4l2_standard32(up_native, up);
7305 compatible_arg = 0;
7306 break;
7307
7308 case VIDIOC_ENUMINPUT:
7309 - err = get_v4l2_input32(&karg.v2i, up);
7310 + err = alloc_userspace(sizeof(struct v4l2_input), 0, &up_native);
7311 + if (!err)
7312 + err = get_v4l2_input32(up_native, up);
7313 compatible_arg = 0;
7314 break;
7315
7316 case VIDIOC_G_EXT_CTRLS:
7317 case VIDIOC_S_EXT_CTRLS:
7318 case VIDIOC_TRY_EXT_CTRLS:
7319 - err = get_v4l2_ext_controls32(&karg.v2ecs, up);
7320 + err = bufsize_v4l2_ext_controls(up, &aux_space);
7321 + if (!err)
7322 + err = alloc_userspace(sizeof(struct v4l2_ext_controls),
7323 + aux_space, &up_native);
7324 + if (!err) {
7325 + aux_buf = up_native + sizeof(struct v4l2_ext_controls);
7326 + err = get_v4l2_ext_controls32(file, up_native, up,
7327 + aux_buf, aux_space);
7328 + }
7329 compatible_arg = 0;
7330 break;
7331 case VIDIOC_DQEVENT:
7332 + err = alloc_userspace(sizeof(struct v4l2_event), 0, &up_native);
7333 compatible_arg = 0;
7334 break;
7335 }
7336 @@ -996,26 +1175,26 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
7337
7338 if (compatible_arg)
7339 err = native_ioctl(file, cmd, (unsigned long)up);
7340 - else {
7341 - mm_segment_t old_fs = get_fs();
7342 + else
7343 + err = native_ioctl(file, cmd, (unsigned long)up_native);
7344
7345 - set_fs(KERNEL_DS);
7346 - err = native_ioctl(file, cmd, (unsigned long)&karg);
7347 - set_fs(old_fs);
7348 - }
7349 + if (err == -ENOTTY)
7350 + return err;
7351
7352 - /* Special case: even after an error we need to put the
7353 - results back for these ioctls since the error_idx will
7354 - contain information on which control failed. */
7355 + /*
7356 + * Special case: even after an error we need to put the
7357 + * results back for these ioctls since the error_idx will
7358 + * contain information on which control failed.
7359 + */
7360 switch (cmd) {
7361 case VIDIOC_G_EXT_CTRLS:
7362 case VIDIOC_S_EXT_CTRLS:
7363 case VIDIOC_TRY_EXT_CTRLS:
7364 - if (put_v4l2_ext_controls32(&karg.v2ecs, up))
7365 + if (put_v4l2_ext_controls32(file, up_native, up))
7366 err = -EFAULT;
7367 break;
7368 case VIDIOC_S_EDID:
7369 - if (put_v4l2_edid32(&karg.v2edid, up))
7370 + if (put_v4l2_edid32(up_native, up))
7371 err = -EFAULT;
7372 break;
7373 }
7374 @@ -1027,43 +1206,46 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
7375 case VIDIOC_S_OUTPUT:
7376 case VIDIOC_G_INPUT:
7377 case VIDIOC_G_OUTPUT:
7378 - err = put_user(((s32)karg.vi), (s32 __user *)up);
7379 + if (assign_in_user((compat_uint_t __user *)up,
7380 + ((unsigned int __user *)up_native)))
7381 + err = -EFAULT;
7382 break;
7383
7384 case VIDIOC_G_FBUF:
7385 - err = put_v4l2_framebuffer32(&karg.v2fb, up);
7386 + err = put_v4l2_framebuffer32(up_native, up);
7387 break;
7388
7389 case VIDIOC_DQEVENT:
7390 - err = put_v4l2_event32(&karg.v2ev, up);
7391 + err = put_v4l2_event32(up_native, up);
7392 break;
7393
7394 case VIDIOC_G_EDID:
7395 - err = put_v4l2_edid32(&karg.v2edid, up);
7396 + err = put_v4l2_edid32(up_native, up);
7397 break;
7398
7399 case VIDIOC_G_FMT:
7400 case VIDIOC_S_FMT:
7401 case VIDIOC_TRY_FMT:
7402 - err = put_v4l2_format32(&karg.v2f, up);
7403 + err = put_v4l2_format32(up_native, up);
7404 break;
7405
7406 case VIDIOC_CREATE_BUFS:
7407 - err = put_v4l2_create32(&karg.v2crt, up);
7408 + err = put_v4l2_create32(up_native, up);
7409 break;
7410
7411 + case VIDIOC_PREPARE_BUF:
7412 case VIDIOC_QUERYBUF:
7413 case VIDIOC_QBUF:
7414 case VIDIOC_DQBUF:
7415 - err = put_v4l2_buffer32(&karg.v2b, up);
7416 + err = put_v4l2_buffer32(up_native, up);
7417 break;
7418
7419 case VIDIOC_ENUMSTD:
7420 - err = put_v4l2_standard32(&karg.v2s, up);
7421 + err = put_v4l2_standard32(up_native, up);
7422 break;
7423
7424 case VIDIOC_ENUMINPUT:
7425 - err = put_v4l2_input32(&karg.v2i, up);
7426 + err = put_v4l2_input32(up_native, up);
7427 break;
7428 }
7429 return err;
7430 diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
7431 index b60a6b0841d1..d06941cc6a55 100644
7432 --- a/drivers/media/v4l2-core/v4l2-ioctl.c
7433 +++ b/drivers/media/v4l2-core/v4l2-ioctl.c
7434 @@ -1308,52 +1308,50 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
7435 struct file *file, void *fh, void *arg)
7436 {
7437 struct v4l2_fmtdesc *p = arg;
7438 - struct video_device *vfd = video_devdata(file);
7439 - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
7440 - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
7441 - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
7442 - bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
7443 - bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
7444 - int ret = -EINVAL;
7445 + int ret = check_fmt(file, p->type);
7446 +
7447 + if (ret)
7448 + return ret;
7449 + ret = -EINVAL;
7450
7451 switch (p->type) {
7452 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
7453 - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_enum_fmt_vid_cap))
7454 + if (unlikely(!ops->vidioc_enum_fmt_vid_cap))
7455 break;
7456 ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
7457 break;
7458 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
7459 - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_cap_mplane))
7460 + if (unlikely(!ops->vidioc_enum_fmt_vid_cap_mplane))
7461 break;
7462 ret = ops->vidioc_enum_fmt_vid_cap_mplane(file, fh, arg);
7463 break;
7464 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
7465 - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_overlay))
7466 + if (unlikely(!ops->vidioc_enum_fmt_vid_overlay))
7467 break;
7468 ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg);
7469 break;
7470 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
7471 - if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out))
7472 + if (unlikely(!ops->vidioc_enum_fmt_vid_out))
7473 break;
7474 ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg);
7475 break;
7476 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
7477 - if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out_mplane))
7478 + if (unlikely(!ops->vidioc_enum_fmt_vid_out_mplane))
7479 break;
7480 ret = ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg);
7481 break;
7482 case V4L2_BUF_TYPE_SDR_CAPTURE:
7483 - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_enum_fmt_sdr_cap))
7484 + if (unlikely(!ops->vidioc_enum_fmt_sdr_cap))
7485 break;
7486 ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg);
7487 break;
7488 case V4L2_BUF_TYPE_SDR_OUTPUT:
7489 - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_enum_fmt_sdr_out))
7490 + if (unlikely(!ops->vidioc_enum_fmt_sdr_out))
7491 break;
7492 ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg);
7493 break;
7494 case V4L2_BUF_TYPE_META_CAPTURE:
7495 - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_meta_cap))
7496 + if (unlikely(!ops->vidioc_enum_fmt_meta_cap))
7497 break;
7498 ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg);
7499 break;
7500 @@ -1367,13 +1365,10 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
7501 struct file *file, void *fh, void *arg)
7502 {
7503 struct v4l2_format *p = arg;
7504 - struct video_device *vfd = video_devdata(file);
7505 - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
7506 - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
7507 - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
7508 - bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
7509 - bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
7510 - int ret;
7511 + int ret = check_fmt(file, p->type);
7512 +
7513 + if (ret)
7514 + return ret;
7515
7516 /*
7517 * fmt can't be cleared for these overlay types due to the 'clips'
7518 @@ -1401,7 +1396,7 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
7519
7520 switch (p->type) {
7521 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
7522 - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_g_fmt_vid_cap))
7523 + if (unlikely(!ops->vidioc_g_fmt_vid_cap))
7524 break;
7525 p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7526 ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
7527 @@ -1409,23 +1404,15 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
7528 p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7529 return ret;
7530 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
7531 - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_cap_mplane))
7532 - break;
7533 return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
7534 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
7535 - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_overlay))
7536 - break;
7537 return ops->vidioc_g_fmt_vid_overlay(file, fh, arg);
7538 case V4L2_BUF_TYPE_VBI_CAPTURE:
7539 - if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_vbi_cap))
7540 - break;
7541 return ops->vidioc_g_fmt_vbi_cap(file, fh, arg);
7542 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
7543 - if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_cap))
7544 - break;
7545 return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg);
7546 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
7547 - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out))
7548 + if (unlikely(!ops->vidioc_g_fmt_vid_out))
7549 break;
7550 p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7551 ret = ops->vidioc_g_fmt_vid_out(file, fh, arg);
7552 @@ -1433,32 +1420,18 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
7553 p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7554 return ret;
7555 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
7556 - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_mplane))
7557 - break;
7558 return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg);
7559 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
7560 - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_overlay))
7561 - break;
7562 return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg);
7563 case V4L2_BUF_TYPE_VBI_OUTPUT:
7564 - if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_vbi_out))
7565 - break;
7566 return ops->vidioc_g_fmt_vbi_out(file, fh, arg);
7567 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
7568 - if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_out))
7569 - break;
7570 return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg);
7571 case V4L2_BUF_TYPE_SDR_CAPTURE:
7572 - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_g_fmt_sdr_cap))
7573 - break;
7574 return ops->vidioc_g_fmt_sdr_cap(file, fh, arg);
7575 case V4L2_BUF_TYPE_SDR_OUTPUT:
7576 - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_g_fmt_sdr_out))
7577 - break;
7578 return ops->vidioc_g_fmt_sdr_out(file, fh, arg);
7579 case V4L2_BUF_TYPE_META_CAPTURE:
7580 - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_meta_cap))
7581 - break;
7582 return ops->vidioc_g_fmt_meta_cap(file, fh, arg);
7583 }
7584 return -EINVAL;
7585 @@ -1484,12 +1457,10 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
7586 {
7587 struct v4l2_format *p = arg;
7588 struct video_device *vfd = video_devdata(file);
7589 - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
7590 - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
7591 - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
7592 - bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
7593 - bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
7594 - int ret;
7595 + int ret = check_fmt(file, p->type);
7596 +
7597 + if (ret)
7598 + return ret;
7599
7600 ret = v4l_enable_media_source(vfd);
7601 if (ret)
7602 @@ -1498,37 +1469,37 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
7603
7604 switch (p->type) {
7605 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
7606 - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_s_fmt_vid_cap))
7607 + if (unlikely(!ops->vidioc_s_fmt_vid_cap))
7608 break;
7609 CLEAR_AFTER_FIELD(p, fmt.pix);
7610 ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg);
7611 /* just in case the driver zeroed it again */
7612 p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7613 - if (is_tch)
7614 + if (vfd->vfl_type == VFL_TYPE_TOUCH)
7615 v4l_pix_format_touch(&p->fmt.pix);
7616 return ret;
7617 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
7618 - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap_mplane))
7619 + if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
7620 break;
7621 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
7622 return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
7623 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
7624 - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_overlay))
7625 + if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
7626 break;
7627 CLEAR_AFTER_FIELD(p, fmt.win);
7628 return ops->vidioc_s_fmt_vid_overlay(file, fh, arg);
7629 case V4L2_BUF_TYPE_VBI_CAPTURE:
7630 - if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_vbi_cap))
7631 + if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
7632 break;
7633 CLEAR_AFTER_FIELD(p, fmt.vbi);
7634 return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
7635 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
7636 - if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_cap))
7637 + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
7638 break;
7639 CLEAR_AFTER_FIELD(p, fmt.sliced);
7640 return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
7641 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
7642 - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out))
7643 + if (unlikely(!ops->vidioc_s_fmt_vid_out))
7644 break;
7645 CLEAR_AFTER_FIELD(p, fmt.pix);
7646 ret = ops->vidioc_s_fmt_vid_out(file, fh, arg);
7647 @@ -1536,37 +1507,37 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
7648 p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7649 return ret;
7650 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
7651 - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_mplane))
7652 + if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
7653 break;
7654 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
7655 return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
7656 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
7657 - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_overlay))
7658 + if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
7659 break;
7660 CLEAR_AFTER_FIELD(p, fmt.win);
7661 return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg);
7662 case V4L2_BUF_TYPE_VBI_OUTPUT:
7663 - if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_vbi_out))
7664 + if (unlikely(!ops->vidioc_s_fmt_vbi_out))
7665 break;
7666 CLEAR_AFTER_FIELD(p, fmt.vbi);
7667 return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
7668 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
7669 - if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_out))
7670 + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
7671 break;
7672 CLEAR_AFTER_FIELD(p, fmt.sliced);
7673 return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
7674 case V4L2_BUF_TYPE_SDR_CAPTURE:
7675 - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_s_fmt_sdr_cap))
7676 + if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
7677 break;
7678 CLEAR_AFTER_FIELD(p, fmt.sdr);
7679 return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
7680 case V4L2_BUF_TYPE_SDR_OUTPUT:
7681 - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_s_fmt_sdr_out))
7682 + if (unlikely(!ops->vidioc_s_fmt_sdr_out))
7683 break;
7684 CLEAR_AFTER_FIELD(p, fmt.sdr);
7685 return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
7686 case V4L2_BUF_TYPE_META_CAPTURE:
7687 - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_meta_cap))
7688 + if (unlikely(!ops->vidioc_s_fmt_meta_cap))
7689 break;
7690 CLEAR_AFTER_FIELD(p, fmt.meta);
7691 return ops->vidioc_s_fmt_meta_cap(file, fh, arg);
7692 @@ -1578,19 +1549,16 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
7693 struct file *file, void *fh, void *arg)
7694 {
7695 struct v4l2_format *p = arg;
7696 - struct video_device *vfd = video_devdata(file);
7697 - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
7698 - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
7699 - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
7700 - bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
7701 - bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
7702 - int ret;
7703 + int ret = check_fmt(file, p->type);
7704 +
7705 + if (ret)
7706 + return ret;
7707
7708 v4l_sanitize_format(p);
7709
7710 switch (p->type) {
7711 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
7712 - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_try_fmt_vid_cap))
7713 + if (unlikely(!ops->vidioc_try_fmt_vid_cap))
7714 break;
7715 CLEAR_AFTER_FIELD(p, fmt.pix);
7716 ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg);
7717 @@ -1598,27 +1566,27 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
7718 p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7719 return ret;
7720 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
7721 - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap_mplane))
7722 + if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
7723 break;
7724 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
7725 return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
7726 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
7727 - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_overlay))
7728 + if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
7729 break;
7730 CLEAR_AFTER_FIELD(p, fmt.win);
7731 return ops->vidioc_try_fmt_vid_overlay(file, fh, arg);
7732 case V4L2_BUF_TYPE_VBI_CAPTURE:
7733 - if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_vbi_cap))
7734 + if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
7735 break;
7736 CLEAR_AFTER_FIELD(p, fmt.vbi);
7737 return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
7738 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
7739 - if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_cap))
7740 + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
7741 break;
7742 CLEAR_AFTER_FIELD(p, fmt.sliced);
7743 return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
7744 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
7745 - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out))
7746 + if (unlikely(!ops->vidioc_try_fmt_vid_out))
7747 break;
7748 CLEAR_AFTER_FIELD(p, fmt.pix);
7749 ret = ops->vidioc_try_fmt_vid_out(file, fh, arg);
7750 @@ -1626,37 +1594,37 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
7751 p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
7752 return ret;
7753 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
7754 - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_mplane))
7755 + if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
7756 break;
7757 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
7758 return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
7759 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
7760 - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_overlay))
7761 + if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
7762 break;
7763 CLEAR_AFTER_FIELD(p, fmt.win);
7764 return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg);
7765 case V4L2_BUF_TYPE_VBI_OUTPUT:
7766 - if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_vbi_out))
7767 + if (unlikely(!ops->vidioc_try_fmt_vbi_out))
7768 break;
7769 CLEAR_AFTER_FIELD(p, fmt.vbi);
7770 return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
7771 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
7772 - if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_out))
7773 + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
7774 break;
7775 CLEAR_AFTER_FIELD(p, fmt.sliced);
7776 return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
7777 case V4L2_BUF_TYPE_SDR_CAPTURE:
7778 - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_try_fmt_sdr_cap))
7779 + if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
7780 break;
7781 CLEAR_AFTER_FIELD(p, fmt.sdr);
7782 return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
7783 case V4L2_BUF_TYPE_SDR_OUTPUT:
7784 - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_try_fmt_sdr_out))
7785 + if (unlikely(!ops->vidioc_try_fmt_sdr_out))
7786 break;
7787 CLEAR_AFTER_FIELD(p, fmt.sdr);
7788 return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
7789 case V4L2_BUF_TYPE_META_CAPTURE:
7790 - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_meta_cap))
7791 + if (unlikely(!ops->vidioc_try_fmt_meta_cap))
7792 break;
7793 CLEAR_AFTER_FIELD(p, fmt.meta);
7794 return ops->vidioc_try_fmt_meta_cap(file, fh, arg);
7795 @@ -2924,8 +2892,11 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
7796
7797 /* Handles IOCTL */
7798 err = func(file, cmd, parg);
7799 - if (err == -ENOIOCTLCMD)
7800 + if (err == -ENOTTY || err == -ENOIOCTLCMD) {
7801 err = -ENOTTY;
7802 + goto out;
7803 + }
7804 +
7805 if (err == 0) {
7806 if (cmd == VIDIOC_DQBUF)
7807 trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
7808 diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
7809 index e0eb51d8c012..edf24c148fa6 100644
7810 --- a/drivers/mtd/nand/brcmnand/brcmnand.c
7811 +++ b/drivers/mtd/nand/brcmnand/brcmnand.c
7812 @@ -2193,16 +2193,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
7813 if (ctrl->nand_version >= 0x0702)
7814 tmp |= ACC_CONTROL_RD_ERASED;
7815 tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
7816 - if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
7817 - /*
7818 - * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
7819 - * errors
7820 - */
7821 - if (has_flash_dma(ctrl))
7822 - tmp &= ~ACC_CONTROL_PREFETCH;
7823 - else
7824 - tmp |= ACC_CONTROL_PREFETCH;
7825 - }
7826 + if (ctrl->features & BRCMNAND_HAS_PREFETCH)
7827 + tmp &= ~ACC_CONTROL_PREFETCH;
7828 +
7829 nand_writereg(ctrl, offs, tmp);
7830
7831 return 0;
7832 diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
7833 index 3f1d806e590a..a0b0302aea14 100644
7834 --- a/drivers/mtd/nand/nand_base.c
7835 +++ b/drivers/mtd/nand/nand_base.c
7836 @@ -2201,6 +2201,7 @@ EXPORT_SYMBOL(nand_write_oob_syndrome);
7837 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
7838 struct mtd_oob_ops *ops)
7839 {
7840 + unsigned int max_bitflips = 0;
7841 int page, realpage, chipnr;
7842 struct nand_chip *chip = mtd_to_nand(mtd);
7843 struct mtd_ecc_stats stats;
7844 @@ -2258,6 +2259,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
7845 nand_wait_ready(mtd);
7846 }
7847
7848 + max_bitflips = max_t(unsigned int, max_bitflips, ret);
7849 +
7850 readlen -= len;
7851 if (!readlen)
7852 break;
7853 @@ -2283,7 +2286,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
7854 if (mtd->ecc_stats.failed - stats.failed)
7855 return -EBADMSG;
7856
7857 - return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
7858 + return max_bitflips;
7859 }
7860
7861 /**
7862 diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
7863 index 82244be3e766..958974821582 100644
7864 --- a/drivers/mtd/nand/sunxi_nand.c
7865 +++ b/drivers/mtd/nand/sunxi_nand.c
7866 @@ -1853,8 +1853,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
7867
7868 /* Add ECC info retrieval from DT */
7869 for (i = 0; i < ARRAY_SIZE(strengths); i++) {
7870 - if (ecc->strength <= strengths[i])
7871 + if (ecc->strength <= strengths[i]) {
7872 + /*
7873 + * Update ecc->strength value with the actual strength
7874 + * that will be used by the ECC engine.
7875 + */
7876 + ecc->strength = strengths[i];
7877 break;
7878 + }
7879 }
7880
7881 if (i >= ARRAY_SIZE(strengths)) {
7882 diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
7883 index b210fdb31c98..b1fc28f63882 100644
7884 --- a/drivers/mtd/ubi/block.c
7885 +++ b/drivers/mtd/ubi/block.c
7886 @@ -99,6 +99,8 @@ struct ubiblock {
7887
7888 /* Linked list of all ubiblock instances */
7889 static LIST_HEAD(ubiblock_devices);
7890 +static DEFINE_IDR(ubiblock_minor_idr);
7891 +/* Protects ubiblock_devices and ubiblock_minor_idr */
7892 static DEFINE_MUTEX(devices_mutex);
7893 static int ubiblock_major;
7894
7895 @@ -351,8 +353,6 @@ static const struct blk_mq_ops ubiblock_mq_ops = {
7896 .init_request = ubiblock_init_request,
7897 };
7898
7899 -static DEFINE_IDR(ubiblock_minor_idr);
7900 -
7901 int ubiblock_create(struct ubi_volume_info *vi)
7902 {
7903 struct ubiblock *dev;
7904 @@ -365,14 +365,15 @@ int ubiblock_create(struct ubi_volume_info *vi)
7905 /* Check that the volume isn't already handled */
7906 mutex_lock(&devices_mutex);
7907 if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
7908 - mutex_unlock(&devices_mutex);
7909 - return -EEXIST;
7910 + ret = -EEXIST;
7911 + goto out_unlock;
7912 }
7913 - mutex_unlock(&devices_mutex);
7914
7915 dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
7916 - if (!dev)
7917 - return -ENOMEM;
7918 + if (!dev) {
7919 + ret = -ENOMEM;
7920 + goto out_unlock;
7921 + }
7922
7923 mutex_init(&dev->dev_mutex);
7924
7925 @@ -437,14 +438,13 @@ int ubiblock_create(struct ubi_volume_info *vi)
7926 goto out_free_queue;
7927 }
7928
7929 - mutex_lock(&devices_mutex);
7930 list_add_tail(&dev->list, &ubiblock_devices);
7931 - mutex_unlock(&devices_mutex);
7932
7933 /* Must be the last step: anyone can call file ops from now on */
7934 add_disk(dev->gd);
7935 dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
7936 dev->ubi_num, dev->vol_id, vi->name);
7937 + mutex_unlock(&devices_mutex);
7938 return 0;
7939
7940 out_free_queue:
7941 @@ -457,6 +457,8 @@ int ubiblock_create(struct ubi_volume_info *vi)
7942 put_disk(dev->gd);
7943 out_free_dev:
7944 kfree(dev);
7945 +out_unlock:
7946 + mutex_unlock(&devices_mutex);
7947
7948 return ret;
7949 }
7950 @@ -478,30 +480,36 @@ static void ubiblock_cleanup(struct ubiblock *dev)
7951 int ubiblock_remove(struct ubi_volume_info *vi)
7952 {
7953 struct ubiblock *dev;
7954 + int ret;
7955
7956 mutex_lock(&devices_mutex);
7957 dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
7958 if (!dev) {
7959 - mutex_unlock(&devices_mutex);
7960 - return -ENODEV;
7961 + ret = -ENODEV;
7962 + goto out_unlock;
7963 }
7964
7965 /* Found a device, let's lock it so we can check if it's busy */
7966 mutex_lock(&dev->dev_mutex);
7967 if (dev->refcnt > 0) {
7968 - mutex_unlock(&dev->dev_mutex);
7969 - mutex_unlock(&devices_mutex);
7970 - return -EBUSY;
7971 + ret = -EBUSY;
7972 + goto out_unlock_dev;
7973 }
7974
7975 /* Remove from device list */
7976 list_del(&dev->list);
7977 - mutex_unlock(&devices_mutex);
7978 -
7979 ubiblock_cleanup(dev);
7980 mutex_unlock(&dev->dev_mutex);
7981 + mutex_unlock(&devices_mutex);
7982 +
7983 kfree(dev);
7984 return 0;
7985 +
7986 +out_unlock_dev:
7987 + mutex_unlock(&dev->dev_mutex);
7988 +out_unlock:
7989 + mutex_unlock(&devices_mutex);
7990 + return ret;
7991 }
7992
7993 static int ubiblock_resize(struct ubi_volume_info *vi)
7994 @@ -630,6 +638,7 @@ static void ubiblock_remove_all(void)
7995 struct ubiblock *next;
7996 struct ubiblock *dev;
7997
7998 + mutex_lock(&devices_mutex);
7999 list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
8000 /* The module is being forcefully removed */
8001 WARN_ON(dev->desc);
8002 @@ -638,6 +647,7 @@ static void ubiblock_remove_all(void)
8003 ubiblock_cleanup(dev);
8004 kfree(dev);
8005 }
8006 + mutex_unlock(&devices_mutex);
8007 }
8008
8009 int __init ubiblock_init(void)
8010 diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
8011 index 85237cf661f9..3fd8d7ff7a02 100644
8012 --- a/drivers/mtd/ubi/vmt.c
8013 +++ b/drivers/mtd/ubi/vmt.c
8014 @@ -270,6 +270,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
8015 vol->last_eb_bytes = vol->usable_leb_size;
8016 }
8017
8018 + /* Make volume "available" before it becomes accessible via sysfs */
8019 + spin_lock(&ubi->volumes_lock);
8020 + ubi->volumes[vol_id] = vol;
8021 + ubi->vol_count += 1;
8022 + spin_unlock(&ubi->volumes_lock);
8023 +
8024 /* Register character device for the volume */
8025 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
8026 vol->cdev.owner = THIS_MODULE;
8027 @@ -298,11 +304,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
8028 if (err)
8029 goto out_sysfs;
8030
8031 - spin_lock(&ubi->volumes_lock);
8032 - ubi->volumes[vol_id] = vol;
8033 - ubi->vol_count += 1;
8034 - spin_unlock(&ubi->volumes_lock);
8035 -
8036 ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
8037 self_check_volumes(ubi);
8038 return err;
8039 @@ -315,6 +316,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
8040 */
8041 cdev_device_del(&vol->cdev, &vol->dev);
8042 out_mapping:
8043 + spin_lock(&ubi->volumes_lock);
8044 + ubi->volumes[vol_id] = NULL;
8045 + ubi->vol_count -= 1;
8046 + spin_unlock(&ubi->volumes_lock);
8047 ubi_eba_destroy_table(eba_tbl);
8048 out_acc:
8049 spin_lock(&ubi->volumes_lock);
8050 diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
8051 index b5b8cd6f481c..668b46202507 100644
8052 --- a/drivers/mtd/ubi/wl.c
8053 +++ b/drivers/mtd/ubi/wl.c
8054 @@ -1528,6 +1528,46 @@ static void shutdown_work(struct ubi_device *ubi)
8055 }
8056 }
8057
8058 +/**
8059 + * erase_aeb - erase a PEB given in UBI attach info PEB
8060 + * @ubi: UBI device description object
8061 + * @aeb: UBI attach info PEB
8062 + * @sync: If true, erase synchronously. Otherwise schedule for erasure
8063 + */
8064 +static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
8065 +{
8066 + struct ubi_wl_entry *e;
8067 + int err;
8068 +
8069 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
8070 + if (!e)
8071 + return -ENOMEM;
8072 +
8073 + e->pnum = aeb->pnum;
8074 + e->ec = aeb->ec;
8075 + ubi->lookuptbl[e->pnum] = e;
8076 +
8077 + if (sync) {
8078 + err = sync_erase(ubi, e, false);
8079 + if (err)
8080 + goto out_free;
8081 +
8082 + wl_tree_add(e, &ubi->free);
8083 + ubi->free_count++;
8084 + } else {
8085 + err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
8086 + if (err)
8087 + goto out_free;
8088 + }
8089 +
8090 + return 0;
8091 +
8092 +out_free:
8093 + wl_entry_destroy(ubi, e);
8094 +
8095 + return err;
8096 +}
8097 +
8098 /**
8099 * ubi_wl_init - initialize the WL sub-system using attaching information.
8100 * @ubi: UBI device description object
8101 @@ -1566,18 +1606,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
8102 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
8103 cond_resched();
8104
8105 - e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
8106 - if (!e)
8107 + err = erase_aeb(ubi, aeb, false);
8108 + if (err)
8109 goto out_free;
8110
8111 - e->pnum = aeb->pnum;
8112 - e->ec = aeb->ec;
8113 - ubi->lookuptbl[e->pnum] = e;
8114 - if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
8115 - wl_entry_destroy(ubi, e);
8116 - goto out_free;
8117 - }
8118 -
8119 found_pebs++;
8120 }
8121
8122 @@ -1635,6 +1667,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
8123 ubi_assert(!ubi->lookuptbl[e->pnum]);
8124 ubi->lookuptbl[e->pnum] = e;
8125 } else {
8126 + bool sync = false;
8127 +
8128 /*
8129 * Usually old Fastmap PEBs are scheduled for erasure
8130 * and we don't have to care about them but if we face
8131 @@ -1644,18 +1678,21 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
8132 if (ubi->lookuptbl[aeb->pnum])
8133 continue;
8134
8135 - e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
8136 - if (!e)
8137 - goto out_free;
8138 + /*
8139 + * The fastmap update code might not find a free PEB for
8140 + * writing the fastmap anchor to and then reuses the
8141 + * current fastmap anchor PEB. When this PEB gets erased
8142 + * and a power cut happens before it is written again we
8143 + * must make sure that the fastmap attach code doesn't
8144 + * find any outdated fastmap anchors, hence we erase the
8145 + * outdated fastmap anchor PEBs synchronously here.
8146 + */
8147 + if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
8148 + sync = true;
8149
8150 - e->pnum = aeb->pnum;
8151 - e->ec = aeb->ec;
8152 - ubi_assert(!ubi->lookuptbl[e->pnum]);
8153 - ubi->lookuptbl[e->pnum] = e;
8154 - if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
8155 - wl_entry_destroy(ubi, e);
8156 + err = erase_aeb(ubi, aeb, sync);
8157 + if (err)
8158 goto out_free;
8159 - }
8160 }
8161
8162 found_pebs++;
8163 diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
8164 index 71df0f70b61f..72b4527d690f 100644
8165 --- a/drivers/pinctrl/intel/pinctrl-intel.c
8166 +++ b/drivers/pinctrl/intel/pinctrl-intel.c
8167 @@ -427,6 +427,18 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
8168 writel(value, padcfg0);
8169 }
8170
8171 +static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
8172 +{
8173 + u32 value;
8174 +
8175 + /* Put the pad into GPIO mode */
8176 + value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
8177 + /* Disable SCI/SMI/NMI generation */
8178 + value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
8179 + value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
8180 + writel(value, padcfg0);
8181 +}
8182 +
8183 static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
8184 struct pinctrl_gpio_range *range,
8185 unsigned pin)
8186 @@ -434,7 +446,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
8187 struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
8188 void __iomem *padcfg0;
8189 unsigned long flags;
8190 - u32 value;
8191
8192 raw_spin_lock_irqsave(&pctrl->lock, flags);
8193
8194 @@ -444,13 +455,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
8195 }
8196
8197 padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
8198 - /* Put the pad into GPIO mode */
8199 - value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
8200 - /* Disable SCI/SMI/NMI generation */
8201 - value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
8202 - value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
8203 - writel(value, padcfg0);
8204 -
8205 + intel_gpio_set_gpio_mode(padcfg0);
8206 /* Disable TX buffer and enable RX (this will be input) */
8207 __intel_gpio_set_direction(padcfg0, true);
8208
8209 @@ -935,6 +940,8 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
8210
8211 raw_spin_lock_irqsave(&pctrl->lock, flags);
8212
8213 + intel_gpio_set_gpio_mode(reg);
8214 +
8215 value = readl(reg);
8216
8217 value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
8218 diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
8219 index 9c950bbf07ba..447763aad815 100644
8220 --- a/drivers/pinctrl/pinctrl-mcp23s08.c
8221 +++ b/drivers/pinctrl/pinctrl-mcp23s08.c
8222 @@ -891,16 +891,16 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
8223 goto fail;
8224 }
8225
8226 - ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
8227 - if (ret < 0)
8228 - goto fail;
8229 -
8230 if (mcp->irq && mcp->irq_controller) {
8231 ret = mcp23s08_irq_setup(mcp);
8232 if (ret)
8233 goto fail;
8234 }
8235
8236 + ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
8237 + if (ret < 0)
8238 + goto fail;
8239 +
8240 mcp->pinctrl_desc.name = "mcp23xxx-pinctrl";
8241 mcp->pinctrl_desc.pctlops = &mcp_pinctrl_ops;
8242 mcp->pinctrl_desc.confops = &mcp_pinconf_ops;
8243 diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
8244 index 7450f5118445..70a0228f4e7f 100644
8245 --- a/drivers/pinctrl/pinctrl-sx150x.c
8246 +++ b/drivers/pinctrl/pinctrl-sx150x.c
8247 @@ -1144,6 +1144,27 @@ static int sx150x_probe(struct i2c_client *client,
8248 if (ret)
8249 return ret;
8250
8251 + /* Pinctrl_desc */
8252 + pctl->pinctrl_desc.name = "sx150x-pinctrl";
8253 + pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops;
8254 + pctl->pinctrl_desc.confops = &sx150x_pinconf_ops;
8255 + pctl->pinctrl_desc.pins = pctl->data->pins;
8256 + pctl->pinctrl_desc.npins = pctl->data->npins;
8257 + pctl->pinctrl_desc.owner = THIS_MODULE;
8258 +
8259 + ret = devm_pinctrl_register_and_init(dev, &pctl->pinctrl_desc,
8260 + pctl, &pctl->pctldev);
8261 + if (ret) {
8262 + dev_err(dev, "Failed to register pinctrl device\n");
8263 + return ret;
8264 + }
8265 +
8266 + ret = pinctrl_enable(pctl->pctldev);
8267 + if (ret) {
8268 + dev_err(dev, "Failed to enable pinctrl device\n");
8269 + return ret;
8270 + }
8271 +
8272 /* Register GPIO controller */
8273 pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
8274 pctl->gpio.base = -1;
8275 @@ -1172,6 +1193,11 @@ static int sx150x_probe(struct i2c_client *client,
8276 if (ret)
8277 return ret;
8278
8279 + ret = gpiochip_add_pin_range(&pctl->gpio, dev_name(dev),
8280 + 0, 0, pctl->data->npins);
8281 + if (ret)
8282 + return ret;
8283 +
8284 /* Add Interrupt support if an irq is specified */
8285 if (client->irq > 0) {
8286 pctl->irq_chip.name = devm_kstrdup(dev, client->name,
8287 @@ -1217,20 +1243,6 @@ static int sx150x_probe(struct i2c_client *client,
8288 client->irq);
8289 }
8290
8291 - /* Pinctrl_desc */
8292 - pctl->pinctrl_desc.name = "sx150x-pinctrl";
8293 - pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops;
8294 - pctl->pinctrl_desc.confops = &sx150x_pinconf_ops;
8295 - pctl->pinctrl_desc.pins = pctl->data->pins;
8296 - pctl->pinctrl_desc.npins = pctl->data->npins;
8297 - pctl->pinctrl_desc.owner = THIS_MODULE;
8298 -
8299 - pctl->pctldev = pinctrl_register(&pctl->pinctrl_desc, dev, pctl);
8300 - if (IS_ERR(pctl->pctldev)) {
8301 - dev_err(dev, "Failed to register pinctrl device\n");
8302 - return PTR_ERR(pctl->pctldev);
8303 - }
8304 -
8305 return 0;
8306 }
8307
8308 diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
8309 index 76b8b7eed0c0..0b6467206f8e 100644
8310 --- a/drivers/scsi/cxlflash/main.c
8311 +++ b/drivers/scsi/cxlflash/main.c
8312 @@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
8313 cmd->parent = afu;
8314 cmd->hwq_index = hwq_index;
8315
8316 + cmd->sa.ioasc = 0;
8317 cmd->rcb.ctx_id = hwq->ctx_hndl;
8318 cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
8319 cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
8320 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
8321 index fe3a0da3ec97..57bf43e34863 100644
8322 --- a/drivers/scsi/hosts.c
8323 +++ b/drivers/scsi/hosts.c
8324 @@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev)
8325
8326 scsi_proc_hostdir_rm(shost->hostt);
8327
8328 + /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
8329 + rcu_barrier();
8330 +
8331 if (shost->tmf_work_q)
8332 destroy_workqueue(shost->tmf_work_q);
8333 if (shost->ehandler)
8334 @@ -325,6 +328,8 @@ static void scsi_host_dev_release(struct device *dev)
8335 if (shost->work_q)
8336 destroy_workqueue(shost->work_q);
8337
8338 + destroy_rcu_head(&shost->rcu);
8339 +
8340 if (shost->shost_state == SHOST_CREATED) {
8341 /*
8342 * Free the shost_dev device name here if scsi_host_alloc()
8343 @@ -399,6 +404,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
8344 INIT_LIST_HEAD(&shost->starved_list);
8345 init_waitqueue_head(&shost->host_wait);
8346 mutex_init(&shost->scan_mutex);
8347 + init_rcu_head(&shost->rcu);
8348
8349 index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
8350 if (index < 0)
8351 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
8352 index 6acf1bb1d320..25612ccf6ff2 100644
8353 --- a/drivers/scsi/lpfc/lpfc_init.c
8354 +++ b/drivers/scsi/lpfc/lpfc_init.c
8355 @@ -9413,44 +9413,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
8356 lpfc_sli4_bar0_register_memmap(phba, if_type);
8357 }
8358
8359 - if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
8360 - (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
8361 - /*
8362 - * Map SLI4 if type 0 HBA Control Register base to a kernel
8363 - * virtual address and setup the registers.
8364 - */
8365 - phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
8366 - bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
8367 - phba->sli4_hba.ctrl_regs_memmap_p =
8368 - ioremap(phba->pci_bar1_map, bar1map_len);
8369 - if (!phba->sli4_hba.ctrl_regs_memmap_p) {
8370 - dev_printk(KERN_ERR, &pdev->dev,
8371 - "ioremap failed for SLI4 HBA control registers.\n");
8372 + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
8373 + if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
8374 + /*
8375 + * Map SLI4 if type 0 HBA Control Register base to a
8376 + * kernel virtual address and setup the registers.
8377 + */
8378 + phba->pci_bar1_map = pci_resource_start(pdev,
8379 + PCI_64BIT_BAR2);
8380 + bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
8381 + phba->sli4_hba.ctrl_regs_memmap_p =
8382 + ioremap(phba->pci_bar1_map,
8383 + bar1map_len);
8384 + if (!phba->sli4_hba.ctrl_regs_memmap_p) {
8385 + dev_err(&pdev->dev,
8386 + "ioremap failed for SLI4 HBA "
8387 + "control registers.\n");
8388 + error = -ENOMEM;
8389 + goto out_iounmap_conf;
8390 + }
8391 + phba->pci_bar2_memmap_p =
8392 + phba->sli4_hba.ctrl_regs_memmap_p;
8393 + lpfc_sli4_bar1_register_memmap(phba);
8394 + } else {
8395 + error = -ENOMEM;
8396 goto out_iounmap_conf;
8397 }
8398 - phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
8399 - lpfc_sli4_bar1_register_memmap(phba);
8400 }
8401
8402 - if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
8403 - (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
8404 - /*
8405 - * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
8406 - * virtual address and setup the registers.
8407 - */
8408 - phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
8409 - bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
8410 - phba->sli4_hba.drbl_regs_memmap_p =
8411 - ioremap(phba->pci_bar2_map, bar2map_len);
8412 - if (!phba->sli4_hba.drbl_regs_memmap_p) {
8413 - dev_printk(KERN_ERR, &pdev->dev,
8414 - "ioremap failed for SLI4 HBA doorbell registers.\n");
8415 - goto out_iounmap_ctrl;
8416 - }
8417 - phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
8418 - error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
8419 - if (error)
8420 + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
8421 + if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
8422 + /*
8423 + * Map SLI4 if type 0 HBA Doorbell Register base to
8424 + * a kernel virtual address and setup the registers.
8425 + */
8426 + phba->pci_bar2_map = pci_resource_start(pdev,
8427 + PCI_64BIT_BAR4);
8428 + bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
8429 + phba->sli4_hba.drbl_regs_memmap_p =
8430 + ioremap(phba->pci_bar2_map,
8431 + bar2map_len);
8432 + if (!phba->sli4_hba.drbl_regs_memmap_p) {
8433 + dev_err(&pdev->dev,
8434 + "ioremap failed for SLI4 HBA"
8435 + " doorbell registers.\n");
8436 + error = -ENOMEM;
8437 + goto out_iounmap_ctrl;
8438 + }
8439 + phba->pci_bar4_memmap_p =
8440 + phba->sli4_hba.drbl_regs_memmap_p;
8441 + error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
8442 + if (error)
8443 + goto out_iounmap_all;
8444 + } else {
8445 + error = -ENOMEM;
8446 goto out_iounmap_all;
8447 + }
8448 }
8449
8450 return 0;
8451 diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
8452 index dab876c65473..fa504ba83ade 100644
8453 --- a/drivers/scsi/scsi_error.c
8454 +++ b/drivers/scsi/scsi_error.c
8455 @@ -220,6 +220,17 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
8456 }
8457 }
8458
8459 +static void scsi_eh_inc_host_failed(struct rcu_head *head)
8460 +{
8461 + struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
8462 + unsigned long flags;
8463 +
8464 + spin_lock_irqsave(shost->host_lock, flags);
8465 + shost->host_failed++;
8466 + scsi_eh_wakeup(shost);
8467 + spin_unlock_irqrestore(shost->host_lock, flags);
8468 +}
8469 +
8470 /**
8471 * scsi_eh_scmd_add - add scsi cmd to error handling.
8472 * @scmd: scmd to run eh on.
8473 @@ -242,9 +253,12 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
8474
8475 scsi_eh_reset(scmd);
8476 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
8477 - shost->host_failed++;
8478 - scsi_eh_wakeup(shost);
8479 spin_unlock_irqrestore(shost->host_lock, flags);
8480 + /*
8481 + * Ensure that all tasks observe the host state change before the
8482 + * host_failed change.
8483 + */
8484 + call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
8485 }
8486
8487 /**
8488 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
8489 index 635cfa1f2ace..0d3696e9dddd 100644
8490 --- a/drivers/scsi/scsi_lib.c
8491 +++ b/drivers/scsi/scsi_lib.c
8492 @@ -318,22 +318,39 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
8493 cmd->cmd_len = scsi_command_size(cmd->cmnd);
8494 }
8495
8496 -void scsi_device_unbusy(struct scsi_device *sdev)
8497 +/*
8498 + * Decrement the host_busy counter and wake up the error handler if necessary.
8499 + * Avoid as follows that the error handler is not woken up if shost->host_busy
8500 + * == shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
8501 + * with an RCU read lock in this function to ensure that this function in its
8502 + * entirety either finishes before scsi_eh_scmd_add() increases the
8503 + * host_failed counter or that it notices the shost state change made by
8504 + * scsi_eh_scmd_add().
8505 + */
8506 +static void scsi_dec_host_busy(struct Scsi_Host *shost)
8507 {
8508 - struct Scsi_Host *shost = sdev->host;
8509 - struct scsi_target *starget = scsi_target(sdev);
8510 unsigned long flags;
8511
8512 + rcu_read_lock();
8513 atomic_dec(&shost->host_busy);
8514 - if (starget->can_queue > 0)
8515 - atomic_dec(&starget->target_busy);
8516 -
8517 - if (unlikely(scsi_host_in_recovery(shost) &&
8518 - (shost->host_failed || shost->host_eh_scheduled))) {
8519 + if (unlikely(scsi_host_in_recovery(shost))) {
8520 spin_lock_irqsave(shost->host_lock, flags);
8521 - scsi_eh_wakeup(shost);
8522 + if (shost->host_failed || shost->host_eh_scheduled)
8523 + scsi_eh_wakeup(shost);
8524 spin_unlock_irqrestore(shost->host_lock, flags);
8525 }
8526 + rcu_read_unlock();
8527 +}
8528 +
8529 +void scsi_device_unbusy(struct scsi_device *sdev)
8530 +{
8531 + struct Scsi_Host *shost = sdev->host;
8532 + struct scsi_target *starget = scsi_target(sdev);
8533 +
8534 + scsi_dec_host_busy(shost);
8535 +
8536 + if (starget->can_queue > 0)
8537 + atomic_dec(&starget->target_busy);
8538
8539 atomic_dec(&sdev->device_busy);
8540 }
8541 @@ -1532,7 +1549,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
8542 list_add_tail(&sdev->starved_entry, &shost->starved_list);
8543 spin_unlock_irq(shost->host_lock);
8544 out_dec:
8545 - atomic_dec(&shost->host_busy);
8546 + scsi_dec_host_busy(shost);
8547 return 0;
8548 }
8549
8550 @@ -1993,7 +2010,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
8551 return BLK_STS_OK;
8552
8553 out_dec_host_busy:
8554 - atomic_dec(&shost->host_busy);
8555 + scsi_dec_host_busy(shost);
8556 out_dec_target_busy:
8557 if (scsi_target(sdev)->can_queue > 0)
8558 atomic_dec(&scsi_target(sdev)->target_busy);
8559 diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
8560 index db0572733712..ab30a0f5129c 100644
8561 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
8562 +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
8563 @@ -119,6 +119,7 @@ static struct shash_alg alg = {
8564 .cra_name = "adler32",
8565 .cra_driver_name = "adler32-zlib",
8566 .cra_priority = 100,
8567 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
8568 .cra_blocksize = CHKSUM_BLOCK_SIZE,
8569 .cra_ctxsize = sizeof(u32),
8570 .cra_module = THIS_MODULE,
8571 diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
8572 index c722cbfdc7e6..3ece1335ba84 100644
8573 --- a/drivers/watchdog/Kconfig
8574 +++ b/drivers/watchdog/Kconfig
8575 @@ -1451,7 +1451,7 @@ config RC32434_WDT
8576
8577 config INDYDOG
8578 tristate "Indy/I2 Hardware Watchdog"
8579 - depends on SGI_HAS_INDYDOG || (MIPS && COMPILE_TEST)
8580 + depends on SGI_HAS_INDYDOG
8581 help
8582 Hardware driver for the Indy's/I2's watchdog. This is a
8583 watchdog timer that will reboot the machine after a 60 second
8584 diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
8585 index cb66c2f99ff1..7a6279daa8b9 100644
8586 --- a/drivers/watchdog/gpio_wdt.c
8587 +++ b/drivers/watchdog/gpio_wdt.c
8588 @@ -80,7 +80,8 @@ static int gpio_wdt_stop(struct watchdog_device *wdd)
8589
8590 if (!priv->always_running) {
8591 gpio_wdt_disable(priv);
8592 - clear_bit(WDOG_HW_RUNNING, &wdd->status);
8593 + } else {
8594 + set_bit(WDOG_HW_RUNNING, &wdd->status);
8595 }
8596
8597 return 0;
8598 diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
8599 index 4874b0f18650..518dfa1047cb 100644
8600 --- a/drivers/watchdog/imx2_wdt.c
8601 +++ b/drivers/watchdog/imx2_wdt.c
8602 @@ -169,15 +169,21 @@ static int imx2_wdt_ping(struct watchdog_device *wdog)
8603 return 0;
8604 }
8605
8606 -static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
8607 - unsigned int new_timeout)
8608 +static void __imx2_wdt_set_timeout(struct watchdog_device *wdog,
8609 + unsigned int new_timeout)
8610 {
8611 struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
8612
8613 - wdog->timeout = new_timeout;
8614 -
8615 regmap_update_bits(wdev->regmap, IMX2_WDT_WCR, IMX2_WDT_WCR_WT,
8616 WDOG_SEC_TO_COUNT(new_timeout));
8617 +}
8618 +
8619 +static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
8620 + unsigned int new_timeout)
8621 +{
8622 + __imx2_wdt_set_timeout(wdog, new_timeout);
8623 +
8624 + wdog->timeout = new_timeout;
8625 return 0;
8626 }
8627
8628 @@ -371,7 +377,11 @@ static int imx2_wdt_suspend(struct device *dev)
8629
8630 /* The watchdog IP block is running */
8631 if (imx2_wdt_is_running(wdev)) {
8632 - imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
8633 + /*
8634 + * Don't update wdog->timeout, we'll restore the current value
8635 + * during resume.
8636 + */
8637 + __imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
8638 imx2_wdt_ping(wdog);
8639 }
8640
8641 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
8642 index c71afd424900..5eaedff28a32 100644
8643 --- a/fs/btrfs/inode.c
8644 +++ b/fs/btrfs/inode.c
8645 @@ -2101,8 +2101,15 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
8646 goto out;
8647 }
8648
8649 - btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
8650 - 0);
8651 + ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
8652 + &cached_state, 0);
8653 + if (ret) {
8654 + mapping_set_error(page->mapping, ret);
8655 + end_extent_writepage(page, ret, page_start, page_end);
8656 + ClearPageChecked(page);
8657 + goto out;
8658 + }
8659 +
8660 ClearPageChecked(page);
8661 set_page_dirty(page);
8662 out:
8663 diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
8664 index 24a62224b24b..6154825c30e1 100644
8665 --- a/fs/btrfs/raid56.c
8666 +++ b/fs/btrfs/raid56.c
8667 @@ -1432,14 +1432,13 @@ static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
8668 */
8669 static void set_bio_pages_uptodate(struct bio *bio)
8670 {
8671 - struct bio_vec bvec;
8672 - struct bvec_iter iter;
8673 + struct bio_vec *bvec;
8674 + int i;
8675
8676 - if (bio_flagged(bio, BIO_CLONED))
8677 - bio->bi_iter = btrfs_io_bio(bio)->iter;
8678 + ASSERT(!bio_flagged(bio, BIO_CLONED));
8679
8680 - bio_for_each_segment(bvec, bio, iter)
8681 - SetPageUptodate(bvec.bv_page);
8682 + bio_for_each_segment_all(bvec, bio, i)
8683 + SetPageUptodate(bvec->bv_page);
8684 }
8685
8686 /*
8687 diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
8688 index 68abbb0db608..f2b0a7f124da 100644
8689 --- a/fs/cifs/cifsencrypt.c
8690 +++ b/fs/cifs/cifsencrypt.c
8691 @@ -325,9 +325,8 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
8692 {
8693 int i;
8694 int rc;
8695 - char password_with_pad[CIFS_ENCPWD_SIZE];
8696 + char password_with_pad[CIFS_ENCPWD_SIZE] = {0};
8697
8698 - memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
8699 if (password)
8700 strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE);
8701
8702 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
8703 index 0bfc2280436d..f7db2fedfa8c 100644
8704 --- a/fs/cifs/connect.c
8705 +++ b/fs/cifs/connect.c
8706 @@ -1707,7 +1707,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
8707 tmp_end++;
8708 if (!(tmp_end < end && tmp_end[1] == delim)) {
8709 /* No it is not. Set the password to NULL */
8710 - kfree(vol->password);
8711 + kzfree(vol->password);
8712 vol->password = NULL;
8713 break;
8714 }
8715 @@ -1745,7 +1745,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
8716 options = end;
8717 }
8718
8719 - kfree(vol->password);
8720 + kzfree(vol->password);
8721 /* Now build new password string */
8722 temp_len = strlen(value);
8723 vol->password = kzalloc(temp_len+1, GFP_KERNEL);
8724 @@ -4235,7 +4235,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
8725 reset_cifs_unix_caps(0, tcon, NULL, vol_info);
8726 out:
8727 kfree(vol_info->username);
8728 - kfree(vol_info->password);
8729 + kzfree(vol_info->password);
8730 kfree(vol_info);
8731
8732 return tcon;
8733 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
8734 index 92fdf9c35de2..7d6539a04fac 100644
8735 --- a/fs/cifs/file.c
8736 +++ b/fs/cifs/file.c
8737 @@ -3488,20 +3488,18 @@ static const struct vm_operations_struct cifs_file_vm_ops = {
8738
8739 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
8740 {
8741 - int rc, xid;
8742 + int xid, rc = 0;
8743 struct inode *inode = file_inode(file);
8744
8745 xid = get_xid();
8746
8747 - if (!CIFS_CACHE_READ(CIFS_I(inode))) {
8748 + if (!CIFS_CACHE_READ(CIFS_I(inode)))
8749 rc = cifs_zap_mapping(inode);
8750 - if (rc)
8751 - return rc;
8752 - }
8753 -
8754 - rc = generic_file_mmap(file, vma);
8755 - if (rc == 0)
8756 + if (!rc)
8757 + rc = generic_file_mmap(file, vma);
8758 + if (!rc)
8759 vma->vm_ops = &cifs_file_vm_ops;
8760 +
8761 free_xid(xid);
8762 return rc;
8763 }
8764 @@ -3511,16 +3509,16 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
8765 int rc, xid;
8766
8767 xid = get_xid();
8768 +
8769 rc = cifs_revalidate_file(file);
8770 - if (rc) {
8771 + if (rc)
8772 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
8773 rc);
8774 - free_xid(xid);
8775 - return rc;
8776 - }
8777 - rc = generic_file_mmap(file, vma);
8778 - if (rc == 0)
8779 + if (!rc)
8780 + rc = generic_file_mmap(file, vma);
8781 + if (!rc)
8782 vma->vm_ops = &cifs_file_vm_ops;
8783 +
8784 free_xid(xid);
8785 return rc;
8786 }
8787 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
8788 index eea93ac15ef0..a0dbced4a45c 100644
8789 --- a/fs/cifs/misc.c
8790 +++ b/fs/cifs/misc.c
8791 @@ -98,14 +98,11 @@ sesInfoFree(struct cifs_ses *buf_to_free)
8792 kfree(buf_to_free->serverOS);
8793 kfree(buf_to_free->serverDomain);
8794 kfree(buf_to_free->serverNOS);
8795 - if (buf_to_free->password) {
8796 - memset(buf_to_free->password, 0, strlen(buf_to_free->password));
8797 - kfree(buf_to_free->password);
8798 - }
8799 + kzfree(buf_to_free->password);
8800 kfree(buf_to_free->user_name);
8801 kfree(buf_to_free->domainName);
8802 - kfree(buf_to_free->auth_key.response);
8803 - kfree(buf_to_free);
8804 + kzfree(buf_to_free->auth_key.response);
8805 + kzfree(buf_to_free);
8806 }
8807
8808 struct cifs_tcon *
8809 @@ -136,10 +133,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
8810 }
8811 atomic_dec(&tconInfoAllocCount);
8812 kfree(buf_to_free->nativeFileSystem);
8813 - if (buf_to_free->password) {
8814 - memset(buf_to_free->password, 0, strlen(buf_to_free->password));
8815 - kfree(buf_to_free->password);
8816 - }
8817 + kzfree(buf_to_free->password);
8818 kfree(buf_to_free);
8819 }
8820
8821 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
8822 index 01346b8b6edb..66af1f8a13cc 100644
8823 --- a/fs/cifs/smb2pdu.c
8824 +++ b/fs/cifs/smb2pdu.c
8825 @@ -733,8 +733,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
8826 }
8827
8828 /* check validate negotiate info response matches what we got earlier */
8829 - if (pneg_rsp->Dialect !=
8830 - cpu_to_le16(tcon->ses->server->vals->protocol_id))
8831 + if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect))
8832 goto vneg_out;
8833
8834 if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
8835 diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
8836 index 7eae33ffa3fc..e31d6ed3ec32 100644
8837 --- a/fs/devpts/inode.c
8838 +++ b/fs/devpts/inode.c
8839 @@ -168,11 +168,11 @@ struct vfsmount *devpts_mntget(struct file *filp, struct pts_fs_info *fsi)
8840 dput(path.dentry);
8841 if (err) {
8842 mntput(path.mnt);
8843 - path.mnt = ERR_PTR(err);
8844 + return ERR_PTR(err);
8845 }
8846 if (DEVPTS_SB(path.mnt->mnt_sb) != fsi) {
8847 mntput(path.mnt);
8848 - path.mnt = ERR_PTR(-ENODEV);
8849 + return ERR_PTR(-ENODEV);
8850 }
8851 return path.mnt;
8852 }
8853 diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
8854 index 9698e51656b1..d8f49c412f50 100644
8855 --- a/fs/kernfs/file.c
8856 +++ b/fs/kernfs/file.c
8857 @@ -275,7 +275,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
8858 {
8859 struct kernfs_open_file *of = kernfs_of(file);
8860 const struct kernfs_ops *ops;
8861 - size_t len;
8862 + ssize_t len;
8863 char *buf;
8864
8865 if (of->atomic_write_len) {
8866 diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
8867 index d2972d537469..8c10b0562e75 100644
8868 --- a/fs/nfs/direct.c
8869 +++ b/fs/nfs/direct.c
8870 @@ -775,10 +775,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
8871
8872 spin_lock(&dreq->lock);
8873
8874 - if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
8875 - dreq->flags = 0;
8876 + if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
8877 dreq->error = hdr->error;
8878 - }
8879 if (dreq->error == 0) {
8880 nfs_direct_good_bytes(dreq, hdr);
8881 if (nfs_write_need_commit(hdr)) {
8882 diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
8883 index 508126eb49f9..3db2b7464748 100644
8884 --- a/fs/nfs/filelayout/filelayout.c
8885 +++ b/fs/nfs/filelayout/filelayout.c
8886 @@ -895,9 +895,7 @@ fl_pnfs_update_layout(struct inode *ino,
8887
8888 lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
8889 gfp_flags);
8890 - if (!lseg)
8891 - lseg = ERR_PTR(-ENOMEM);
8892 - if (IS_ERR(lseg))
8893 + if (IS_ERR_OR_NULL(lseg))
8894 goto out;
8895
8896 lo = NFS_I(ino)->layout;
8897 diff --git a/fs/nfs/io.c b/fs/nfs/io.c
8898 index 20fef85d2bb1..9034b4926909 100644
8899 --- a/fs/nfs/io.c
8900 +++ b/fs/nfs/io.c
8901 @@ -99,7 +99,7 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
8902 {
8903 if (!test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
8904 set_bit(NFS_INO_ODIRECT, &nfsi->flags);
8905 - nfs_wb_all(inode);
8906 + nfs_sync_mapping(inode->i_mapping);
8907 }
8908 }
8909
8910 diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
8911 index 30426c1a1bbd..22dc30a679a0 100644
8912 --- a/fs/nfs/nfs4idmap.c
8913 +++ b/fs/nfs/nfs4idmap.c
8914 @@ -568,9 +568,13 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
8915 struct idmap_msg *im;
8916 struct idmap *idmap = (struct idmap *)aux;
8917 struct key *key = cons->key;
8918 - int ret = -ENOMEM;
8919 + int ret = -ENOKEY;
8920 +
8921 + if (!aux)
8922 + goto out1;
8923
8924 /* msg and im are freed in idmap_pipe_destroy_msg */
8925 + ret = -ENOMEM;
8926 data = kzalloc(sizeof(*data), GFP_KERNEL);
8927 if (!data)
8928 goto out1;
8929 diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
8930 index 14ed9791ec9c..549c916d2859 100644
8931 --- a/fs/nfs/nfs4xdr.c
8932 +++ b/fs/nfs/nfs4xdr.c
8933 @@ -7668,6 +7668,22 @@ nfs4_stat_to_errno(int stat)
8934 .p_name = #proc, \
8935 }
8936
8937 +#if defined(CONFIG_NFS_V4_1)
8938 +#define PROC41(proc, argtype, restype) \
8939 + PROC(proc, argtype, restype)
8940 +#else
8941 +#define PROC41(proc, argtype, restype) \
8942 + STUB(proc)
8943 +#endif
8944 +
8945 +#if defined(CONFIG_NFS_V4_2)
8946 +#define PROC42(proc, argtype, restype) \
8947 + PROC(proc, argtype, restype)
8948 +#else
8949 +#define PROC42(proc, argtype, restype) \
8950 + STUB(proc)
8951 +#endif
8952 +
8953 const struct rpc_procinfo nfs4_procedures[] = {
8954 PROC(READ, enc_read, dec_read),
8955 PROC(WRITE, enc_write, dec_write),
8956 @@ -7688,7 +7704,6 @@ const struct rpc_procinfo nfs4_procedures[] = {
8957 PROC(ACCESS, enc_access, dec_access),
8958 PROC(GETATTR, enc_getattr, dec_getattr),
8959 PROC(LOOKUP, enc_lookup, dec_lookup),
8960 - PROC(LOOKUPP, enc_lookupp, dec_lookupp),
8961 PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root),
8962 PROC(REMOVE, enc_remove, dec_remove),
8963 PROC(RENAME, enc_rename, dec_rename),
8964 @@ -7707,33 +7722,30 @@ const struct rpc_procinfo nfs4_procedures[] = {
8965 PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
8966 PROC(SECINFO, enc_secinfo, dec_secinfo),
8967 PROC(FSID_PRESENT, enc_fsid_present, dec_fsid_present),
8968 -#if defined(CONFIG_NFS_V4_1)
8969 - PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
8970 - PROC(CREATE_SESSION, enc_create_session, dec_create_session),
8971 - PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
8972 - PROC(SEQUENCE, enc_sequence, dec_sequence),
8973 - PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
8974 - PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
8975 - PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
8976 - PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
8977 - PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
8978 - PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn),
8979 - PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
8980 - PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
8981 - PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
8982 + PROC41(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
8983 + PROC41(CREATE_SESSION, enc_create_session, dec_create_session),
8984 + PROC41(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
8985 + PROC41(SEQUENCE, enc_sequence, dec_sequence),
8986 + PROC41(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
8987 + PROC41(RECLAIM_COMPLETE,enc_reclaim_complete, dec_reclaim_complete),
8988 + PROC41(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
8989 + PROC41(LAYOUTGET, enc_layoutget, dec_layoutget),
8990 + PROC41(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
8991 + PROC41(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn),
8992 + PROC41(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
8993 + PROC41(TEST_STATEID, enc_test_stateid, dec_test_stateid),
8994 + PROC41(FREE_STATEID, enc_free_stateid, dec_free_stateid),
8995 STUB(GETDEVICELIST),
8996 - PROC(BIND_CONN_TO_SESSION,
8997 + PROC41(BIND_CONN_TO_SESSION,
8998 enc_bind_conn_to_session, dec_bind_conn_to_session),
8999 - PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
9000 -#endif /* CONFIG_NFS_V4_1 */
9001 -#ifdef CONFIG_NFS_V4_2
9002 - PROC(SEEK, enc_seek, dec_seek),
9003 - PROC(ALLOCATE, enc_allocate, dec_allocate),
9004 - PROC(DEALLOCATE, enc_deallocate, dec_deallocate),
9005 - PROC(LAYOUTSTATS, enc_layoutstats, dec_layoutstats),
9006 - PROC(CLONE, enc_clone, dec_clone),
9007 - PROC(COPY, enc_copy, dec_copy),
9008 -#endif /* CONFIG_NFS_V4_2 */
9009 + PROC41(DESTROY_CLIENTID,enc_destroy_clientid, dec_destroy_clientid),
9010 + PROC42(SEEK, enc_seek, dec_seek),
9011 + PROC42(ALLOCATE, enc_allocate, dec_allocate),
9012 + PROC42(DEALLOCATE, enc_deallocate, dec_deallocate),
9013 + PROC42(LAYOUTSTATS, enc_layoutstats, dec_layoutstats),
9014 + PROC42(CLONE, enc_clone, dec_clone),
9015 + PROC42(COPY, enc_copy, dec_copy),
9016 + PROC(LOOKUPP, enc_lookupp, dec_lookupp),
9017 };
9018
9019 static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)];
9020 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
9021 index 3bcd669a3152..5f2f852ef506 100644
9022 --- a/fs/nfs/pnfs.c
9023 +++ b/fs/nfs/pnfs.c
9024 @@ -2237,7 +2237,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
9025 nfs_pageio_reset_write_mds(desc);
9026 mirror->pg_recoalesce = 1;
9027 }
9028 - hdr->release(hdr);
9029 + hdr->completion_ops->completion(hdr);
9030 }
9031
9032 static enum pnfs_try_status
9033 @@ -2360,7 +2360,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
9034 nfs_pageio_reset_read_mds(desc);
9035 mirror->pg_recoalesce = 1;
9036 }
9037 - hdr->release(hdr);
9038 + hdr->completion_ops->completion(hdr);
9039 }
9040
9041 /*
9042 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
9043 index de325804941d..76da415be39a 100644
9044 --- a/fs/nfs/write.c
9045 +++ b/fs/nfs/write.c
9046 @@ -1836,6 +1836,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
9047 set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
9048 next:
9049 nfs_unlock_and_release_request(req);
9050 + /* Latency breaker */
9051 + cond_resched();
9052 }
9053 nfss = NFS_SERVER(data->inode);
9054 if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
9055 diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
9056 index d94a51dc4e32..7fa7d68baa6d 100644
9057 --- a/fs/overlayfs/readdir.c
9058 +++ b/fs/overlayfs/readdir.c
9059 @@ -575,8 +575,15 @@ static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path)
9060 return ERR_PTR(res);
9061 }
9062 if (list_empty(&cache->entries)) {
9063 - /* Good oportunity to get rid of an unnecessary "impure" flag */
9064 - ovl_do_removexattr(ovl_dentry_upper(dentry), OVL_XATTR_IMPURE);
9065 + /*
9066 + * A good opportunity to get rid of an unneeded "impure" flag.
9067 + * Removing the "impure" xattr is best effort.
9068 + */
9069 + if (!ovl_want_write(dentry)) {
9070 + ovl_do_removexattr(ovl_dentry_upper(dentry),
9071 + OVL_XATTR_IMPURE);
9072 + ovl_drop_write(dentry);
9073 + }
9074 ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
9075 kfree(cache);
9076 return NULL;
9077 @@ -751,10 +758,14 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
9078 struct dentry *dentry = file->f_path.dentry;
9079 struct file *realfile = od->realfile;
9080
9081 + /* Nothing to sync for lower */
9082 + if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
9083 + return 0;
9084 +
9085 /*
9086 * Need to check if we started out being a lower dir, but got copied up
9087 */
9088 - if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
9089 + if (!od->is_upper) {
9090 struct inode *inode = file_inode(file);
9091
9092 realfile = READ_ONCE(od->upperfile);
9093 diff --git a/fs/pipe.c b/fs/pipe.c
9094 index f0f4ab36c444..8ef7d7bef775 100644
9095 --- a/fs/pipe.c
9096 +++ b/fs/pipe.c
9097 @@ -610,12 +610,17 @@ static unsigned long account_pipe_buffers(struct user_struct *user,
9098
9099 static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
9100 {
9101 - return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft;
9102 + return pipe_user_pages_soft && user_bufs > pipe_user_pages_soft;
9103 }
9104
9105 static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
9106 {
9107 - return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard;
9108 + return pipe_user_pages_hard && user_bufs > pipe_user_pages_hard;
9109 +}
9110 +
9111 +static bool is_unprivileged_user(void)
9112 +{
9113 + return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
9114 }
9115
9116 struct pipe_inode_info *alloc_pipe_info(void)
9117 @@ -634,12 +639,12 @@ struct pipe_inode_info *alloc_pipe_info(void)
9118
9119 user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
9120
9121 - if (too_many_pipe_buffers_soft(user_bufs)) {
9122 + if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
9123 user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
9124 pipe_bufs = 1;
9125 }
9126
9127 - if (too_many_pipe_buffers_hard(user_bufs))
9128 + if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
9129 goto out_revert_acct;
9130
9131 pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
9132 @@ -1069,7 +1074,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
9133 if (nr_pages > pipe->buffers &&
9134 (too_many_pipe_buffers_hard(user_bufs) ||
9135 too_many_pipe_buffers_soft(user_bufs)) &&
9136 - !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
9137 + is_unprivileged_user()) {
9138 ret = -EPERM;
9139 goto out_revert_acct;
9140 }
9141 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
9142 index 4bc85cb8be6a..e8a93bc8285d 100644
9143 --- a/fs/proc/kcore.c
9144 +++ b/fs/proc/kcore.c
9145 @@ -512,23 +512,15 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
9146 return -EFAULT;
9147 } else {
9148 if (kern_addr_valid(start)) {
9149 - unsigned long n;
9150 -
9151 /*
9152 * Using bounce buffer to bypass the
9153 * hardened user copy kernel text checks.
9154 */
9155 - memcpy(buf, (char *) start, tsz);
9156 - n = copy_to_user(buffer, buf, tsz);
9157 - /*
9158 - * We cannot distinguish between fault on source
9159 - * and fault on destination. When this happens
9160 - * we clear too and hope it will trigger the
9161 - * EFAULT again.
9162 - */
9163 - if (n) {
9164 - if (clear_user(buffer + tsz - n,
9165 - n))
9166 + if (probe_kernel_read(buf, (void *) start, tsz)) {
9167 + if (clear_user(buffer, tsz))
9168 + return -EFAULT;
9169 + } else {
9170 + if (copy_to_user(buffer, buf, tsz))
9171 return -EFAULT;
9172 }
9173 } else {
9174 diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
9175 index 417fe0b29f23..ef820f803176 100644
9176 --- a/fs/ubifs/dir.c
9177 +++ b/fs/ubifs/dir.c
9178 @@ -1216,10 +1216,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
9179 ostr.len = disk_link.len;
9180
9181 err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
9182 - if (err) {
9183 - kfree(sd);
9184 + if (err)
9185 goto out_inode;
9186 - }
9187
9188 sd->len = cpu_to_le16(ostr.len);
9189 disk_link.name = (char *)sd;
9190 @@ -1251,11 +1249,10 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
9191 goto out_cancel;
9192 mutex_unlock(&dir_ui->ui_mutex);
9193
9194 - ubifs_release_budget(c, &req);
9195 insert_inode_hash(inode);
9196 d_instantiate(dentry, inode);
9197 - fscrypt_free_filename(&nm);
9198 - return 0;
9199 + err = 0;
9200 + goto out_fname;
9201
9202 out_cancel:
9203 dir->i_size -= sz_change;
9204 @@ -1268,6 +1265,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
9205 fscrypt_free_filename(&nm);
9206 out_budg:
9207 ubifs_release_budget(c, &req);
9208 + kfree(sd);
9209 return err;
9210 }
9211
9212 diff --git a/include/crypto/hash.h b/include/crypto/hash.h
9213 index b5727bcd2336..74827781593c 100644
9214 --- a/include/crypto/hash.h
9215 +++ b/include/crypto/hash.h
9216 @@ -205,7 +205,6 @@ struct crypto_ahash {
9217 unsigned int keylen);
9218
9219 unsigned int reqsize;
9220 - bool has_setkey;
9221 struct crypto_tfm base;
9222 };
9223
9224 @@ -405,11 +404,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
9225 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
9226 unsigned int keylen);
9227
9228 -static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
9229 -{
9230 - return tfm->has_setkey;
9231 -}
9232 -
9233 /**
9234 * crypto_ahash_finup() - update and finalize message digest
9235 * @req: reference to the ahash_request handle that holds all information
9236 @@ -481,7 +475,12 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out)
9237 */
9238 static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
9239 {
9240 - return crypto_ahash_reqtfm(req)->import(req, in);
9241 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
9242 +
9243 + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
9244 + return -ENOKEY;
9245 +
9246 + return tfm->import(req, in);
9247 }
9248
9249 /**
9250 @@ -498,7 +497,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
9251 */
9252 static inline int crypto_ahash_init(struct ahash_request *req)
9253 {
9254 - return crypto_ahash_reqtfm(req)->init(req);
9255 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
9256 +
9257 + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
9258 + return -ENOKEY;
9259 +
9260 + return tfm->init(req);
9261 }
9262
9263 /**
9264 @@ -851,7 +855,12 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
9265 */
9266 static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
9267 {
9268 - return crypto_shash_alg(desc->tfm)->import(desc, in);
9269 + struct crypto_shash *tfm = desc->tfm;
9270 +
9271 + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
9272 + return -ENOKEY;
9273 +
9274 + return crypto_shash_alg(tfm)->import(desc, in);
9275 }
9276
9277 /**
9278 @@ -867,7 +876,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
9279 */
9280 static inline int crypto_shash_init(struct shash_desc *desc)
9281 {
9282 - return crypto_shash_alg(desc->tfm)->init(desc);
9283 + struct crypto_shash *tfm = desc->tfm;
9284 +
9285 + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
9286 + return -ENOKEY;
9287 +
9288 + return crypto_shash_alg(tfm)->init(desc);
9289 }
9290
9291 /**
9292 diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
9293 index c2bae8da642c..27040a46d50a 100644
9294 --- a/include/crypto/internal/hash.h
9295 +++ b/include/crypto/internal/hash.h
9296 @@ -90,6 +90,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
9297 return alg->setkey != shash_no_setkey;
9298 }
9299
9300 +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
9301 +
9302 int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
9303 struct hash_alg_common *alg,
9304 struct crypto_instance *inst);
9305 diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
9306 index c65567d01e8e..f718a19da82f 100644
9307 --- a/include/crypto/poly1305.h
9308 +++ b/include/crypto/poly1305.h
9309 @@ -31,8 +31,6 @@ struct poly1305_desc_ctx {
9310 };
9311
9312 int crypto_poly1305_init(struct shash_desc *desc);
9313 -int crypto_poly1305_setkey(struct crypto_shash *tfm,
9314 - const u8 *key, unsigned int keylen);
9315 unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
9316 const u8 *src, unsigned int srclen);
9317 int crypto_poly1305_update(struct shash_desc *desc,
9318 diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
9319 new file mode 100644
9320 index 000000000000..e518e4e3dfb5
9321 --- /dev/null
9322 +++ b/include/kvm/arm_psci.h
9323 @@ -0,0 +1,51 @@
9324 +/*
9325 + * Copyright (C) 2012,2013 - ARM Ltd
9326 + * Author: Marc Zyngier <marc.zyngier@arm.com>
9327 + *
9328 + * This program is free software; you can redistribute it and/or modify
9329 + * it under the terms of the GNU General Public License version 2 as
9330 + * published by the Free Software Foundation.
9331 + *
9332 + * This program is distributed in the hope that it will be useful,
9333 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9334 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9335 + * GNU General Public License for more details.
9336 + *
9337 + * You should have received a copy of the GNU General Public License
9338 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9339 + */
9340 +
9341 +#ifndef __KVM_ARM_PSCI_H__
9342 +#define __KVM_ARM_PSCI_H__
9343 +
9344 +#include <linux/kvm_host.h>
9345 +#include <uapi/linux/psci.h>
9346 +
9347 +#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1)
9348 +#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
9349 +#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0)
9350 +
9351 +#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0
9352 +
9353 +/*
9354 + * We need the KVM pointer independently from the vcpu as we can call
9355 + * this from HYP, and need to apply kern_hyp_va on it...
9356 + */
9357 +static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
9358 +{
9359 + /*
9360 + * Our PSCI implementation stays the same across versions from
9361 + * v0.2 onward, only adding the few mandatory functions (such
9362 + * as FEATURES with 1.0) that are required by newer
9363 + * revisions. It is thus safe to return the latest.
9364 + */
9365 + if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
9366 + return KVM_ARM_PSCI_LATEST;
9367 +
9368 + return KVM_ARM_PSCI_0_1;
9369 +}
9370 +
9371 +
9372 +int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
9373 +
9374 +#endif /* __KVM_ARM_PSCI_H__ */
9375 diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
9376 index 4c5bca38c653..a031897fca76 100644
9377 --- a/include/linux/arm-smccc.h
9378 +++ b/include/linux/arm-smccc.h
9379 @@ -14,14 +14,16 @@
9380 #ifndef __LINUX_ARM_SMCCC_H
9381 #define __LINUX_ARM_SMCCC_H
9382
9383 +#include <uapi/linux/const.h>
9384 +
9385 /*
9386 * This file provides common defines for ARM SMC Calling Convention as
9387 * specified in
9388 * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
9389 */
9390
9391 -#define ARM_SMCCC_STD_CALL 0
9392 -#define ARM_SMCCC_FAST_CALL 1
9393 +#define ARM_SMCCC_STD_CALL _AC(0,U)
9394 +#define ARM_SMCCC_FAST_CALL _AC(1,U)
9395 #define ARM_SMCCC_TYPE_SHIFT 31
9396
9397 #define ARM_SMCCC_SMC_32 0
9398 @@ -60,6 +62,24 @@
9399 #define ARM_SMCCC_QUIRK_NONE 0
9400 #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
9401
9402 +#define ARM_SMCCC_VERSION_1_0 0x10000
9403 +#define ARM_SMCCC_VERSION_1_1 0x10001
9404 +
9405 +#define ARM_SMCCC_VERSION_FUNC_ID \
9406 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
9407 + ARM_SMCCC_SMC_32, \
9408 + 0, 0)
9409 +
9410 +#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \
9411 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
9412 + ARM_SMCCC_SMC_32, \
9413 + 0, 1)
9414 +
9415 +#define ARM_SMCCC_ARCH_WORKAROUND_1 \
9416 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
9417 + ARM_SMCCC_SMC_32, \
9418 + 0, 0x8000)
9419 +
9420 #ifndef __ASSEMBLY__
9421
9422 #include <linux/linkage.h>
9423 @@ -130,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
9424
9425 #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
9426
9427 +/* SMCCC v1.1 implementation madness follows */
9428 +#ifdef CONFIG_ARM64
9429 +
9430 +#define SMCCC_SMC_INST "smc #0"
9431 +#define SMCCC_HVC_INST "hvc #0"
9432 +
9433 +#elif defined(CONFIG_ARM)
9434 +#include <asm/opcodes-sec.h>
9435 +#include <asm/opcodes-virt.h>
9436 +
9437 +#define SMCCC_SMC_INST __SMC(0)
9438 +#define SMCCC_HVC_INST __HVC(0)
9439 +
9440 +#endif
9441 +
9442 +#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
9443 +
9444 +#define __count_args(...) \
9445 + ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
9446 +
9447 +#define __constraint_write_0 \
9448 + "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
9449 +#define __constraint_write_1 \
9450 + "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
9451 +#define __constraint_write_2 \
9452 + "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
9453 +#define __constraint_write_3 \
9454 + "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
9455 +#define __constraint_write_4 __constraint_write_3
9456 +#define __constraint_write_5 __constraint_write_4
9457 +#define __constraint_write_6 __constraint_write_5
9458 +#define __constraint_write_7 __constraint_write_6
9459 +
9460 +#define __constraint_read_0
9461 +#define __constraint_read_1
9462 +#define __constraint_read_2
9463 +#define __constraint_read_3
9464 +#define __constraint_read_4 "r" (r4)
9465 +#define __constraint_read_5 __constraint_read_4, "r" (r5)
9466 +#define __constraint_read_6 __constraint_read_5, "r" (r6)
9467 +#define __constraint_read_7 __constraint_read_6, "r" (r7)
9468 +
9469 +#define __declare_arg_0(a0, res) \
9470 + struct arm_smccc_res *___res = res; \
9471 + register u32 r0 asm("r0") = a0; \
9472 + register unsigned long r1 asm("r1"); \
9473 + register unsigned long r2 asm("r2"); \
9474 + register unsigned long r3 asm("r3")
9475 +
9476 +#define __declare_arg_1(a0, a1, res) \
9477 + struct arm_smccc_res *___res = res; \
9478 + register u32 r0 asm("r0") = a0; \
9479 + register typeof(a1) r1 asm("r1") = a1; \
9480 + register unsigned long r2 asm("r2"); \
9481 + register unsigned long r3 asm("r3")
9482 +
9483 +#define __declare_arg_2(a0, a1, a2, res) \
9484 + struct arm_smccc_res *___res = res; \
9485 + register u32 r0 asm("r0") = a0; \
9486 + register typeof(a1) r1 asm("r1") = a1; \
9487 + register typeof(a2) r2 asm("r2") = a2; \
9488 + register unsigned long r3 asm("r3")
9489 +
9490 +#define __declare_arg_3(a0, a1, a2, a3, res) \
9491 + struct arm_smccc_res *___res = res; \
9492 + register u32 r0 asm("r0") = a0; \
9493 + register typeof(a1) r1 asm("r1") = a1; \
9494 + register typeof(a2) r2 asm("r2") = a2; \
9495 + register typeof(a3) r3 asm("r3") = a3
9496 +
9497 +#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
9498 + __declare_arg_3(a0, a1, a2, a3, res); \
9499 + register typeof(a4) r4 asm("r4") = a4
9500 +
9501 +#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
9502 + __declare_arg_4(a0, a1, a2, a3, a4, res); \
9503 + register typeof(a5) r5 asm("r5") = a5
9504 +
9505 +#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
9506 + __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
9507 + register typeof(a6) r6 asm("r6") = a6
9508 +
9509 +#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
9510 + __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
9511 + register typeof(a7) r7 asm("r7") = a7
9512 +
9513 +#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
9514 +#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
9515 +
9516 +#define ___constraints(count) \
9517 + : __constraint_write_ ## count \
9518 + : __constraint_read_ ## count \
9519 + : "memory"
9520 +#define __constraints(count) ___constraints(count)
9521 +
9522 +/*
9523 + * We have an output list that is not necessarily used, and GCC feels
9524 + * entitled to optimise the whole sequence away. "volatile" is what
9525 + * makes it stick.
9526 + */
9527 +#define __arm_smccc_1_1(inst, ...) \
9528 + do { \
9529 + __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
9530 + asm volatile(inst "\n" \
9531 + __constraints(__count_args(__VA_ARGS__))); \
9532 + if (___res) \
9533 + *___res = (typeof(*___res)){r0, r1, r2, r3}; \
9534 + } while (0)
9535 +
9536 +/*
9537 + * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
9538 + *
9539 + * This is a variadic macro taking one to eight source arguments, and
9540 + * an optional return structure.
9541 + *
9542 + * @a0-a7: arguments passed in registers 0 to 7
9543 + * @res: result values from registers 0 to 3
9544 + *
9545 + * This macro is used to make SMC calls following SMC Calling Convention v1.1.
9546 + * The content of the supplied param are copied to registers 0 to 7 prior
9547 + * to the SMC instruction. The return values are updated with the content
9548 + * from register 0 to 3 on return from the SMC instruction if not NULL.
9549 + */
9550 +#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
9551 +
9552 +/*
9553 + * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
9554 + *
9555 + * This is a variadic macro taking one to eight source arguments, and
9556 + * an optional return structure.
9557 + *
9558 + * @a0-a7: arguments passed in registers 0 to 7
9559 + * @res: result values from registers 0 to 3
9560 + *
9561 + * This macro is used to make HVC calls following SMC Calling Convention v1.1.
9562 + * The content of the supplied param are copied to registers 0 to 7 prior
9563 + * to the HVC instruction. The return values are updated with the content
9564 + * from register 0 to 3 on return from the HVC instruction if not NULL.
9565 + */
9566 +#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
9567 +
9568 #endif /*__ASSEMBLY__*/
9569 #endif /*__LINUX_ARM_SMCCC_H*/
9570 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
9571 index 84da9978e951..cc36484d29e1 100644
9572 --- a/include/linux/crypto.h
9573 +++ b/include/linux/crypto.h
9574 @@ -105,9 +105,17 @@
9575 */
9576 #define CRYPTO_ALG_INTERNAL 0x00002000
9577
9578 +/*
9579 + * Set if the algorithm has a ->setkey() method but can be used without
9580 + * calling it first, i.e. there is a default key.
9581 + */
9582 +#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
9583 +
9584 /*
9585 * Transform masks and values (for crt_flags).
9586 */
9587 +#define CRYPTO_TFM_NEED_KEY 0x00000001
9588 +
9589 #define CRYPTO_TFM_REQ_MASK 0x000fff00
9590 #define CRYPTO_TFM_RES_MASK 0xfff00000
9591
9592 diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
9593 index 3aa56e3104bb..b5b43f94f311 100644
9594 --- a/include/linux/mtd/map.h
9595 +++ b/include/linux/mtd/map.h
9596 @@ -270,75 +270,67 @@ void map_destroy(struct mtd_info *mtd);
9597 #define INVALIDATE_CACHED_RANGE(map, from, size) \
9598 do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
9599
9600 -
9601 -static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
9602 -{
9603 - int i;
9604 -
9605 - for (i = 0; i < map_words(map); i++) {
9606 - if (val1.x[i] != val2.x[i])
9607 - return 0;
9608 - }
9609 -
9610 - return 1;
9611 -}
9612 -
9613 -static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
9614 -{
9615 - map_word r;
9616 - int i;
9617 -
9618 - for (i = 0; i < map_words(map); i++)
9619 - r.x[i] = val1.x[i] & val2.x[i];
9620 -
9621 - return r;
9622 -}
9623 -
9624 -static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
9625 -{
9626 - map_word r;
9627 - int i;
9628 -
9629 - for (i = 0; i < map_words(map); i++)
9630 - r.x[i] = val1.x[i] & ~val2.x[i];
9631 -
9632 - return r;
9633 -}
9634 -
9635 -static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
9636 -{
9637 - map_word r;
9638 - int i;
9639 -
9640 - for (i = 0; i < map_words(map); i++)
9641 - r.x[i] = val1.x[i] | val2.x[i];
9642 -
9643 - return r;
9644 -}
9645 -
9646 -static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
9647 -{
9648 - int i;
9649 -
9650 - for (i = 0; i < map_words(map); i++) {
9651 - if ((val1.x[i] & val2.x[i]) != val3.x[i])
9652 - return 0;
9653 - }
9654 -
9655 - return 1;
9656 -}
9657 -
9658 -static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
9659 -{
9660 - int i;
9661 -
9662 - for (i = 0; i < map_words(map); i++) {
9663 - if (val1.x[i] & val2.x[i])
9664 - return 1;
9665 - }
9666 -
9667 - return 0;
9668 -}
9669 +#define map_word_equal(map, val1, val2) \
9670 +({ \
9671 + int i, ret = 1; \
9672 + for (i = 0; i < map_words(map); i++) \
9673 + if ((val1).x[i] != (val2).x[i]) { \
9674 + ret = 0; \
9675 + break; \
9676 + } \
9677 + ret; \
9678 +})
9679 +
9680 +#define map_word_and(map, val1, val2) \
9681 +({ \
9682 + map_word r; \
9683 + int i; \
9684 + for (i = 0; i < map_words(map); i++) \
9685 + r.x[i] = (val1).x[i] & (val2).x[i]; \
9686 + r; \
9687 +})
9688 +
9689 +#define map_word_clr(map, val1, val2) \
9690 +({ \
9691 + map_word r; \
9692 + int i; \
9693 + for (i = 0; i < map_words(map); i++) \
9694 + r.x[i] = (val1).x[i] & ~(val2).x[i]; \
9695 + r; \
9696 +})
9697 +
9698 +#define map_word_or(map, val1, val2) \
9699 +({ \
9700 + map_word r; \
9701 + int i; \
9702 + for (i = 0; i < map_words(map); i++) \
9703 + r.x[i] = (val1).x[i] | (val2).x[i]; \
9704 + r; \
9705 +})
9706 +
9707 +#define map_word_andequal(map, val1, val2, val3) \
9708 +({ \
9709 + int i, ret = 1; \
9710 + for (i = 0; i < map_words(map); i++) { \
9711 + if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \
9712 + ret = 0; \
9713 + break; \
9714 + } \
9715 + } \
9716 + ret; \
9717 +})
9718 +
9719 +#define map_word_bitsset(map, val1, val2) \
9720 +({ \
9721 + int i, ret = 0; \
9722 + for (i = 0; i < map_words(map); i++) { \
9723 + if ((val1).x[i] & (val2).x[i]) { \
9724 + ret = 1; \
9725 + break; \
9726 + } \
9727 + } \
9728 + ret; \
9729 +})
9730
9731 static inline map_word map_word_load(struct map_info *map, const void *ptr)
9732 {
9733 diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
9734 index 47adac640191..57ffaa20d564 100644
9735 --- a/include/linux/nfs4.h
9736 +++ b/include/linux/nfs4.h
9737 @@ -457,7 +457,12 @@ enum lock_type4 {
9738
9739 #define NFS4_DEBUG 1
9740
9741 -/* Index of predefined Linux client operations */
9742 +/*
9743 + * Index of predefined Linux client operations
9744 + *
9745 + * To ensure that /proc/net/rpc/nfs remains correctly ordered, please
9746 + * append only to this enum when adding new client operations.
9747 + */
9748
9749 enum {
9750 NFSPROC4_CLNT_NULL = 0, /* Unused */
9751 @@ -480,7 +485,6 @@ enum {
9752 NFSPROC4_CLNT_ACCESS,
9753 NFSPROC4_CLNT_GETATTR,
9754 NFSPROC4_CLNT_LOOKUP,
9755 - NFSPROC4_CLNT_LOOKUPP,
9756 NFSPROC4_CLNT_LOOKUP_ROOT,
9757 NFSPROC4_CLNT_REMOVE,
9758 NFSPROC4_CLNT_RENAME,
9759 @@ -500,7 +504,6 @@ enum {
9760 NFSPROC4_CLNT_SECINFO,
9761 NFSPROC4_CLNT_FSID_PRESENT,
9762
9763 - /* nfs41 */
9764 NFSPROC4_CLNT_EXCHANGE_ID,
9765 NFSPROC4_CLNT_CREATE_SESSION,
9766 NFSPROC4_CLNT_DESTROY_SESSION,
9767 @@ -518,13 +521,14 @@ enum {
9768 NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
9769 NFSPROC4_CLNT_DESTROY_CLIENTID,
9770
9771 - /* nfs42 */
9772 NFSPROC4_CLNT_SEEK,
9773 NFSPROC4_CLNT_ALLOCATE,
9774 NFSPROC4_CLNT_DEALLOCATE,
9775 NFSPROC4_CLNT_LAYOUTSTATS,
9776 NFSPROC4_CLNT_CLONE,
9777 NFSPROC4_CLNT_COPY,
9778 +
9779 + NFSPROC4_CLNT_LOOKUPP,
9780 };
9781
9782 /* nfs41 types */
9783 diff --git a/include/linux/psci.h b/include/linux/psci.h
9784 index bdea1cb5e1db..347077cf19c6 100644
9785 --- a/include/linux/psci.h
9786 +++ b/include/linux/psci.h
9787 @@ -25,7 +25,19 @@ bool psci_tos_resident_on(int cpu);
9788 int psci_cpu_init_idle(unsigned int cpu);
9789 int psci_cpu_suspend_enter(unsigned long index);
9790
9791 +enum psci_conduit {
9792 + PSCI_CONDUIT_NONE,
9793 + PSCI_CONDUIT_SMC,
9794 + PSCI_CONDUIT_HVC,
9795 +};
9796 +
9797 +enum smccc_version {
9798 + SMCCC_VERSION_1_0,
9799 + SMCCC_VERSION_1_1,
9800 +};
9801 +
9802 struct psci_operations {
9803 + u32 (*get_version)(void);
9804 int (*cpu_suspend)(u32 state, unsigned long entry_point);
9805 int (*cpu_off)(u32 state);
9806 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
9807 @@ -33,6 +45,8 @@ struct psci_operations {
9808 int (*affinity_info)(unsigned long target_affinity,
9809 unsigned long lowest_affinity_level);
9810 int (*migrate_info_type)(void);
9811 + enum psci_conduit conduit;
9812 + enum smccc_version smccc_version;
9813 };
9814
9815 extern struct psci_operations psci_ops;
9816 diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
9817 index a8b7bf879ced..1a1df0d21ee3 100644
9818 --- a/include/scsi/scsi_host.h
9819 +++ b/include/scsi/scsi_host.h
9820 @@ -571,6 +571,8 @@ struct Scsi_Host {
9821 struct blk_mq_tag_set tag_set;
9822 };
9823
9824 + struct rcu_head rcu;
9825 +
9826 atomic_t host_busy; /* commands actually active on low-level */
9827 atomic_t host_blocked;
9828
9829 diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
9830 index 760e52a9640f..b3bcabe380da 100644
9831 --- a/include/uapi/linux/psci.h
9832 +++ b/include/uapi/linux/psci.h
9833 @@ -88,6 +88,9 @@
9834 (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
9835 #define PSCI_VERSION_MINOR(ver) \
9836 ((ver) & PSCI_VERSION_MINOR_MASK)
9837 +#define PSCI_VERSION(maj, min) \
9838 + ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
9839 + ((min) & PSCI_VERSION_MINOR_MASK))
9840
9841 /* PSCI features decoding (>=1.0) */
9842 #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1
9843 diff --git a/kernel/async.c b/kernel/async.c
9844 index 2cbd3dd5940d..a893d6170944 100644
9845 --- a/kernel/async.c
9846 +++ b/kernel/async.c
9847 @@ -84,20 +84,24 @@ static atomic_t entry_count;
9848
9849 static async_cookie_t lowest_in_progress(struct async_domain *domain)
9850 {
9851 - struct list_head *pending;
9852 + struct async_entry *first = NULL;
9853 async_cookie_t ret = ASYNC_COOKIE_MAX;
9854 unsigned long flags;
9855
9856 spin_lock_irqsave(&async_lock, flags);
9857
9858 - if (domain)
9859 - pending = &domain->pending;
9860 - else
9861 - pending = &async_global_pending;
9862 + if (domain) {
9863 + if (!list_empty(&domain->pending))
9864 + first = list_first_entry(&domain->pending,
9865 + struct async_entry, domain_list);
9866 + } else {
9867 + if (!list_empty(&async_global_pending))
9868 + first = list_first_entry(&async_global_pending,
9869 + struct async_entry, global_list);
9870 + }
9871
9872 - if (!list_empty(pending))
9873 - ret = list_first_entry(pending, struct async_entry,
9874 - domain_list)->cookie;
9875 + if (first)
9876 + ret = first->cookie;
9877
9878 spin_unlock_irqrestore(&async_lock, flags);
9879 return ret;
9880 diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
9881 index 5033b66d2753..7a577bd989a4 100644
9882 --- a/kernel/rcu/update.c
9883 +++ b/kernel/rcu/update.c
9884 @@ -421,11 +421,13 @@ void init_rcu_head(struct rcu_head *head)
9885 {
9886 debug_object_init(head, &rcuhead_debug_descr);
9887 }
9888 +EXPORT_SYMBOL_GPL(init_rcu_head);
9889
9890 void destroy_rcu_head(struct rcu_head *head)
9891 {
9892 debug_object_free(head, &rcuhead_debug_descr);
9893 }
9894 +EXPORT_SYMBOL_GPL(destroy_rcu_head);
9895
9896 static bool rcuhead_is_static_object(void *addr)
9897 {
9898 diff --git a/kernel/relay.c b/kernel/relay.c
9899 index 39a9dfc69486..55da824f4adc 100644
9900 --- a/kernel/relay.c
9901 +++ b/kernel/relay.c
9902 @@ -611,7 +611,6 @@ struct rchan *relay_open(const char *base_filename,
9903
9904 kref_put(&chan->kref, relay_destroy_channel);
9905 mutex_unlock(&relay_channels_mutex);
9906 - kfree(chan);
9907 return NULL;
9908 }
9909 EXPORT_SYMBOL_GPL(relay_open);
9910 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
9911 index 7464c5c4de46..298f62b8662d 100644
9912 --- a/kernel/sched/rt.c
9913 +++ b/kernel/sched/rt.c
9914 @@ -1907,9 +1907,8 @@ static void push_rt_tasks(struct rq *rq)
9915 * the rt_loop_next will cause the iterator to perform another scan.
9916 *
9917 */
9918 -static int rto_next_cpu(struct rq *rq)
9919 +static int rto_next_cpu(struct root_domain *rd)
9920 {
9921 - struct root_domain *rd = rq->rd;
9922 int next;
9923 int cpu;
9924
9925 @@ -1985,19 +1984,24 @@ static void tell_cpu_to_push(struct rq *rq)
9926 * Otherwise it is finishing up and an ipi needs to be sent.
9927 */
9928 if (rq->rd->rto_cpu < 0)
9929 - cpu = rto_next_cpu(rq);
9930 + cpu = rto_next_cpu(rq->rd);
9931
9932 raw_spin_unlock(&rq->rd->rto_lock);
9933
9934 rto_start_unlock(&rq->rd->rto_loop_start);
9935
9936 - if (cpu >= 0)
9937 + if (cpu >= 0) {
9938 + /* Make sure the rd does not get freed while pushing */
9939 + sched_get_rd(rq->rd);
9940 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
9941 + }
9942 }
9943
9944 /* Called from hardirq context */
9945 void rto_push_irq_work_func(struct irq_work *work)
9946 {
9947 + struct root_domain *rd =
9948 + container_of(work, struct root_domain, rto_push_work);
9949 struct rq *rq;
9950 int cpu;
9951
9952 @@ -2013,18 +2017,20 @@ void rto_push_irq_work_func(struct irq_work *work)
9953 raw_spin_unlock(&rq->lock);
9954 }
9955
9956 - raw_spin_lock(&rq->rd->rto_lock);
9957 + raw_spin_lock(&rd->rto_lock);
9958
9959 /* Pass the IPI to the next rt overloaded queue */
9960 - cpu = rto_next_cpu(rq);
9961 + cpu = rto_next_cpu(rd);
9962
9963 - raw_spin_unlock(&rq->rd->rto_lock);
9964 + raw_spin_unlock(&rd->rto_lock);
9965
9966 - if (cpu < 0)
9967 + if (cpu < 0) {
9968 + sched_put_rd(rd);
9969 return;
9970 + }
9971
9972 /* Try the next RT overloaded CPU */
9973 - irq_work_queue_on(&rq->rd->rto_push_work, cpu);
9974 + irq_work_queue_on(&rd->rto_push_work, cpu);
9975 }
9976 #endif /* HAVE_RT_PUSH_IPI */
9977
9978 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
9979 index b732e779fe7d..307c35d33660 100644
9980 --- a/kernel/sched/sched.h
9981 +++ b/kernel/sched/sched.h
9982 @@ -661,6 +661,8 @@ extern struct mutex sched_domains_mutex;
9983 extern void init_defrootdomain(void);
9984 extern int sched_init_domains(const struct cpumask *cpu_map);
9985 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
9986 +extern void sched_get_rd(struct root_domain *rd);
9987 +extern void sched_put_rd(struct root_domain *rd);
9988
9989 #ifdef HAVE_RT_PUSH_IPI
9990 extern void rto_push_irq_work_func(struct irq_work *work);
9991 diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
9992 index 093f2ceba2e2..659e075ef70b 100644
9993 --- a/kernel/sched/topology.c
9994 +++ b/kernel/sched/topology.c
9995 @@ -258,6 +258,19 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
9996 call_rcu_sched(&old_rd->rcu, free_rootdomain);
9997 }
9998
9999 +void sched_get_rd(struct root_domain *rd)
10000 +{
10001 + atomic_inc(&rd->refcount);
10002 +}
10003 +
10004 +void sched_put_rd(struct root_domain *rd)
10005 +{
10006 + if (!atomic_dec_and_test(&rd->refcount))
10007 + return;
10008 +
10009 + call_rcu_sched(&rd->rcu, free_rootdomain);
10010 +}
10011 +
10012 static int init_rootdomain(struct root_domain *rd)
10013 {
10014 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
10015 diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
10016 index 98feab7933c7..929ecb7d6b78 100644
10017 --- a/kernel/sched/wait.c
10018 +++ b/kernel/sched/wait.c
10019 @@ -27,7 +27,7 @@ void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
10020
10021 wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
10022 spin_lock_irqsave(&wq_head->lock, flags);
10023 - __add_wait_queue_entry_tail(wq_head, wq_entry);
10024 + __add_wait_queue(wq_head, wq_entry);
10025 spin_unlock_irqrestore(&wq_head->lock, flags);
10026 }
10027 EXPORT_SYMBOL(add_wait_queue);
10028 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
10029 index 8319e09e15b9..7379bcf3baa0 100644
10030 --- a/kernel/trace/ftrace.c
10031 +++ b/kernel/trace/ftrace.c
10032 @@ -4488,7 +4488,6 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
10033 func_g.type = filter_parse_regex(glob, strlen(glob),
10034 &func_g.search, &not);
10035 func_g.len = strlen(func_g.search);
10036 - func_g.search = glob;
10037
10038 /* we do not support '!' for function probes */
10039 if (WARN_ON(not))
10040 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
10041 index ff21b4dbb392..00cb02daeddd 100644
10042 --- a/lib/Kconfig.debug
10043 +++ b/lib/Kconfig.debug
10044 @@ -217,7 +217,7 @@ config ENABLE_MUST_CHECK
10045 config FRAME_WARN
10046 int "Warn for stack frames larger than (needs gcc 4.4)"
10047 range 0 8192
10048 - default 0 if KASAN
10049 + default 3072 if KASAN_EXTRA
10050 default 2048 if GCC_PLUGIN_LATENT_ENTROPY
10051 default 1280 if (!64BIT && PARISC)
10052 default 1024 if (!64BIT && !PARISC)
10053 diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
10054 index bd38aab05929..3d35d062970d 100644
10055 --- a/lib/Kconfig.kasan
10056 +++ b/lib/Kconfig.kasan
10057 @@ -20,6 +20,17 @@ config KASAN
10058 Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB
10059 (the resulting kernel does not boot).
10060
10061 +config KASAN_EXTRA
10062 + bool "KAsan: extra checks"
10063 + depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST
10064 + help
10065 + This enables further checks in the kernel address sanitizer, for now
10066 + it only includes the address-use-after-scope check that can lead
10067 + to excessive kernel stack usage, frame size warnings and longer
10068 + compile time.
10069 + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more
10070 +
10071 +
10072 choice
10073 prompt "Instrumentation type"
10074 depends on KASAN
10075 diff --git a/lib/ubsan.c b/lib/ubsan.c
10076 index fb0409df1bcf..50d1d5c25deb 100644
10077 --- a/lib/ubsan.c
10078 +++ b/lib/ubsan.c
10079 @@ -265,14 +265,14 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data,
10080 }
10081 EXPORT_SYMBOL(__ubsan_handle_divrem_overflow);
10082
10083 -static void handle_null_ptr_deref(struct type_mismatch_data *data)
10084 +static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
10085 {
10086 unsigned long flags;
10087
10088 - if (suppress_report(&data->location))
10089 + if (suppress_report(data->location))
10090 return;
10091
10092 - ubsan_prologue(&data->location, &flags);
10093 + ubsan_prologue(data->location, &flags);
10094
10095 pr_err("%s null pointer of type %s\n",
10096 type_check_kinds[data->type_check_kind],
10097 @@ -281,15 +281,15 @@ static void handle_null_ptr_deref(struct type_mismatch_data *data)
10098 ubsan_epilogue(&flags);
10099 }
10100
10101 -static void handle_missaligned_access(struct type_mismatch_data *data,
10102 +static void handle_misaligned_access(struct type_mismatch_data_common *data,
10103 unsigned long ptr)
10104 {
10105 unsigned long flags;
10106
10107 - if (suppress_report(&data->location))
10108 + if (suppress_report(data->location))
10109 return;
10110
10111 - ubsan_prologue(&data->location, &flags);
10112 + ubsan_prologue(data->location, &flags);
10113
10114 pr_err("%s misaligned address %p for type %s\n",
10115 type_check_kinds[data->type_check_kind],
10116 @@ -299,15 +299,15 @@ static void handle_missaligned_access(struct type_mismatch_data *data,
10117 ubsan_epilogue(&flags);
10118 }
10119
10120 -static void handle_object_size_mismatch(struct type_mismatch_data *data,
10121 +static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
10122 unsigned long ptr)
10123 {
10124 unsigned long flags;
10125
10126 - if (suppress_report(&data->location))
10127 + if (suppress_report(data->location))
10128 return;
10129
10130 - ubsan_prologue(&data->location, &flags);
10131 + ubsan_prologue(data->location, &flags);
10132 pr_err("%s address %p with insufficient space\n",
10133 type_check_kinds[data->type_check_kind],
10134 (void *) ptr);
10135 @@ -315,19 +315,47 @@ static void handle_object_size_mismatch(struct type_mismatch_data *data,
10136 ubsan_epilogue(&flags);
10137 }
10138
10139 -void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
10140 +static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
10141 unsigned long ptr)
10142 {
10143
10144 if (!ptr)
10145 handle_null_ptr_deref(data);
10146 else if (data->alignment && !IS_ALIGNED(ptr, data->alignment))
10147 - handle_missaligned_access(data, ptr);
10148 + handle_misaligned_access(data, ptr);
10149 else
10150 handle_object_size_mismatch(data, ptr);
10151 }
10152 +
10153 +void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
10154 + unsigned long ptr)
10155 +{
10156 + struct type_mismatch_data_common common_data = {
10157 + .location = &data->location,
10158 + .type = data->type,
10159 + .alignment = data->alignment,
10160 + .type_check_kind = data->type_check_kind
10161 + };
10162 +
10163 + ubsan_type_mismatch_common(&common_data, ptr);
10164 +}
10165 EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
10166
10167 +void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
10168 + unsigned long ptr)
10169 +{
10170 +
10171 + struct type_mismatch_data_common common_data = {
10172 + .location = &data->location,
10173 + .type = data->type,
10174 + .alignment = 1UL << data->log_alignment,
10175 + .type_check_kind = data->type_check_kind
10176 + };
10177 +
10178 + ubsan_type_mismatch_common(&common_data, ptr);
10179 +}
10180 +EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
10181 +
10182 void __ubsan_handle_nonnull_return(struct nonnull_return_data *data)
10183 {
10184 unsigned long flags;
10185 diff --git a/lib/ubsan.h b/lib/ubsan.h
10186 index 88f23557edbe..7e30b26497e0 100644
10187 --- a/lib/ubsan.h
10188 +++ b/lib/ubsan.h
10189 @@ -37,6 +37,20 @@ struct type_mismatch_data {
10190 unsigned char type_check_kind;
10191 };
10192
10193 +struct type_mismatch_data_v1 {
10194 + struct source_location location;
10195 + struct type_descriptor *type;
10196 + unsigned char log_alignment;
10197 + unsigned char type_check_kind;
10198 +};
10199 +
10200 +struct type_mismatch_data_common {
10201 + struct source_location *location;
10202 + struct type_descriptor *type;
10203 + unsigned long alignment;
10204 + unsigned char type_check_kind;
10205 +};
10206 +
10207 struct nonnull_arg_data {
10208 struct source_location location;
10209 struct source_location attr_location;
10210 diff --git a/net/dccp/proto.c b/net/dccp/proto.c
10211 index b68168fcc06a..9d43c1f40274 100644
10212 --- a/net/dccp/proto.c
10213 +++ b/net/dccp/proto.c
10214 @@ -259,6 +259,7 @@ int dccp_disconnect(struct sock *sk, int flags)
10215 {
10216 struct inet_connection_sock *icsk = inet_csk(sk);
10217 struct inet_sock *inet = inet_sk(sk);
10218 + struct dccp_sock *dp = dccp_sk(sk);
10219 int err = 0;
10220 const int old_state = sk->sk_state;
10221
10222 @@ -278,6 +279,10 @@ int dccp_disconnect(struct sock *sk, int flags)
10223 sk->sk_err = ECONNRESET;
10224
10225 dccp_clear_xmit_timers(sk);
10226 + ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
10227 + ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
10228 + dp->dccps_hc_rx_ccid = NULL;
10229 + dp->dccps_hc_tx_ccid = NULL;
10230
10231 __skb_queue_purge(&sk->sk_receive_queue);
10232 __skb_queue_purge(&sk->sk_write_queue);
10233 diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
10234 index 1ce7115aa499..97a56c0b565a 100644
10235 --- a/scripts/Makefile.kasan
10236 +++ b/scripts/Makefile.kasan
10237 @@ -30,5 +30,10 @@ else
10238 endif
10239 endif
10240
10241 +ifdef CONFIG_KASAN_EXTRA
10242 CFLAGS_KASAN += $(call cc-option, -fsanitize-address-use-after-scope)
10243 endif
10244 +
10245 +CFLAGS_KASAN_NOSANITIZE := -fno-builtin
10246 +
10247 +endif
10248 diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
10249 index 04b5633df1cf..0b46136a91a8 100644
10250 --- a/scripts/Makefile.lib
10251 +++ b/scripts/Makefile.lib
10252 @@ -128,7 +128,7 @@ endif
10253 ifeq ($(CONFIG_KASAN),y)
10254 _c_flags += $(if $(patsubst n%,, \
10255 $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
10256 - $(CFLAGS_KASAN))
10257 + $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
10258 endif
10259
10260 ifeq ($(CONFIG_UBSAN),y)
10261 diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
10262 index e7d766d56c8e..55859c5b456f 100644
10263 --- a/sound/soc/intel/skylake/skl-nhlt.c
10264 +++ b/sound/soc/intel/skylake/skl-nhlt.c
10265 @@ -41,7 +41,8 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
10266 obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
10267 if (obj && obj->type == ACPI_TYPE_BUFFER) {
10268 nhlt_ptr = (struct nhlt_resource_desc *)obj->buffer.pointer;
10269 - nhlt_table = (struct nhlt_acpi_table *)
10270 + if (nhlt_ptr->length)
10271 + nhlt_table = (struct nhlt_acpi_table *)
10272 memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
10273 MEMREMAP_WB);
10274 ACPI_FREE(obj);
10275 diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
10276 index b6590467fd14..66fc13a2396a 100644
10277 --- a/sound/soc/rockchip/rockchip_i2s.c
10278 +++ b/sound/soc/rockchip/rockchip_i2s.c
10279 @@ -504,6 +504,7 @@ static bool rockchip_i2s_rd_reg(struct device *dev, unsigned int reg)
10280 case I2S_INTCR:
10281 case I2S_XFER:
10282 case I2S_CLR:
10283 + case I2S_TXDR:
10284 case I2S_RXDR:
10285 case I2S_FIFOLR:
10286 case I2S_INTSR:
10287 @@ -518,6 +519,9 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
10288 switch (reg) {
10289 case I2S_INTSR:
10290 case I2S_CLR:
10291 + case I2S_FIFOLR:
10292 + case I2S_TXDR:
10293 + case I2S_RXDR:
10294 return true;
10295 default:
10296 return false;
10297 @@ -527,6 +531,8 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
10298 static bool rockchip_i2s_precious_reg(struct device *dev, unsigned int reg)
10299 {
10300 switch (reg) {
10301 + case I2S_RXDR:
10302 + return true;
10303 default:
10304 return false;
10305 }
10306 diff --git a/tools/objtool/check.c b/tools/objtool/check.c
10307 index 9cd028aa1509..2e458eb45586 100644
10308 --- a/tools/objtool/check.c
10309 +++ b/tools/objtool/check.c
10310 @@ -851,8 +851,14 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func,
10311 * This is a fairly uncommon pattern which is new for GCC 6. As of this
10312 * writing, there are 11 occurrences of it in the allmodconfig kernel.
10313 *
10314 + * As of GCC 7 there are quite a few more of these and the 'in between' code
10315 + * is significant. Esp. with KASAN enabled some of the code between the mov
10316 + * and jmpq uses .rodata itself, which can confuse things.
10317 + *
10318 * TODO: Once we have DWARF CFI and smarter instruction decoding logic,
10319 * ensure the same register is used in the mov and jump instructions.
10320 + *
10321 + * NOTE: RETPOLINE made it harder still to decode dynamic jumps.
10322 */
10323 static struct rela *find_switch_table(struct objtool_file *file,
10324 struct symbol *func,
10325 @@ -874,12 +880,25 @@ static struct rela *find_switch_table(struct objtool_file *file,
10326 text_rela->addend + 4);
10327 if (!rodata_rela)
10328 return NULL;
10329 +
10330 file->ignore_unreachables = true;
10331 return rodata_rela;
10332 }
10333
10334 /* case 3 */
10335 - func_for_each_insn_continue_reverse(file, func, insn) {
10336 + /*
10337 + * Backward search using the @first_jump_src links, these help avoid
10338 + * much of the 'in between' code. Which avoids us getting confused by
10339 + * it.
10340 + */
10341 + for (insn = list_prev_entry(insn, list);
10342 +
10343 + &insn->list != &file->insn_list &&
10344 + insn->sec == func->sec &&
10345 + insn->offset >= func->offset;
10346 +
10347 + insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
10348 +
10349 if (insn->type == INSN_JUMP_DYNAMIC)
10350 break;
10351
10352 @@ -909,14 +928,32 @@ static struct rela *find_switch_table(struct objtool_file *file,
10353 return NULL;
10354 }
10355
10356 +
10357 static int add_func_switch_tables(struct objtool_file *file,
10358 struct symbol *func)
10359 {
10360 - struct instruction *insn, *prev_jump = NULL;
10361 + struct instruction *insn, *last = NULL, *prev_jump = NULL;
10362 struct rela *rela, *prev_rela = NULL;
10363 int ret;
10364
10365 func_for_each_insn(file, func, insn) {
10366 + if (!last)
10367 + last = insn;
10368 +
10369 + /*
10370 + * Store back-pointers for unconditional forward jumps such
10371 + * that find_switch_table() can back-track using those and
10372 + * avoid some potentially confusing code.
10373 + */
10374 + if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
10375 + insn->offset > last->offset &&
10376 + insn->jump_dest->offset > insn->offset &&
10377 + !insn->jump_dest->first_jump_src) {
10378 +
10379 + insn->jump_dest->first_jump_src = insn;
10380 + last = insn->jump_dest;
10381 + }
10382 +
10383 if (insn->type != INSN_JUMP_DYNAMIC)
10384 continue;
10385
10386 diff --git a/tools/objtool/check.h b/tools/objtool/check.h
10387 index dbadb304a410..23a1d065cae1 100644
10388 --- a/tools/objtool/check.h
10389 +++ b/tools/objtool/check.h
10390 @@ -47,6 +47,7 @@ struct instruction {
10391 bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
10392 struct symbol *call_dest;
10393 struct instruction *jump_dest;
10394 + struct instruction *first_jump_src;
10395 struct list_head alts;
10396 struct symbol *func;
10397 struct stack_op stack_op;
10398 diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
10399 index 9a07ee94a230..8b6c42dc1aa9 100644
10400 --- a/virt/kvm/arm/arm.c
10401 +++ b/virt/kvm/arm/arm.c
10402 @@ -29,6 +29,7 @@
10403 #include <linux/kvm.h>
10404 #include <trace/events/kvm.h>
10405 #include <kvm/arm_pmu.h>
10406 +#include <kvm/arm_psci.h>
10407
10408 #define CREATE_TRACE_POINTS
10409 #include "trace.h"
10410 @@ -44,7 +45,6 @@
10411 #include <asm/kvm_mmu.h>
10412 #include <asm/kvm_emulate.h>
10413 #include <asm/kvm_coproc.h>
10414 -#include <asm/kvm_psci.h>
10415 #include <asm/sections.h>
10416
10417 #ifdef REQUIRES_VIRT
10418 @@ -1139,7 +1139,7 @@ static void cpu_init_hyp_mode(void *dummy)
10419 pgd_ptr = kvm_mmu_get_httbr();
10420 stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
10421 hyp_stack_ptr = stack_page + PAGE_SIZE;
10422 - vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
10423 + vector_ptr = (unsigned long)kvm_get_hyp_vector();
10424
10425 __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
10426 __cpu_init_stage2();
10427 @@ -1220,6 +1220,7 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
10428 cpu_hyp_reset();
10429
10430 return NOTIFY_OK;
10431 + case CPU_PM_ENTER_FAILED:
10432 case CPU_PM_EXIT:
10433 if (__this_cpu_read(kvm_arm_hardware_enabled))
10434 /* The hardware was enabled before suspend. */
10435 @@ -1384,6 +1385,12 @@ static int init_hyp_mode(void)
10436 goto out_err;
10437 }
10438
10439 + err = kvm_map_vectors();
10440 + if (err) {
10441 + kvm_err("Cannot map vectors\n");
10442 + goto out_err;
10443 + }
10444 +
10445 /*
10446 * Map the Hyp stack pages
10447 */
10448 diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
10449 index f1e363bab5e8..6919352cbf15 100644
10450 --- a/virt/kvm/arm/psci.c
10451 +++ b/virt/kvm/arm/psci.c
10452 @@ -15,16 +15,16 @@
10453 * along with this program. If not, see <http://www.gnu.org/licenses/>.
10454 */
10455
10456 +#include <linux/arm-smccc.h>
10457 #include <linux/preempt.h>
10458 #include <linux/kvm_host.h>
10459 #include <linux/wait.h>
10460
10461 #include <asm/cputype.h>
10462 #include <asm/kvm_emulate.h>
10463 -#include <asm/kvm_psci.h>
10464 #include <asm/kvm_host.h>
10465
10466 -#include <uapi/linux/psci.h>
10467 +#include <kvm/arm_psci.h>
10468
10469 /*
10470 * This is an implementation of the Power State Coordination Interface
10471 @@ -33,6 +33,38 @@
10472
10473 #define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
10474
10475 +static u32 smccc_get_function(struct kvm_vcpu *vcpu)
10476 +{
10477 + return vcpu_get_reg(vcpu, 0);
10478 +}
10479 +
10480 +static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
10481 +{
10482 + return vcpu_get_reg(vcpu, 1);
10483 +}
10484 +
10485 +static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
10486 +{
10487 + return vcpu_get_reg(vcpu, 2);
10488 +}
10489 +
10490 +static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
10491 +{
10492 + return vcpu_get_reg(vcpu, 3);
10493 +}
10494 +
10495 +static void smccc_set_retval(struct kvm_vcpu *vcpu,
10496 + unsigned long a0,
10497 + unsigned long a1,
10498 + unsigned long a2,
10499 + unsigned long a3)
10500 +{
10501 + vcpu_set_reg(vcpu, 0, a0);
10502 + vcpu_set_reg(vcpu, 1, a1);
10503 + vcpu_set_reg(vcpu, 2, a2);
10504 + vcpu_set_reg(vcpu, 3, a3);
10505 +}
10506 +
10507 static unsigned long psci_affinity_mask(unsigned long affinity_level)
10508 {
10509 if (affinity_level <= 3)
10510 @@ -78,7 +110,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
10511 unsigned long context_id;
10512 phys_addr_t target_pc;
10513
10514 - cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
10515 + cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
10516 if (vcpu_mode_is_32bit(source_vcpu))
10517 cpu_id &= ~((u32) 0);
10518
10519 @@ -91,14 +123,14 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
10520 if (!vcpu)
10521 return PSCI_RET_INVALID_PARAMS;
10522 if (!vcpu->arch.power_off) {
10523 - if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
10524 + if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
10525 return PSCI_RET_ALREADY_ON;
10526 else
10527 return PSCI_RET_INVALID_PARAMS;
10528 }
10529
10530 - target_pc = vcpu_get_reg(source_vcpu, 2);
10531 - context_id = vcpu_get_reg(source_vcpu, 3);
10532 + target_pc = smccc_get_arg2(source_vcpu);
10533 + context_id = smccc_get_arg3(source_vcpu);
10534
10535 kvm_reset_vcpu(vcpu);
10536
10537 @@ -117,7 +149,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
10538 * NOTE: We always update r0 (or x0) because for PSCI v0.1
10539 * the general puspose registers are undefined upon CPU_ON.
10540 */
10541 - vcpu_set_reg(vcpu, 0, context_id);
10542 + smccc_set_retval(vcpu, context_id, 0, 0, 0);
10543 vcpu->arch.power_off = false;
10544 smp_mb(); /* Make sure the above is visible */
10545
10546 @@ -137,8 +169,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
10547 struct kvm *kvm = vcpu->kvm;
10548 struct kvm_vcpu *tmp;
10549
10550 - target_affinity = vcpu_get_reg(vcpu, 1);
10551 - lowest_affinity_level = vcpu_get_reg(vcpu, 2);
10552 + target_affinity = smccc_get_arg1(vcpu);
10553 + lowest_affinity_level = smccc_get_arg2(vcpu);
10554
10555 /* Determine target affinity mask */
10556 target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
10557 @@ -200,18 +232,10 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
10558 kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
10559 }
10560
10561 -int kvm_psci_version(struct kvm_vcpu *vcpu)
10562 -{
10563 - if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
10564 - return KVM_ARM_PSCI_0_2;
10565 -
10566 - return KVM_ARM_PSCI_0_1;
10567 -}
10568 -
10569 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
10570 {
10571 struct kvm *kvm = vcpu->kvm;
10572 - unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
10573 + u32 psci_fn = smccc_get_function(vcpu);
10574 unsigned long val;
10575 int ret = 1;
10576
10577 @@ -221,7 +245,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
10578 * Bits[31:16] = Major Version = 0
10579 * Bits[15:0] = Minor Version = 2
10580 */
10581 - val = 2;
10582 + val = KVM_ARM_PSCI_0_2;
10583 break;
10584 case PSCI_0_2_FN_CPU_SUSPEND:
10585 case PSCI_0_2_FN64_CPU_SUSPEND:
10586 @@ -278,14 +302,56 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
10587 break;
10588 }
10589
10590 - vcpu_set_reg(vcpu, 0, val);
10591 + smccc_set_retval(vcpu, val, 0, 0, 0);
10592 + return ret;
10593 +}
10594 +
10595 +static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
10596 +{
10597 + u32 psci_fn = smccc_get_function(vcpu);
10598 + u32 feature;
10599 + unsigned long val;
10600 + int ret = 1;
10601 +
10602 + switch(psci_fn) {
10603 + case PSCI_0_2_FN_PSCI_VERSION:
10604 + val = KVM_ARM_PSCI_1_0;
10605 + break;
10606 + case PSCI_1_0_FN_PSCI_FEATURES:
10607 + feature = smccc_get_arg1(vcpu);
10608 + switch(feature) {
10609 + case PSCI_0_2_FN_PSCI_VERSION:
10610 + case PSCI_0_2_FN_CPU_SUSPEND:
10611 + case PSCI_0_2_FN64_CPU_SUSPEND:
10612 + case PSCI_0_2_FN_CPU_OFF:
10613 + case PSCI_0_2_FN_CPU_ON:
10614 + case PSCI_0_2_FN64_CPU_ON:
10615 + case PSCI_0_2_FN_AFFINITY_INFO:
10616 + case PSCI_0_2_FN64_AFFINITY_INFO:
10617 + case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
10618 + case PSCI_0_2_FN_SYSTEM_OFF:
10619 + case PSCI_0_2_FN_SYSTEM_RESET:
10620 + case PSCI_1_0_FN_PSCI_FEATURES:
10621 + case ARM_SMCCC_VERSION_FUNC_ID:
10622 + val = 0;
10623 + break;
10624 + default:
10625 + val = PSCI_RET_NOT_SUPPORTED;
10626 + break;
10627 + }
10628 + break;
10629 + default:
10630 + return kvm_psci_0_2_call(vcpu);
10631 + }
10632 +
10633 + smccc_set_retval(vcpu, val, 0, 0, 0);
10634 return ret;
10635 }
10636
10637 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
10638 {
10639 struct kvm *kvm = vcpu->kvm;
10640 - unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
10641 + u32 psci_fn = smccc_get_function(vcpu);
10642 unsigned long val;
10643
10644 switch (psci_fn) {
10645 @@ -303,7 +369,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
10646 break;
10647 }
10648
10649 - vcpu_set_reg(vcpu, 0, val);
10650 + smccc_set_retval(vcpu, val, 0, 0, 0);
10651 return 1;
10652 }
10653
10654 @@ -321,9 +387,11 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
10655 * Errors:
10656 * -EINVAL: Unrecognized PSCI function
10657 */
10658 -int kvm_psci_call(struct kvm_vcpu *vcpu)
10659 +static int kvm_psci_call(struct kvm_vcpu *vcpu)
10660 {
10661 - switch (kvm_psci_version(vcpu)) {
10662 + switch (kvm_psci_version(vcpu, vcpu->kvm)) {
10663 + case KVM_ARM_PSCI_1_0:
10664 + return kvm_psci_1_0_call(vcpu);
10665 case KVM_ARM_PSCI_0_2:
10666 return kvm_psci_0_2_call(vcpu);
10667 case KVM_ARM_PSCI_0_1:
10668 @@ -332,3 +400,30 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
10669 return -EINVAL;
10670 };
10671 }
10672 +
10673 +int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
10674 +{
10675 + u32 func_id = smccc_get_function(vcpu);
10676 + u32 val = PSCI_RET_NOT_SUPPORTED;
10677 + u32 feature;
10678 +
10679 + switch (func_id) {
10680 + case ARM_SMCCC_VERSION_FUNC_ID:
10681 + val = ARM_SMCCC_VERSION_1_1;
10682 + break;
10683 + case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
10684 + feature = smccc_get_arg1(vcpu);
10685 + switch(feature) {
10686 + case ARM_SMCCC_ARCH_WORKAROUND_1:
10687 + if (kvm_arm_harden_branch_predictor())
10688 + val = 0;
10689 + break;
10690 + }
10691 + break;
10692 + default:
10693 + return kvm_psci_call(vcpu);
10694 + }
10695 +
10696 + smccc_set_retval(vcpu, val, 0, 0, 0);
10697 + return 1;
10698 +}