Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0132-4.19.33-all-fixes.patch

Revision 3411
Fri Aug 2 11:47:42 2019 UTC by niro
File size: 151135 bytes
-linux-4.19.33
1 diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
2 index 647f94128a85..8e16017ff397 100644
3 --- a/Documentation/virtual/kvm/api.txt
4 +++ b/Documentation/virtual/kvm/api.txt
5 @@ -13,7 +13,7 @@ of a virtual machine. The ioctls belong to three classes
6
7 - VM ioctls: These query and set attributes that affect an entire virtual
8 machine, for example memory layout. In addition a VM ioctl is used to
9 - create virtual cpus (vcpus).
10 + create virtual cpus (vcpus) and devices.
11
12 Only run VM ioctls from the same process (address space) that was used
13 to create the VM.
14 @@ -24,6 +24,11 @@ of a virtual machine. The ioctls belong to three classes
15 Only run vcpu ioctls from the same thread that was used to create the
16 vcpu.
17
18 + - device ioctls: These query and set attributes that control the operation
19 + of a single device.
20 +
21 + device ioctls must be issued from the same process (address space) that
22 + was used to create the VM.
23
24 2. File descriptors
25 -------------------
26 @@ -32,10 +37,11 @@ The kvm API is centered around file descriptors. An initial
27 open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
28 can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
29 handle will create a VM file descriptor which can be used to issue VM
30 -ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
31 -and return a file descriptor pointing to it. Finally, ioctls on a vcpu
32 -fd can be used to control the vcpu, including the important task of
33 -actually running guest code.
34 +ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
35 +create a virtual cpu or device and return a file descriptor pointing to
36 +the new resource. Finally, ioctls on a vcpu or device fd can be used
37 +to control the vcpu or device. For vcpus, this includes the important
38 +task of actually running guest code.
39
40 In general file descriptors can be migrated among processes by means
41 of fork() and the SCM_RIGHTS facility of unix domain socket. These
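
The api.txt hunk above describes the three-level fd hierarchy that the new device ioctls slot into: a system fd from open("/dev/kvm"), a VM fd from KVM_CREATE_VM, and vcpu or device fds created from the VM fd. As a rough user-space illustration only (not part of the patch, with error handling reduced to bare checks), the sequence looks like:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);	/* system fd */
	if (kvm < 0)
		return 1;

	if (ioctl(kvm, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		return 1;

	int vm = ioctl(kvm, KVM_CREATE_VM, 0);		/* VM fd: VM ioctls go here */
	if (vm < 0)
		return 1;

	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);	/* vcpu fd: vcpu ioctls go here */
	if (vcpu < 0)
		return 1;

	/*
	 * vcpu ioctls such as KVM_RUN are issued on 'vcpu' from this same
	 * thread; KVM_CREATE_DEVICE on 'vm' would likewise hand back a
	 * device fd for device ioctls, as the documentation change notes.
	 */
	close(vcpu);
	close(vm);
	close(kvm);
	return 0;
}
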
42 diff --git a/Makefile b/Makefile
43 index d66c433df5b1..8de5fab711d8 100644
44 --- a/Makefile
45 +++ b/Makefile
46 @@ -1,7 +1,7 @@
47 # SPDX-License-Identifier: GPL-2.0
48 VERSION = 4
49 PATCHLEVEL = 19
50 -SUBLEVEL = 32
51 +SUBLEVEL = 33
52 EXTRAVERSION =
53 NAME = "People's Front"
54
55 @@ -948,9 +948,11 @@ mod_sign_cmd = true
56 endif
57 export mod_sign_cmd
58
59 +HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
60 +
61 ifdef CONFIG_STACK_VALIDATION
62 has_libelf := $(call try-run,\
63 - echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
64 + echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
65 ifeq ($(has_libelf),1)
66 objtool_target := tools/objtool FORCE
67 else
68 diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
69 index bfeb25aaf9a2..326e870d7123 100644
70 --- a/arch/arm/mach-imx/cpuidle-imx6q.c
71 +++ b/arch/arm/mach-imx/cpuidle-imx6q.c
72 @@ -16,30 +16,23 @@
73 #include "cpuidle.h"
74 #include "hardware.h"
75
76 -static atomic_t master = ATOMIC_INIT(0);
77 -static DEFINE_SPINLOCK(master_lock);
78 +static int num_idle_cpus = 0;
79 +static DEFINE_SPINLOCK(cpuidle_lock);
80
81 static int imx6q_enter_wait(struct cpuidle_device *dev,
82 struct cpuidle_driver *drv, int index)
83 {
84 - if (atomic_inc_return(&master) == num_online_cpus()) {
85 - /*
86 - * With this lock, we prevent other cpu to exit and enter
87 - * this function again and become the master.
88 - */
89 - if (!spin_trylock(&master_lock))
90 - goto idle;
91 + spin_lock(&cpuidle_lock);
92 + if (++num_idle_cpus == num_online_cpus())
93 imx6_set_lpm(WAIT_UNCLOCKED);
94 - cpu_do_idle();
95 - imx6_set_lpm(WAIT_CLOCKED);
96 - spin_unlock(&master_lock);
97 - goto done;
98 - }
99 + spin_unlock(&cpuidle_lock);
100
101 -idle:
102 cpu_do_idle();
103 -done:
104 - atomic_dec(&master);
105 +
106 + spin_lock(&cpuidle_lock);
107 + if (num_idle_cpus-- == num_online_cpus())
108 + imx6_set_lpm(WAIT_CLOCKED);
109 + spin_unlock(&cpuidle_lock);
110
111 return index;
112 }
113 diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
114 index 33b6f9c892c8..40a6c9261a6b 100644
115 --- a/arch/powerpc/include/asm/feature-fixups.h
116 +++ b/arch/powerpc/include/asm/feature-fixups.h
117 @@ -221,6 +221,17 @@ label##3: \
118 FTR_ENTRY_OFFSET 953b-954b; \
119 .popsection;
120
121 +#define START_BTB_FLUSH_SECTION \
122 +955: \
123 +
124 +#define END_BTB_FLUSH_SECTION \
125 +956: \
126 + .pushsection __btb_flush_fixup,"a"; \
127 + .align 2; \
128 +957: \
129 + FTR_ENTRY_OFFSET 955b-957b; \
130 + FTR_ENTRY_OFFSET 956b-957b; \
131 + .popsection;
132
133 #ifndef __ASSEMBLY__
134 #include <linux/types.h>
135 @@ -230,6 +241,7 @@ extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
136 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
137 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
138 extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
139 +extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
140
141 void apply_feature_fixups(void);
142 void setup_feature_keys(void);
143 diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
144 index 665af14850e4..2b7135391231 100644
145 --- a/arch/powerpc/include/asm/ppc-opcode.h
146 +++ b/arch/powerpc/include/asm/ppc-opcode.h
147 @@ -300,6 +300,7 @@
148 /* Misc instructions for BPF compiler */
149 #define PPC_INST_LBZ 0x88000000
150 #define PPC_INST_LD 0xe8000000
151 +#define PPC_INST_LDX 0x7c00002a
152 #define PPC_INST_LHZ 0xa0000000
153 #define PPC_INST_LWZ 0x80000000
154 #define PPC_INST_LHBRX 0x7c00062c
155 @@ -307,6 +308,7 @@
156 #define PPC_INST_STB 0x98000000
157 #define PPC_INST_STH 0xb0000000
158 #define PPC_INST_STD 0xf8000000
159 +#define PPC_INST_STDX 0x7c00012a
160 #define PPC_INST_STDU 0xf8000001
161 #define PPC_INST_STW 0x90000000
162 #define PPC_INST_STWU 0x94000000
163 diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
164 index b5d023680801..5c901bf4c505 100644
165 --- a/arch/powerpc/include/asm/ppc_asm.h
166 +++ b/arch/powerpc/include/asm/ppc_asm.h
167 @@ -821,4 +821,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
168 stringify_in_c(.long (_target) - . ;) \
169 stringify_in_c(.previous)
170
171 +#ifdef CONFIG_PPC_FSL_BOOK3E
172 +#define BTB_FLUSH(reg) \
173 + lis reg,BUCSR_INIT@h; \
174 + ori reg,reg,BUCSR_INIT@l; \
175 + mtspr SPRN_BUCSR,reg; \
176 + isync;
177 +#else
178 +#define BTB_FLUSH(reg)
179 +#endif /* CONFIG_PPC_FSL_BOOK3E */
180 +
181 #endif /* _ASM_POWERPC_PPC_ASM_H */
182 diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
183 index 1fffbba8d6a5..65676e2325b8 100644
184 --- a/arch/powerpc/include/asm/setup.h
185 +++ b/arch/powerpc/include/asm/setup.h
186 @@ -67,6 +67,13 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
187 static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
188 #endif
189
190 +#ifdef CONFIG_PPC_FSL_BOOK3E
191 +void setup_spectre_v2(void);
192 +#else
193 +static inline void setup_spectre_v2(void) {};
194 +#endif
195 +void do_btb_flush_fixups(void);
196 +
197 #endif /* !__ASSEMBLY__ */
198
199 #endif /* _ASM_POWERPC_SETUP_H */
200 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
201 index 2206912ea4f0..c806a3c12592 100644
202 --- a/arch/powerpc/kernel/entry_64.S
203 +++ b/arch/powerpc/kernel/entry_64.S
204 @@ -80,6 +80,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
205 std r0,GPR0(r1)
206 std r10,GPR1(r1)
207 beq 2f /* if from kernel mode */
208 +#ifdef CONFIG_PPC_FSL_BOOK3E
209 +START_BTB_FLUSH_SECTION
210 + BTB_FLUSH(r10)
211 +END_BTB_FLUSH_SECTION
212 +#endif
213 ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
214 2: std r2,GPR2(r1)
215 std r3,GPR3(r1)
216 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
217 index 6d6e144a28ce..447defdd4503 100644
218 --- a/arch/powerpc/kernel/exceptions-64e.S
219 +++ b/arch/powerpc/kernel/exceptions-64e.S
220 @@ -296,7 +296,8 @@ ret_from_mc_except:
221 andi. r10,r11,MSR_PR; /* save stack pointer */ \
222 beq 1f; /* branch around if supervisor */ \
223 ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\
224 -1: cmpdi cr1,r1,0; /* check if SP makes sense */ \
225 +1: type##_BTB_FLUSH \
226 + cmpdi cr1,r1,0; /* check if SP makes sense */ \
227 bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
228 mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */
229
230 @@ -328,6 +329,30 @@ ret_from_mc_except:
231 #define SPRN_MC_SRR0 SPRN_MCSRR0
232 #define SPRN_MC_SRR1 SPRN_MCSRR1
233
234 +#ifdef CONFIG_PPC_FSL_BOOK3E
235 +#define GEN_BTB_FLUSH \
236 + START_BTB_FLUSH_SECTION \
237 + beq 1f; \
238 + BTB_FLUSH(r10) \
239 + 1: \
240 + END_BTB_FLUSH_SECTION
241 +
242 +#define CRIT_BTB_FLUSH \
243 + START_BTB_FLUSH_SECTION \
244 + BTB_FLUSH(r10) \
245 + END_BTB_FLUSH_SECTION
246 +
247 +#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
248 +#define MC_BTB_FLUSH CRIT_BTB_FLUSH
249 +#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
250 +#else
251 +#define GEN_BTB_FLUSH
252 +#define CRIT_BTB_FLUSH
253 +#define DBG_BTB_FLUSH
254 +#define MC_BTB_FLUSH
255 +#define GDBELL_BTB_FLUSH
256 +#endif
257 +
258 #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \
259 EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
260
261 diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
262 index d0862a100d29..306e26c073a0 100644
263 --- a/arch/powerpc/kernel/head_booke.h
264 +++ b/arch/powerpc/kernel/head_booke.h
265 @@ -32,6 +32,16 @@
266 */
267 #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
268
269 +#ifdef CONFIG_PPC_FSL_BOOK3E
270 +#define BOOKE_CLEAR_BTB(reg) \
271 +START_BTB_FLUSH_SECTION \
272 + BTB_FLUSH(reg) \
273 +END_BTB_FLUSH_SECTION
274 +#else
275 +#define BOOKE_CLEAR_BTB(reg)
276 +#endif
277 +
278 +
279 #define NORMAL_EXCEPTION_PROLOG(intno) \
280 mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
281 mfspr r10, SPRN_SPRG_THREAD; \
282 @@ -43,6 +53,7 @@
283 andi. r11, r11, MSR_PR; /* check whether user or kernel */\
284 mr r11, r1; \
285 beq 1f; \
286 + BOOKE_CLEAR_BTB(r11) \
287 /* if from user, start at top of this thread's kernel stack */ \
288 lwz r11, THREAD_INFO-THREAD(r10); \
289 ALLOC_STACK_FRAME(r11, THREAD_SIZE); \
290 @@ -128,6 +139,7 @@
291 stw r9,_CCR(r8); /* save CR on stack */\
292 mfspr r11,exc_level_srr1; /* check whether user or kernel */\
293 DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
294 + BOOKE_CLEAR_BTB(r10) \
295 andi. r11,r11,MSR_PR; \
296 mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
297 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
298 diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
299 index e2750b856c8f..2386ce2a9c6e 100644
300 --- a/arch/powerpc/kernel/head_fsl_booke.S
301 +++ b/arch/powerpc/kernel/head_fsl_booke.S
302 @@ -453,6 +453,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
303 mfcr r13
304 stw r13, THREAD_NORMSAVE(3)(r10)
305 DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
306 +START_BTB_FLUSH_SECTION
307 + mfspr r11, SPRN_SRR1
308 + andi. r10,r11,MSR_PR
309 + beq 1f
310 + BTB_FLUSH(r10)
311 +1:
312 +END_BTB_FLUSH_SECTION
313 mfspr r10, SPRN_DEAR /* Get faulting address */
314
315 /* If we are faulting a kernel address, we have to use the
316 @@ -547,6 +554,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
317 mfcr r13
318 stw r13, THREAD_NORMSAVE(3)(r10)
319 DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
320 +START_BTB_FLUSH_SECTION
321 + mfspr r11, SPRN_SRR1
322 + andi. r10,r11,MSR_PR
323 + beq 1f
324 + BTB_FLUSH(r10)
325 +1:
326 +END_BTB_FLUSH_SECTION
327 +
328 mfspr r10, SPRN_SRR0 /* Get faulting address */
329
330 /* If we are faulting a kernel address, we have to use the
331 diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
332 index 1b395b85132b..1341325599a7 100644
333 --- a/arch/powerpc/kernel/security.c
334 +++ b/arch/powerpc/kernel/security.c
335 @@ -26,6 +26,10 @@ static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NO
336
337 bool barrier_nospec_enabled;
338 static bool no_nospec;
339 +static bool btb_flush_enabled;
340 +#ifdef CONFIG_PPC_FSL_BOOK3E
341 +static bool no_spectrev2;
342 +#endif
343
344 static void enable_barrier_nospec(bool enable)
345 {
346 @@ -101,6 +105,23 @@ static __init int barrier_nospec_debugfs_init(void)
347 device_initcall(barrier_nospec_debugfs_init);
348 #endif /* CONFIG_DEBUG_FS */
349
350 +#ifdef CONFIG_PPC_FSL_BOOK3E
351 +static int __init handle_nospectre_v2(char *p)
352 +{
353 + no_spectrev2 = true;
354 +
355 + return 0;
356 +}
357 +early_param("nospectre_v2", handle_nospectre_v2);
358 +void setup_spectre_v2(void)
359 +{
360 + if (no_spectrev2)
361 + do_btb_flush_fixups();
362 + else
363 + btb_flush_enabled = true;
364 +}
365 +#endif /* CONFIG_PPC_FSL_BOOK3E */
366 +
367 #ifdef CONFIG_PPC_BOOK3S_64
368 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
369 {
370 @@ -168,31 +189,27 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
371 bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
372 ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
373
374 - if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
375 - bool comma = false;
376 + if (bcs || ccd) {
377 seq_buf_printf(&s, "Mitigation: ");
378
379 - if (bcs) {
380 + if (bcs)
381 seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
382 - comma = true;
383 - }
384 -
385 - if (ccd) {
386 - if (comma)
387 - seq_buf_printf(&s, ", ");
388 - seq_buf_printf(&s, "Indirect branch cache disabled");
389 - comma = true;
390 - }
391
392 - if (comma)
393 + if (bcs && ccd)
394 seq_buf_printf(&s, ", ");
395
396 - seq_buf_printf(&s, "Software count cache flush");
397 + if (ccd)
398 + seq_buf_printf(&s, "Indirect branch cache disabled");
399 + } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
400 + seq_buf_printf(&s, "Mitigation: Software count cache flush");
401
402 if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
403 - seq_buf_printf(&s, "(hardware accelerated)");
404 - } else
405 + seq_buf_printf(&s, " (hardware accelerated)");
406 + } else if (btb_flush_enabled) {
407 + seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
408 + } else {
409 seq_buf_printf(&s, "Vulnerable");
410 + }
411
412 seq_buf_printf(&s, "\n");
413
414 diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
415 index 93fa0c99681e..508244bcf19c 100644
416 --- a/arch/powerpc/kernel/setup-common.c
417 +++ b/arch/powerpc/kernel/setup-common.c
418 @@ -973,6 +973,7 @@ void __init setup_arch(char **cmdline_p)
419 ppc_md.setup_arch();
420
421 setup_barrier_nospec();
422 + setup_spectre_v2();
423
424 paging_init();
425
426 diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
427 index 53016c753f3c..fd35eddf3266 100644
428 --- a/arch/powerpc/kernel/vmlinux.lds.S
429 +++ b/arch/powerpc/kernel/vmlinux.lds.S
430 @@ -164,6 +164,14 @@ SECTIONS
431 }
432 #endif /* CONFIG_PPC_BARRIER_NOSPEC */
433
434 +#ifdef CONFIG_PPC_FSL_BOOK3E
435 + . = ALIGN(8);
436 + __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
437 + __start__btb_flush_fixup = .;
438 + *(__btb_flush_fixup)
439 + __stop__btb_flush_fixup = .;
440 + }
441 +#endif
442 EXCEPTION_TABLE(0)
443
444 NOTES :kernel :notes
445 diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
446 index 81bd8a07aa51..612b7f6a887f 100644
447 --- a/arch/powerpc/kvm/bookehv_interrupts.S
448 +++ b/arch/powerpc/kvm/bookehv_interrupts.S
449 @@ -75,6 +75,10 @@
450 PPC_LL r1, VCPU_HOST_STACK(r4)
451 PPC_LL r2, HOST_R2(r1)
452
453 +START_BTB_FLUSH_SECTION
454 + BTB_FLUSH(r10)
455 +END_BTB_FLUSH_SECTION
456 +
457 mfspr r10, SPRN_PID
458 lwz r8, VCPU_HOST_PID(r4)
459 PPC_LL r11, VCPU_SHARED(r4)
460 diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
461 index 3f8189eb56ed..fde1de08b4d7 100644
462 --- a/arch/powerpc/kvm/e500_emulate.c
463 +++ b/arch/powerpc/kvm/e500_emulate.c
464 @@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
465 vcpu->arch.pwrmgtcr0 = spr_val;
466 break;
467
468 + case SPRN_BUCSR:
469 + /*
470 + * If we are here, it means that we have already flushed the
471 + * branch predictor, so just return to guest.
472 + */
473 + break;
474 +
475 /* extra exceptions */
476 #ifdef CONFIG_SPE_POSSIBLE
477 case SPRN_IVOR32:
478 diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
479 index e613b02bb2f0..dbe478e7b8e0 100644
480 --- a/arch/powerpc/lib/feature-fixups.c
481 +++ b/arch/powerpc/lib/feature-fixups.c
482 @@ -347,6 +347,29 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
483
484 printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
485 }
486 +
487 +static void patch_btb_flush_section(long *curr)
488 +{
489 + unsigned int *start, *end;
490 +
491 + start = (void *)curr + *curr;
492 + end = (void *)curr + *(curr + 1);
493 + for (; start < end; start++) {
494 + pr_devel("patching dest %lx\n", (unsigned long)start);
495 + patch_instruction(start, PPC_INST_NOP);
496 + }
497 +}
498 +
499 +void do_btb_flush_fixups(void)
500 +{
501 + long *start, *end;
502 +
503 + start = PTRRELOC(&__start__btb_flush_fixup);
504 + end = PTRRELOC(&__stop__btb_flush_fixup);
505 +
506 + for (; start < end; start += 2)
507 + patch_btb_flush_section(start);
508 +}
509 #endif /* CONFIG_PPC_FSL_BOOK3E */
510
511 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
512 diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
513 index 844d8e774492..b7f6f6e0b6e8 100644
514 --- a/arch/powerpc/lib/memcmp_64.S
515 +++ b/arch/powerpc/lib/memcmp_64.S
516 @@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp)
517 beq .Lzero
518
519 .Lcmp_rest_lt8bytes:
520 - /* Here we have only less than 8 bytes to compare with. at least s1
521 - * Address is aligned with 8 bytes.
522 - * The next double words are load and shift right with appropriate
523 - * bits.
524 + /*
525 + * Here we have less than 8 bytes to compare. At least s1 is aligned to
526 + * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
527 + * page boundary, otherwise we might read past the end of the buffer and
528 + * trigger a page fault. We use 4K as the conservative minimum page
529 + * size. If we detect that case we go to the byte-by-byte loop.
530 + *
531 + * Otherwise the next double word is loaded from s1 and s2, and shifted
532 + * right to compare the appropriate bits.
533 */
534 + clrldi r6,r4,(64-12) // r6 = r4 & 0xfff
535 + cmpdi r6,0xff8
536 + bgt .Lshort
537 +
538 subfic r6,r5,8
539 slwi r6,r6,3
540 LD rA,0,r3
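
The rewritten comment above spells out the bug being fixed: with fewer than 8 bytes left, an unconditional 8-byte load from s2 can run past the end of the buffer and fault if s2 + 7 crosses a page boundary. The two added instructions (clrldi/cmpdi against 0xff8, treating 4K as the conservative minimum page size) implement roughly the following check; the helper below is an illustrative sketch with an invented name, not code from the kernel:

#include <stdbool.h>
#include <stdint.h>

/* One unaligned 8-byte load at s2 is safe only if bytes s2..s2+7 stay
 * inside the same (conservatively 4K) page. */
static bool safe_to_load_8_bytes(const void *s2)
{
	uintptr_t off = (uintptr_t)s2 & 0xfff;	/* offset within a 4K page */

	return off <= 0xff8;	/* 0xff8 + 7 == 0xfff, last byte still in page */
}
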
541 diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
542 index 7fd20c52a8ec..9ed90064f542 100644
543 --- a/arch/powerpc/mm/tlb_low_64e.S
544 +++ b/arch/powerpc/mm/tlb_low_64e.S
545 @@ -70,6 +70,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
546 std r15,EX_TLB_R15(r12)
547 std r10,EX_TLB_CR(r12)
548 #ifdef CONFIG_PPC_FSL_BOOK3E
549 +START_BTB_FLUSH_SECTION
550 + mfspr r11, SPRN_SRR1
551 + andi. r10,r11,MSR_PR
552 + beq 1f
553 + BTB_FLUSH(r10)
554 +1:
555 +END_BTB_FLUSH_SECTION
556 std r7,EX_TLB_R7(r12)
557 #endif
558 TLB_MISS_PROLOG_STATS
559 diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
560 index 47fc6660845d..68dece206048 100644
561 --- a/arch/powerpc/net/bpf_jit.h
562 +++ b/arch/powerpc/net/bpf_jit.h
563 @@ -51,6 +51,8 @@
564 #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
565 #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
566 ___PPC_RA(base) | ((i) & 0xfffc))
567 +#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \
568 + ___PPC_RA(base) | ___PPC_RB(b))
569 #define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
570 ___PPC_RA(base) | ((i) & 0xfffc))
571 #define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
572 @@ -65,7 +67,9 @@
573 #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
574 ___PPC_RA(base) | IMM_L(i))
575 #define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \
576 - ___PPC_RA(base) | IMM_L(i))
577 + ___PPC_RA(base) | ((i) & 0xfffc))
578 +#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \
579 + ___PPC_RA(base) | ___PPC_RB(b))
580 #define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \
581 ___PPC_RA(base) | IMM_L(i))
582 #define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \
583 @@ -85,17 +89,6 @@
584 ___PPC_RA(a) | ___PPC_RB(b))
585 #define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \
586 ___PPC_RA(a) | ___PPC_RB(b))
587 -
588 -#ifdef CONFIG_PPC64
589 -#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
590 -#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
591 -#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
592 -#else
593 -#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
594 -#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
595 -#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
596 -#endif
597 -
598 #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
599 #define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
600 #define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \
601 diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
602 index 6f4daacad296..ade04547703f 100644
603 --- a/arch/powerpc/net/bpf_jit32.h
604 +++ b/arch/powerpc/net/bpf_jit32.h
605 @@ -123,6 +123,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
606 #define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
607 #endif
608
609 +#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
610 +#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
611 +#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
612 +
613 #define SEEN_DATAREF 0x10000 /* might call external helpers */
614 #define SEEN_XREG 0x20000 /* X reg is used */
615 #define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
616 diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
617 index 3609be4692b3..47f441f351a6 100644
618 --- a/arch/powerpc/net/bpf_jit64.h
619 +++ b/arch/powerpc/net/bpf_jit64.h
620 @@ -68,6 +68,26 @@ static const int b2p[] = {
621 /* PPC NVR range -- update this if we ever use NVRs below r27 */
622 #define BPF_PPC_NVR_MIN 27
623
624 +/*
625 + * WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
626 + * so ensure that it isn't in use already.
627 + */
628 +#define PPC_BPF_LL(r, base, i) do { \
629 + if ((i) % 4) { \
630 + PPC_LI(b2p[TMP_REG_2], (i)); \
631 + PPC_LDX(r, base, b2p[TMP_REG_2]); \
632 + } else \
633 + PPC_LD(r, base, i); \
634 + } while(0)
635 +#define PPC_BPF_STL(r, base, i) do { \
636 + if ((i) % 4) { \
637 + PPC_LI(b2p[TMP_REG_2], (i)); \
638 + PPC_STDX(r, base, b2p[TMP_REG_2]); \
639 + } else \
640 + PPC_STD(r, base, i); \
641 + } while(0)
642 +#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
643 +
644 #define SEEN_FUNC 0x1000 /* might call external helpers */
645 #define SEEN_STACK 0x2000 /* uses BPF stack */
646 #define SEEN_TAILCALL 0x4000 /* uses tail calls */
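
Background on why the new PPC_BPF_LL/PPC_BPF_STL definitions above need TMP_REG_2 at all: the 64-bit ld/std instructions are DS-form, which reserves the two low bits of the offset field for the extended opcode, so the effective displacement is always a multiple of 4 (hence the companion change from IMM_L(i) to ((i) & 0xfffc) in PPC_LD). Offsets that are not word aligned therefore go through the indexed PPC_LDX/PPC_STDX forms with the offset materialised in a scratch register. A trivial, self-contained restatement of that decision (helper name invented here):

#include <stdbool.h>

/* DS-form ld/std can only encode displacements that are multiples of 4;
 * anything else must use the indexed (ldx/stdx) form with the offset in a
 * register, which is what PPC_BPF_LL/PPC_BPF_STL select above. */
static bool ds_form_displacement_ok(int off)
{
	return (off % 4) == 0;
}
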
647 diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
648 index 50b129785aee..226eec62d125 100644
649 --- a/arch/powerpc/net/bpf_jit_comp64.c
650 +++ b/arch/powerpc/net/bpf_jit_comp64.c
651 @@ -226,7 +226,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
652 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
653 * goto out;
654 */
655 - PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
656 + PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
657 PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
658 PPC_BCC(COND_GT, out);
659
660 @@ -239,7 +239,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
661 /* prog = array->ptrs[index]; */
662 PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
663 PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
664 - PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
665 + PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
666
667 /*
668 * if (prog == NULL)
669 @@ -249,7 +249,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
670 PPC_BCC(COND_EQ, out);
671
672 /* goto *(prog->bpf_func + prologue_size); */
673 - PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
674 + PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
675 #ifdef PPC64_ELF_ABI_v1
676 /* skip past the function descriptor */
677 PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
678 @@ -573,7 +573,7 @@ bpf_alu32_trunc:
679 * the instructions generated will remain the
680 * same across all passes
681 */
682 - PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
683 + PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
684 PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
685 PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
686 break;
687 @@ -629,7 +629,7 @@ emit_clear:
688 PPC_LI32(b2p[TMP_REG_1], imm);
689 src_reg = b2p[TMP_REG_1];
690 }
691 - PPC_STD(src_reg, dst_reg, off);
692 + PPC_BPF_STL(src_reg, dst_reg, off);
693 break;
694
695 /*
696 @@ -676,7 +676,7 @@ emit_clear:
697 break;
698 /* dst = *(u64 *)(ul) (src + off) */
699 case BPF_LDX | BPF_MEM | BPF_DW:
700 - PPC_LD(dst_reg, src_reg, off);
701 + PPC_BPF_LL(dst_reg, src_reg, off);
702 break;
703
704 /*
705 diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
706 index 6ed22127391b..921f12182f3e 100644
707 --- a/arch/powerpc/platforms/pseries/pseries_energy.c
708 +++ b/arch/powerpc/platforms/pseries/pseries_energy.c
709 @@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu)
710
711 ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
712 } else {
713 - const __be32 *indexes;
714 -
715 - indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
716 - if (indexes == NULL)
717 - goto err_of_node_put;
718 + u32 nr_drc_indexes, thread_drc_index;
719
720 /*
721 - * The first element indexes[0] is the number of drc_indexes
722 - * returned in the list. Hence thread_index+1 will get the
723 - * drc_index corresponding to core number thread_index.
724 + * The first element of ibm,drc-indexes array is the
725 + * number of drc_indexes returned in the list. Hence
726 + * thread_index+1 will get the drc_index corresponding
727 + * to core number thread_index.
728 */
729 - ret = indexes[thread_index + 1];
730 + rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
731 + 0, &nr_drc_indexes);
732 + if (rc)
733 + goto err_of_node_put;
734 +
735 + WARN_ON_ONCE(thread_index > nr_drc_indexes);
736 + rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
737 + thread_index + 1,
738 + &thread_drc_index);
739 + if (rc)
740 + goto err_of_node_put;
741 +
742 + ret = thread_drc_index;
743 }
744
745 rc = 0;
746 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
747 index 44c6a82b7ce5..e76d16ac2776 100644
748 --- a/arch/x86/Kconfig
749 +++ b/arch/x86/Kconfig
750 @@ -2199,14 +2199,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
751 If unsure, leave at the default value.
752
753 config HOTPLUG_CPU
754 - bool "Support for hot-pluggable CPUs"
755 + def_bool y
756 depends on SMP
757 - ---help---
758 - Say Y here to allow turning CPUs off and on. CPUs can be
759 - controlled through /sys/devices/system/cpu.
760 - ( Note: power management support will enable this option
761 - automatically on SMP systems. )
762 - Say N if you want to disable CPU hotplug.
763
764 config BOOTPARAM_HOTPLUG_CPU0
765 bool "Set default setting of cpu0_hotpluggable"
766 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
767 index 46f0b621bd37..7014dba23d20 100644
768 --- a/arch/x86/include/asm/kvm_host.h
769 +++ b/arch/x86/include/asm/kvm_host.h
770 @@ -315,6 +315,7 @@ struct kvm_mmu_page {
771 };
772
773 struct kvm_pio_request {
774 + unsigned long linear_rip;
775 unsigned long count;
776 int in;
777 int port;
778 @@ -527,6 +528,7 @@ struct kvm_vcpu_arch {
779 bool tpr_access_reporting;
780 u64 ia32_xss;
781 u64 microcode_version;
782 + u64 arch_capabilities;
783
784 /*
785 * Paging state of the vcpu
786 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
787 index 4029d3783e18..f99f59625da5 100644
788 --- a/arch/x86/kvm/vmx.c
789 +++ b/arch/x86/kvm/vmx.c
790 @@ -970,7 +970,6 @@ struct vcpu_vmx {
791 u64 msr_guest_kernel_gs_base;
792 #endif
793
794 - u64 arch_capabilities;
795 u64 spec_ctrl;
796
797 u32 vm_entry_controls_shadow;
798 @@ -4104,12 +4103,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
799
800 msr_info->data = to_vmx(vcpu)->spec_ctrl;
801 break;
802 - case MSR_IA32_ARCH_CAPABILITIES:
803 - if (!msr_info->host_initiated &&
804 - !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
805 - return 1;
806 - msr_info->data = to_vmx(vcpu)->arch_capabilities;
807 - break;
808 case MSR_IA32_SYSENTER_CS:
809 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
810 break;
811 @@ -4271,11 +4264,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
812 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
813 MSR_TYPE_W);
814 break;
815 - case MSR_IA32_ARCH_CAPABILITIES:
816 - if (!msr_info->host_initiated)
817 - return 1;
818 - vmx->arch_capabilities = data;
819 - break;
820 case MSR_IA32_CR_PAT:
821 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
822 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
823 @@ -6666,8 +6654,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
824 ++vmx->nmsrs;
825 }
826
827 - vmx->arch_capabilities = kvm_get_arch_capabilities();
828 -
829 vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
830
831 /* 22.2.1, 20.8.1 */
832 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
833 index 6181ec19bed2..4a61e1609c97 100644
834 --- a/arch/x86/kvm/x86.c
835 +++ b/arch/x86/kvm/x86.c
836 @@ -2350,6 +2350,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
837 if (msr_info->host_initiated)
838 vcpu->arch.microcode_version = data;
839 break;
840 + case MSR_IA32_ARCH_CAPABILITIES:
841 + if (!msr_info->host_initiated)
842 + return 1;
843 + vcpu->arch.arch_capabilities = data;
844 + break;
845 case MSR_EFER:
846 return set_efer(vcpu, data);
847 case MSR_K7_HWCR:
848 @@ -2654,6 +2659,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
849 case MSR_IA32_UCODE_REV:
850 msr_info->data = vcpu->arch.microcode_version;
851 break;
852 + case MSR_IA32_ARCH_CAPABILITIES:
853 + if (!msr_info->host_initiated &&
854 + !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
855 + return 1;
856 + msr_info->data = vcpu->arch.arch_capabilities;
857 + break;
858 case MSR_IA32_TSC:
859 msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
860 break;
861 @@ -6317,14 +6328,27 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
862 }
863 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
864
865 +static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
866 +{
867 + vcpu->arch.pio.count = 0;
868 +
869 + if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
870 + return 1;
871 +
872 + return kvm_skip_emulated_instruction(vcpu);
873 +}
874 +
875 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
876 unsigned short port)
877 {
878 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
879 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
880 size, port, &val, 1);
881 - /* do not return to emulator after return from userspace */
882 - vcpu->arch.pio.count = 0;
883 +
884 + if (!ret) {
885 + vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
886 + vcpu->arch.complete_userspace_io = complete_fast_pio_out;
887 + }
888 return ret;
889 }
890
891 @@ -6335,6 +6359,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
892 /* We should only ever be called with arch.pio.count equal to 1 */
893 BUG_ON(vcpu->arch.pio.count != 1);
894
895 + if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
896 + vcpu->arch.pio.count = 0;
897 + return 1;
898 + }
899 +
900 /* For size less than 4 we merge, else we zero extend */
901 val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
902 : 0;
903 @@ -6347,7 +6376,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
904 vcpu->arch.pio.port, &val, 1);
905 kvm_register_write(vcpu, VCPU_REGS_RAX, val);
906
907 - return 1;
908 + return kvm_skip_emulated_instruction(vcpu);
909 }
910
911 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
912 @@ -6366,6 +6395,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
913 return ret;
914 }
915
916 + vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
917 vcpu->arch.complete_userspace_io = complete_fast_pio_in;
918
919 return 0;
920 @@ -6373,16 +6403,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
921
922 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
923 {
924 - int ret = kvm_skip_emulated_instruction(vcpu);
925 + int ret;
926
927 - /*
928 - * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
929 - * KVM_EXIT_DEBUG here.
930 - */
931 if (in)
932 - return kvm_fast_pio_in(vcpu, size, port) && ret;
933 + ret = kvm_fast_pio_in(vcpu, size, port);
934 else
935 - return kvm_fast_pio_out(vcpu, size, port) && ret;
936 + ret = kvm_fast_pio_out(vcpu, size, port);
937 + return ret && kvm_skip_emulated_instruction(vcpu);
938 }
939 EXPORT_SYMBOL_GPL(kvm_fast_pio);
940
941 @@ -8485,6 +8512,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
942
943 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
944 {
945 + vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
946 kvm_vcpu_mtrr_init(vcpu);
947 vcpu_load(vcpu);
948 kvm_vcpu_reset(vcpu, false);
949 diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
950 index 52f6152d1fcb..7ae52c17618e 100644
951 --- a/drivers/char/ipmi/ipmi_si.h
952 +++ b/drivers/char/ipmi/ipmi_si.h
953 @@ -25,7 +25,9 @@ void ipmi_irq_finish_setup(struct si_sm_io *io);
954 int ipmi_si_remove_by_dev(struct device *dev);
955 void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
956 unsigned long addr);
957 -int ipmi_si_hardcode_find_bmc(void);
958 +void ipmi_hardcode_init(void);
959 +void ipmi_si_hardcode_exit(void);
960 +int ipmi_si_hardcode_match(int addr_type, unsigned long addr);
961 void ipmi_si_platform_init(void);
962 void ipmi_si_platform_shutdown(void);
963
964 diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
965 index 10219f24546b..9ae2405c28bb 100644
966 --- a/drivers/char/ipmi/ipmi_si_hardcode.c
967 +++ b/drivers/char/ipmi/ipmi_si_hardcode.c
968 @@ -1,6 +1,7 @@
969 // SPDX-License-Identifier: GPL-2.0+
970
971 #include <linux/moduleparam.h>
972 +#include <linux/platform_device.h>
973 #include "ipmi_si.h"
974
975 #define PFX "ipmi_hardcode: "
976 @@ -11,23 +12,22 @@
977
978 #define SI_MAX_PARMS 4
979
980 -static char *si_type[SI_MAX_PARMS];
981 #define MAX_SI_TYPE_STR 30
982 -static char si_type_str[MAX_SI_TYPE_STR];
983 +static char si_type_str[MAX_SI_TYPE_STR] __initdata;
984 static unsigned long addrs[SI_MAX_PARMS];
985 static unsigned int num_addrs;
986 static unsigned int ports[SI_MAX_PARMS];
987 static unsigned int num_ports;
988 -static int irqs[SI_MAX_PARMS];
989 -static unsigned int num_irqs;
990 -static int regspacings[SI_MAX_PARMS];
991 -static unsigned int num_regspacings;
992 -static int regsizes[SI_MAX_PARMS];
993 -static unsigned int num_regsizes;
994 -static int regshifts[SI_MAX_PARMS];
995 -static unsigned int num_regshifts;
996 -static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
997 -static unsigned int num_slave_addrs;
998 +static int irqs[SI_MAX_PARMS] __initdata;
999 +static unsigned int num_irqs __initdata;
1000 +static int regspacings[SI_MAX_PARMS] __initdata;
1001 +static unsigned int num_regspacings __initdata;
1002 +static int regsizes[SI_MAX_PARMS] __initdata;
1003 +static unsigned int num_regsizes __initdata;
1004 +static int regshifts[SI_MAX_PARMS] __initdata;
1005 +static unsigned int num_regshifts __initdata;
1006 +static int slave_addrs[SI_MAX_PARMS] __initdata;
1007 +static unsigned int num_slave_addrs __initdata;
1008
1009 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1010 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1011 @@ -72,12 +72,133 @@ MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1012 " overridden by this parm. This is an array indexed"
1013 " by interface number.");
1014
1015 -int ipmi_si_hardcode_find_bmc(void)
1016 +static struct platform_device *ipmi_hc_pdevs[SI_MAX_PARMS];
1017 +
1018 +static void __init ipmi_hardcode_init_one(const char *si_type_str,
1019 + unsigned int i,
1020 + unsigned long addr,
1021 + unsigned int flags)
1022 {
1023 - int ret = -ENODEV;
1024 - int i;
1025 - struct si_sm_io io;
1026 + struct platform_device *pdev;
1027 + unsigned int num_r = 1, size;
1028 + struct resource r[4];
1029 + struct property_entry p[6];
1030 + enum si_type si_type;
1031 + unsigned int regspacing, regsize;
1032 + int rv;
1033 +
1034 + memset(p, 0, sizeof(p));
1035 + memset(r, 0, sizeof(r));
1036 +
1037 + if (!si_type_str || !*si_type_str || strcmp(si_type_str, "kcs") == 0) {
1038 + size = 2;
1039 + si_type = SI_KCS;
1040 + } else if (strcmp(si_type_str, "smic") == 0) {
1041 + size = 2;
1042 + si_type = SI_SMIC;
1043 + } else if (strcmp(si_type_str, "bt") == 0) {
1044 + size = 3;
1045 + si_type = SI_BT;
1046 + } else if (strcmp(si_type_str, "invalid") == 0) {
1047 + /*
1048 + * Allow a firmware-specified interface to be
1049 + * disabled.
1050 + */
1051 + size = 1;
1052 + si_type = SI_TYPE_INVALID;
1053 + } else {
1054 + pr_warn("Interface type specified for interface %d, was invalid: %s\n",
1055 + i, si_type_str);
1056 + return;
1057 + }
1058 +
1059 + regsize = regsizes[i];
1060 + if (regsize == 0)
1061 + regsize = DEFAULT_REGSIZE;
1062 +
1063 + p[0] = PROPERTY_ENTRY_U8("ipmi-type", si_type);
1064 + p[1] = PROPERTY_ENTRY_U8("slave-addr", slave_addrs[i]);
1065 + p[2] = PROPERTY_ENTRY_U8("addr-source", SI_HARDCODED);
1066 + p[3] = PROPERTY_ENTRY_U8("reg-shift", regshifts[i]);
1067 + p[4] = PROPERTY_ENTRY_U8("reg-size", regsize);
1068 + /* Last entry must be left NULL to terminate it. */
1069 +
1070 + /*
1071 + * Register spacing is derived from the resources in
1072 + * the IPMI platform code.
1073 + */
1074 + regspacing = regspacings[i];
1075 + if (regspacing == 0)
1076 + regspacing = regsize;
1077 +
1078 + r[0].start = addr;
1079 + r[0].end = r[0].start + regsize - 1;
1080 + r[0].name = "IPMI Address 1";
1081 + r[0].flags = flags;
1082 +
1083 + if (size > 1) {
1084 + r[1].start = r[0].start + regspacing;
1085 + r[1].end = r[1].start + regsize - 1;
1086 + r[1].name = "IPMI Address 2";
1087 + r[1].flags = flags;
1088 + num_r++;
1089 + }
1090 +
1091 + if (size > 2) {
1092 + r[2].start = r[1].start + regspacing;
1093 + r[2].end = r[2].start + regsize - 1;
1094 + r[2].name = "IPMI Address 3";
1095 + r[2].flags = flags;
1096 + num_r++;
1097 + }
1098 +
1099 + if (irqs[i]) {
1100 + r[num_r].start = irqs[i];
1101 + r[num_r].end = irqs[i];
1102 + r[num_r].name = "IPMI IRQ";
1103 + r[num_r].flags = IORESOURCE_IRQ;
1104 + num_r++;
1105 + }
1106 +
1107 + pdev = platform_device_alloc("hardcode-ipmi-si", i);
1108 + if (!pdev) {
1109 + pr_err("Error allocating IPMI platform device %d\n", i);
1110 + return;
1111 + }
1112 +
1113 + rv = platform_device_add_resources(pdev, r, num_r);
1114 + if (rv) {
1115 + dev_err(&pdev->dev,
1116 + "Unable to add hard-code resources: %d\n", rv);
1117 + goto err;
1118 + }
1119 +
1120 + rv = platform_device_add_properties(pdev, p);
1121 + if (rv) {
1122 + dev_err(&pdev->dev,
1123 + "Unable to add hard-code properties: %d\n", rv);
1124 + goto err;
1125 + }
1126 +
1127 + rv = platform_device_add(pdev);
1128 + if (rv) {
1129 + dev_err(&pdev->dev,
1130 + "Unable to add hard-code device: %d\n", rv);
1131 + goto err;
1132 + }
1133 +
1134 + ipmi_hc_pdevs[i] = pdev;
1135 + return;
1136 +
1137 +err:
1138 + platform_device_put(pdev);
1139 +}
1140 +
1141 +void __init ipmi_hardcode_init(void)
1142 +{
1143 + unsigned int i;
1144 char *str;
1145 + char *si_type[SI_MAX_PARMS];
1146
1147 /* Parse out the si_type string into its components. */
1148 str = si_type_str;
1149 @@ -94,54 +215,45 @@ int ipmi_si_hardcode_find_bmc(void)
1150 }
1151 }
1152
1153 - memset(&io, 0, sizeof(io));
1154 for (i = 0; i < SI_MAX_PARMS; i++) {
1155 - if (!ports[i] && !addrs[i])
1156 - continue;
1157 -
1158 - io.addr_source = SI_HARDCODED;
1159 - pr_info(PFX "probing via hardcoded address\n");
1160 -
1161 - if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1162 - io.si_type = SI_KCS;
1163 - } else if (strcmp(si_type[i], "smic") == 0) {
1164 - io.si_type = SI_SMIC;
1165 - } else if (strcmp(si_type[i], "bt") == 0) {
1166 - io.si_type = SI_BT;
1167 - } else {
1168 - pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
1169 - i, si_type[i]);
1170 - continue;
1171 - }
1172 + if (i < num_ports && ports[i])
1173 + ipmi_hardcode_init_one(si_type[i], i, ports[i],
1174 + IORESOURCE_IO);
1175 + if (i < num_addrs && addrs[i])
1176 + ipmi_hardcode_init_one(si_type[i], i, addrs[i],
1177 + IORESOURCE_MEM);
1178 + }
1179 +}
1180
1181 - if (ports[i]) {
1182 - /* An I/O port */
1183 - io.addr_data = ports[i];
1184 - io.addr_type = IPMI_IO_ADDR_SPACE;
1185 - } else if (addrs[i]) {
1186 - /* A memory port */
1187 - io.addr_data = addrs[i];
1188 - io.addr_type = IPMI_MEM_ADDR_SPACE;
1189 - } else {
1190 - pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
1191 - i);
1192 - continue;
1193 - }
1194 +void ipmi_si_hardcode_exit(void)
1195 +{
1196 + unsigned int i;
1197
1198 - io.addr = NULL;
1199 - io.regspacing = regspacings[i];
1200 - if (!io.regspacing)
1201 - io.regspacing = DEFAULT_REGSPACING;
1202 - io.regsize = regsizes[i];
1203 - if (!io.regsize)
1204 - io.regsize = DEFAULT_REGSIZE;
1205 - io.regshift = regshifts[i];
1206 - io.irq = irqs[i];
1207 - if (io.irq)
1208 - io.irq_setup = ipmi_std_irq_setup;
1209 - io.slave_addr = slave_addrs[i];
1210 -
1211 - ret = ipmi_si_add_smi(&io);
1212 + for (i = 0; i < SI_MAX_PARMS; i++) {
1213 + if (ipmi_hc_pdevs[i])
1214 + platform_device_unregister(ipmi_hc_pdevs[i]);
1215 }
1216 - return ret;
1217 +}
1218 +
1219 +/*
1220 + * Returns true of the given address exists as a hardcoded address,
1221 + * false if not.
1222 + */
1223 +int ipmi_si_hardcode_match(int addr_type, unsigned long addr)
1224 +{
1225 + unsigned int i;
1226 +
1227 + if (addr_type == IPMI_IO_ADDR_SPACE) {
1228 + for (i = 0; i < num_ports; i++) {
1229 + if (ports[i] == addr)
1230 + return 1;
1231 + }
1232 + } else {
1233 + for (i = 0; i < num_addrs; i++) {
1234 + if (addrs[i] == addr)
1235 + return 1;
1236 + }
1237 + }
1238 +
1239 + return 0;
1240 }
1241 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
1242 index 82d831b103f9..75e5006f395a 100644
1243 --- a/drivers/char/ipmi/ipmi_si_intf.c
1244 +++ b/drivers/char/ipmi/ipmi_si_intf.c
1245 @@ -1862,6 +1862,18 @@ int ipmi_si_add_smi(struct si_sm_io *io)
1246 int rv = 0;
1247 struct smi_info *new_smi, *dup;
1248
1249 + /*
1250 + * If the user gave us a hard-coded device at the same
1251 + * address, they presumably want us to use it and not what is
1252 + * in the firmware.
1253 + */
1254 + if (io->addr_source != SI_HARDCODED &&
1255 + ipmi_si_hardcode_match(io->addr_type, io->addr_data)) {
1256 + dev_info(io->dev,
1257 + "Hard-coded device at this address already exists");
1258 + return -ENODEV;
1259 + }
1260 +
1261 if (!io->io_setup) {
1262 if (io->addr_type == IPMI_IO_ADDR_SPACE) {
1263 io->io_setup = ipmi_si_port_setup;
1264 @@ -2094,7 +2106,7 @@ static int try_smi_init(struct smi_info *new_smi)
1265 return rv;
1266 }
1267
1268 -static int init_ipmi_si(void)
1269 +static int __init init_ipmi_si(void)
1270 {
1271 struct smi_info *e;
1272 enum ipmi_addr_src type = SI_INVALID;
1273 @@ -2102,12 +2114,9 @@ static int init_ipmi_si(void)
1274 if (initialized)
1275 return 0;
1276
1277 + ipmi_hardcode_init();
1278 pr_info("IPMI System Interface driver.\n");
1279
1280 - /* If the user gave us a device, they presumably want us to use it */
1281 - if (!ipmi_si_hardcode_find_bmc())
1282 - goto do_scan;
1283 -
1284 ipmi_si_platform_init();
1285
1286 ipmi_si_pci_init();
1287 @@ -2118,7 +2127,6 @@ static int init_ipmi_si(void)
1288 with multiple BMCs we assume that there will be several instances
1289 of a given type so if we succeed in registering a type then also
1290 try to register everything else of the same type */
1291 -do_scan:
1292 mutex_lock(&smi_infos_lock);
1293 list_for_each_entry(e, &smi_infos, link) {
1294 /* Try to register a device if it has an IRQ and we either
1295 @@ -2304,6 +2312,8 @@ static void cleanup_ipmi_si(void)
1296 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
1297 cleanup_one_si(e);
1298 mutex_unlock(&smi_infos_lock);
1299 +
1300 + ipmi_si_hardcode_exit();
1301 }
1302 module_exit(cleanup_ipmi_si);
1303
1304 diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
1305 index bf69927502bd..d32b0dd377c5 100644
1306 --- a/drivers/char/ipmi/ipmi_si_platform.c
1307 +++ b/drivers/char/ipmi/ipmi_si_platform.c
1308 @@ -126,8 +126,6 @@ ipmi_get_info_from_resources(struct platform_device *pdev,
1309 if (res_second->start > io->addr_data)
1310 io->regspacing = res_second->start - io->addr_data;
1311 }
1312 - io->regsize = DEFAULT_REGSIZE;
1313 - io->regshift = 0;
1314
1315 return res;
1316 }
1317 @@ -135,7 +133,7 @@ ipmi_get_info_from_resources(struct platform_device *pdev,
1318 static int platform_ipmi_probe(struct platform_device *pdev)
1319 {
1320 struct si_sm_io io;
1321 - u8 type, slave_addr, addr_source;
1322 + u8 type, slave_addr, addr_source, regsize, regshift;
1323 int rv;
1324
1325 rv = device_property_read_u8(&pdev->dev, "addr-source", &addr_source);
1326 @@ -147,7 +145,7 @@ static int platform_ipmi_probe(struct platform_device *pdev)
1327 if (addr_source == SI_SMBIOS) {
1328 if (!si_trydmi)
1329 return -ENODEV;
1330 - } else {
1331 + } else if (addr_source != SI_HARDCODED) {
1332 if (!si_tryplatform)
1333 return -ENODEV;
1334 }
1335 @@ -167,11 +165,23 @@ static int platform_ipmi_probe(struct platform_device *pdev)
1336 case SI_BT:
1337 io.si_type = type;
1338 break;
1339 + case SI_TYPE_INVALID: /* User disabled this in hardcode. */
1340 + return -ENODEV;
1341 default:
1342 dev_err(&pdev->dev, "ipmi-type property is invalid\n");
1343 return -EINVAL;
1344 }
1345
1346 + io.regsize = DEFAULT_REGSIZE;
1347 + rv = device_property_read_u8(&pdev->dev, "reg-size", &regsize);
1348 + if (!rv)
1349 + io.regsize = regsize;
1350 +
1351 + io.regshift = 0;
1352 + rv = device_property_read_u8(&pdev->dev, "reg-shift", &regshift);
1353 + if (!rv)
1354 + io.regshift = regshift;
1355 +
1356 if (!ipmi_get_info_from_resources(pdev, &io))
1357 return -EINVAL;
1358
1359 @@ -191,7 +201,8 @@ static int platform_ipmi_probe(struct platform_device *pdev)
1360
1361 io.dev = &pdev->dev;
1362
1363 - pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
1364 + pr_info("ipmi_si: %s: %s %#lx regsize %d spacing %d irq %d\n",
1365 + ipmi_addr_src_to_str(addr_source),
1366 (io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
1367 io.addr_data, io.regsize, io.regspacing, io.irq);
1368
1369 @@ -356,6 +367,9 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
1370 goto err_free;
1371 }
1372
1373 + io.regsize = DEFAULT_REGSIZE;
1374 + io.regshift = 0;
1375 +
1376 res = ipmi_get_info_from_resources(pdev, &io);
1377 if (!res) {
1378 rv = -EINVAL;
1379 @@ -417,6 +431,11 @@ static int ipmi_remove(struct platform_device *pdev)
1380 return ipmi_si_remove_by_dev(&pdev->dev);
1381 }
1382
1383 +static const struct platform_device_id si_plat_ids[] = {
1384 + { "hardcode-ipmi-si", 0 },
1385 + { }
1386 +};
1387 +
1388 struct platform_driver ipmi_platform_driver = {
1389 .driver = {
1390 .name = DEVICE_NAME,
1391 @@ -425,6 +444,7 @@ struct platform_driver ipmi_platform_driver = {
1392 },
1393 .probe = ipmi_probe,
1394 .remove = ipmi_remove,
1395 + .id_table = si_plat_ids
1396 };
1397
1398 void ipmi_si_platform_init(void)
1399 diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
1400 index 91b90c0cea73..12acdac85820 100644
1401 --- a/drivers/gpio/gpio-adnp.c
1402 +++ b/drivers/gpio/gpio-adnp.c
1403 @@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
1404 if (err < 0)
1405 goto out;
1406
1407 - if (err & BIT(pos))
1408 - err = -EACCES;
1409 + if (value & BIT(pos)) {
1410 + err = -EPERM;
1411 + goto out;
1412 + }
1413
1414 err = 0;
1415
1416 diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
1417 index 0ecd2369c2ca..a09d2f9ebacc 100644
1418 --- a/drivers/gpio/gpio-exar.c
1419 +++ b/drivers/gpio/gpio-exar.c
1420 @@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
1421 mutex_init(&exar_gpio->lock);
1422
1423 index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
1424 + if (index < 0)
1425 + goto err_destroy;
1426
1427 sprintf(exar_gpio->name, "exar_gpio%d", index);
1428 exar_gpio->gpio_chip.label = exar_gpio->name;
1429 diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
1430 index a614db310ea2..be15289bff9c 100644
1431 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
1432 +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
1433 @@ -1446,7 +1446,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
1434 }
1435
1436 if (index_mode) {
1437 - if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
1438 + if (guest_gma >= I915_GTT_PAGE_SIZE) {
1439 ret = -EFAULT;
1440 goto err;
1441 }
1442 diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
1443 index 1359e5c773e4..f8f9ae6622eb 100644
1444 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
1445 +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
1446 @@ -505,6 +505,18 @@ static void vop_core_clks_disable(struct vop *vop)
1447 clk_disable(vop->hclk);
1448 }
1449
1450 +static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
1451 +{
1452 + if (win->phy->scl && win->phy->scl->ext) {
1453 + VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
1454 + VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
1455 + VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
1456 + VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
1457 + }
1458 +
1459 + VOP_WIN_SET(vop, win, enable, 0);
1460 +}
1461 +
1462 static int vop_enable(struct drm_crtc *crtc)
1463 {
1464 struct vop *vop = to_vop(crtc);
1465 @@ -550,7 +562,7 @@ static int vop_enable(struct drm_crtc *crtc)
1466 struct vop_win *vop_win = &vop->win[i];
1467 const struct vop_win_data *win = vop_win->data;
1468
1469 - VOP_WIN_SET(vop, win, enable, 0);
1470 + vop_win_disable(vop, win);
1471 }
1472 spin_unlock(&vop->reg_lock);
1473
1474 @@ -694,7 +706,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
1475
1476 spin_lock(&vop->reg_lock);
1477
1478 - VOP_WIN_SET(vop, win, enable, 0);
1479 + vop_win_disable(vop, win);
1480
1481 spin_unlock(&vop->reg_lock);
1482 }
1483 @@ -1449,7 +1461,7 @@ static int vop_initial(struct vop *vop)
1484 int channel = i * 2 + 1;
1485
1486 VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
1487 - VOP_WIN_SET(vop, win, enable, 0);
1488 + vop_win_disable(vop, win);
1489 VOP_WIN_SET(vop, win, gate, 1);
1490 }
1491
1492 diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
1493 index 6887db878b38..4709f08f39e4 100644
1494 --- a/drivers/gpu/drm/vgem/vgem_drv.c
1495 +++ b/drivers/gpu/drm/vgem/vgem_drv.c
1496 @@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
1497 ret = drm_gem_handle_create(file, &obj->base, handle);
1498 drm_gem_object_put_unlocked(&obj->base);
1499 if (ret)
1500 - goto err;
1501 + return ERR_PTR(ret);
1502
1503 return &obj->base;
1504 -
1505 -err:
1506 - __vgem_gem_destroy(obj);
1507 - return ERR_PTR(ret);
1508 }
1509
1510 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
1511 diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
1512 index ca4a74e04977..ce394009a36c 100644
1513 --- a/drivers/gpu/drm/vkms/vkms_gem.c
1514 +++ b/drivers/gpu/drm/vkms/vkms_gem.c
1515 @@ -110,11 +110,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
1516
1517 ret = drm_gem_handle_create(file, &obj->gem, handle);
1518 drm_gem_object_put_unlocked(&obj->gem);
1519 - if (ret) {
1520 - drm_gem_object_release(&obj->gem);
1521 - kfree(obj);
1522 + if (ret)
1523 return ERR_PTR(ret);
1524 - }
1525
1526 return &obj->gem;
1527 }
1528 diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
1529 index b5948ba6b3b3..fde728ea2900 100644
1530 --- a/drivers/iommu/io-pgtable-arm-v7s.c
1531 +++ b/drivers/iommu/io-pgtable-arm-v7s.c
1532 @@ -161,6 +161,14 @@
1533
1534 #define ARM_V7S_TCR_PD1 BIT(5)
1535
1536 +#ifdef CONFIG_ZONE_DMA32
1537 +#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
1538 +#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
1539 +#else
1540 +#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
1541 +#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
1542 +#endif
1543 +
1544 typedef u32 arm_v7s_iopte;
1545
1546 static bool selftest_running;
1547 @@ -198,13 +206,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
1548 void *table = NULL;
1549
1550 if (lvl == 1)
1551 - table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
1552 + table = (void *)__get_free_pages(
1553 + __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
1554 else if (lvl == 2)
1555 - table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
1556 + table = kmem_cache_zalloc(data->l2_tables, gfp);
1557 phys = virt_to_phys(table);
1558 - if (phys != (arm_v7s_iopte)phys)
1559 + if (phys != (arm_v7s_iopte)phys) {
1560 /* Doesn't fit in PTE */
1561 + dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
1562 goto out_free;
1563 + }
1564 if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
1565 dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
1566 if (dma_mapping_error(dev, dma))
1567 @@ -728,7 +739,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
1568 data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
1569 ARM_V7S_TABLE_SIZE(2),
1570 ARM_V7S_TABLE_SIZE(2),
1571 - SLAB_CACHE_DMA, NULL);
1572 + ARM_V7S_TABLE_SLAB_FLAGS, NULL);
1573 if (!data->l2_tables)
1574 goto out_free_data;
1575
1576 diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
1577 index 4d85645c87f7..0928fd1f0e0c 100644
1578 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c
1579 +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
1580 @@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
1581 if (m->clock2)
1582 test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
1583
1584 - if (ent->device == 0xB410) {
1585 + if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
1586 + ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
1587 test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
1588 test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
1589 test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
1590 diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
1591 index d03775100f7d..619bf1498a66 100644
1592 --- a/drivers/net/Kconfig
1593 +++ b/drivers/net/Kconfig
1594 @@ -213,8 +213,8 @@ config GENEVE
1595
1596 config GTP
1597 tristate "GPRS Tunneling Protocol datapath (GTP-U)"
1598 - depends on INET && NET_UDP_TUNNEL
1599 - select NET_IP_TUNNEL
1600 + depends on INET
1601 + select NET_UDP_TUNNEL
1602 ---help---
1603 This allows one to create gtp virtual interfaces that provide
1604 the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
1605 diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
1606 index cdcde7f8e0b2..bdd8f2df6630 100644
1607 --- a/drivers/net/dsa/qca8k.c
1608 +++ b/drivers/net/dsa/qca8k.c
1609 @@ -620,22 +620,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
1610 qca8k_port_set_status(priv, port, 1);
1611 }
1612
1613 -static int
1614 -qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
1615 -{
1616 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1617 -
1618 - return mdiobus_read(priv->bus, phy, regnum);
1619 -}
1620 -
1621 -static int
1622 -qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
1623 -{
1624 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1625 -
1626 - return mdiobus_write(priv->bus, phy, regnum, val);
1627 -}
1628 -
1629 static void
1630 qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
1631 {
1632 @@ -876,8 +860,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
1633 .setup = qca8k_setup,
1634 .adjust_link = qca8k_adjust_link,
1635 .get_strings = qca8k_get_strings,
1636 - .phy_read = qca8k_phy_read,
1637 - .phy_write = qca8k_phy_write,
1638 .get_ethtool_stats = qca8k_get_ethtool_stats,
1639 .get_sset_count = qca8k_get_sset_count,
1640 .get_mac_eee = qca8k_get_mac_eee,
1641 diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
1642 index 342ae08ec3c2..d60a86aa8aa8 100644
1643 --- a/drivers/net/ethernet/8390/mac8390.c
1644 +++ b/drivers/net/ethernet/8390/mac8390.c
1645 @@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
1646 static void dayna_block_output(struct net_device *dev, int count,
1647 const unsigned char *buf, int start_page);
1648
1649 -#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
1650 -
1651 /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
1652 static void slow_sane_get_8390_hdr(struct net_device *dev,
1653 struct e8390_pkt_hdr *hdr, int ring_page);
1654 @@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
1655
1656 static enum mac8390_access mac8390_testio(unsigned long membase)
1657 {
1658 - unsigned long outdata = 0xA5A0B5B0;
1659 - unsigned long indata = 0x00000000;
1660 + u32 outdata = 0xA5A0B5B0;
1661 + u32 indata = 0;
1662 +
1663 /* Try writing 32 bits */
1664 - memcpy_toio((void __iomem *)membase, &outdata, 4);
1665 - /* Now compare them */
1666 - if (memcmp_withio(&outdata, membase, 4) == 0)
1667 + nubus_writel(outdata, membase);
1668 + /* Now read it back */
1669 + indata = nubus_readl(membase);
1670 + if (outdata == indata)
1671 return ACCESS_32;
1672 +
1673 + outdata = 0xC5C0D5D0;
1674 + indata = 0;
1675 +
1676 /* Write 16 bit output */
1677 word_memcpy_tocard(membase, &outdata, 4);
1678 /* Now read it back */
1679 word_memcpy_fromcard(&indata, membase, 4);
1680 if (outdata == indata)
1681 return ACCESS_16;
1682 +
1683 return ACCESS_UNKNOWN;
1684 }
1685
1686 diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1687 index 7134d0d4cdf7..6f3312350cac 100644
1688 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1689 +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1690 @@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self,
1691 }
1692 if (buff->is_ip_cso) {
1693 __skb_incr_checksum_unnecessary(skb);
1694 - if (buff->is_udp_cso || buff->is_tcp_cso)
1695 - __skb_incr_checksum_unnecessary(skb);
1696 } else {
1697 skb->ip_summed = CHECKSUM_NONE;
1698 }
1699 +
1700 + if (buff->is_udp_cso || buff->is_tcp_cso)
1701 + __skb_incr_checksum_unnecessary(skb);
1702 }
1703
1704 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
1705 diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1706 index fcaf18fa3904..9a4cfa61ed93 100644
1707 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1708 +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1709 @@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
1710 /* Check if page can be recycled */
1711 if (page) {
1712 ref_count = page_ref_count(page);
1713 - /* Check if this page has been used once i.e 'put_page'
1714 - * called after packet transmission i.e internal ref_count
1715 - * and page's ref_count are equal i.e page can be recycled.
1716 + /* This page can be recycled if internal ref_count and page's
1717 + * ref_count are equal, indicating that the page has been used
1718 + * once for packet transmission. For non-XDP mode, internal
1719 + * ref_count is always '1'.
1720 */
1721 - if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
1722 - pgcache->ref_count--;
1723 - else
1724 - page = NULL;
1725 -
1726 - /* In non-XDP mode, page's ref_count needs to be '1' for it
1727 - * to be recycled.
1728 - */
1729 - if (!rbdr->is_xdp && (ref_count != 1))
1730 + if (rbdr->is_xdp) {
1731 + if (ref_count == pgcache->ref_count)
1732 + pgcache->ref_count--;
1733 + else
1734 + page = NULL;
1735 + } else if (ref_count != 1) {
1736 page = NULL;
1737 + }
1738 }
1739
1740 if (!page) {
1741 @@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
1742 while (head < rbdr->pgcnt) {
1743 pgcache = &rbdr->pgcache[head];
1744 if (pgcache->page && page_ref_count(pgcache->page) != 0) {
1745 - if (!rbdr->is_xdp) {
1746 - put_page(pgcache->page);
1747 - continue;
1748 + if (rbdr->is_xdp) {
1749 + page_ref_sub(pgcache->page,
1750 + pgcache->ref_count - 1);
1751 }
1752 - page_ref_sub(pgcache->page, pgcache->ref_count - 1);
1753 put_page(pgcache->page);
1754 }
1755 head++;
1756 diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1757 index bc83ced94e1b..afed0f0f4027 100644
1758 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1759 +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1760 @@ -111,10 +111,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
1761
1762 static void refill_desc3(void *priv_ptr, struct dma_desc *p)
1763 {
1764 - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
1765 + struct stmmac_rx_queue *rx_q = priv_ptr;
1766 + struct stmmac_priv *priv = rx_q->priv_data;
1767
1768 /* Fill DES3 in case of RING mode */
1769 - if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
1770 + if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1771 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
1772 }
1773
1774 diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
1775 index ddc2c5ea3787..7ceebbc4bcc2 100644
1776 --- a/drivers/net/phy/meson-gxl.c
1777 +++ b/drivers/net/phy/meson-gxl.c
1778 @@ -211,6 +211,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
1779 static int meson_gxl_config_intr(struct phy_device *phydev)
1780 {
1781 u16 val;
1782 + int ret;
1783
1784 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1785 val = INTSRC_ANEG_PR
1786 @@ -223,6 +224,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
1787 val = 0;
1788 }
1789
1790 + /* Ack any pending IRQ */
1791 + ret = meson_gxl_ack_interrupt(phydev);
1792 + if (ret)
1793 + return ret;
1794 +
1795 return phy_write(phydev, INTSRC_MASK, val);
1796 }
1797
1798 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1799 index f3293355c784..044d5c3a4d04 100644
1800 --- a/drivers/net/tun.c
1801 +++ b/drivers/net/tun.c
1802 @@ -1718,9 +1718,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1803 int skb_xdp = 1;
1804 bool frags = tun_napi_frags_enabled(tfile);
1805
1806 - if (!(tun->dev->flags & IFF_UP))
1807 - return -EIO;
1808 -
1809 if (!(tun->flags & IFF_NO_PI)) {
1810 if (len < sizeof(pi))
1811 return -EINVAL;
1812 @@ -1822,6 +1819,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1813 err = skb_copy_datagram_from_iter(skb, 0, from, len);
1814
1815 if (err) {
1816 + err = -EFAULT;
1817 +drop:
1818 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1819 kfree_skb(skb);
1820 if (frags) {
1821 @@ -1829,7 +1828,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1822 mutex_unlock(&tfile->napi_mutex);
1823 }
1824
1825 - return -EFAULT;
1826 + return err;
1827 }
1828 }
1829
1830 @@ -1913,6 +1912,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1831 !tfile->detached)
1832 rxhash = __skb_get_hash_symmetric(skb);
1833
1834 + rcu_read_lock();
1835 + if (unlikely(!(tun->dev->flags & IFF_UP))) {
1836 + err = -EIO;
1837 + rcu_read_unlock();
1838 + goto drop;
1839 + }
1840 +
1841 if (frags) {
1842 /* Exercise flow dissector code path. */
1843 u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
1844 @@ -1920,6 +1926,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1845 if (unlikely(headlen > skb_headlen(skb))) {
1846 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1847 napi_free_frags(&tfile->napi);
1848 + rcu_read_unlock();
1849 mutex_unlock(&tfile->napi_mutex);
1850 WARN_ON(1);
1851 return -ENOMEM;
1852 @@ -1947,6 +1954,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1853 } else {
1854 netif_rx_ni(skb);
1855 }
1856 + rcu_read_unlock();
1857
1858 stats = get_cpu_ptr(tun->pcpu_stats);
1859 u64_stats_update_begin(&stats->syncp);
1860 diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
1861 index f93547f257fb..449fc52f9a89 100644
1862 --- a/drivers/net/vrf.c
1863 +++ b/drivers/net/vrf.c
1864 @@ -1262,6 +1262,7 @@ static void vrf_setup(struct net_device *dev)
1865
1866 /* default to no qdisc; user can add if desired */
1867 dev->priv_flags |= IFF_NO_QUEUE;
1868 + dev->priv_flags |= IFF_NO_RX_HANDLER;
1869 }
1870
1871 static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
1872 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1873 index 52387f7f12ed..0b1ec44acbf9 100644
1874 --- a/drivers/net/vxlan.c
1875 +++ b/drivers/net/vxlan.c
1876 @@ -3798,10 +3798,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
1877 /* If vxlan->dev is in the same netns, it has already been added
1878 * to the list by the previous loop.
1879 */
1880 - if (!net_eq(dev_net(vxlan->dev), net)) {
1881 - gro_cells_destroy(&vxlan->gro_cells);
1882 + if (!net_eq(dev_net(vxlan->dev), net))
1883 unregister_netdevice_queue(vxlan->dev, head);
1884 - }
1885 }
1886
1887 for (h = 0; h < PORT_HASH_SIZE; ++h)
1888 diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
1889 index 881078ff73f6..15c8fc2abf01 100644
1890 --- a/drivers/phy/allwinner/phy-sun4i-usb.c
1891 +++ b/drivers/phy/allwinner/phy-sun4i-usb.c
1892 @@ -481,8 +481,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy, enum phy_mode mode)
1893 struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
1894 int new_mode;
1895
1896 - if (phy->index != 0)
1897 + if (phy->index != 0) {
1898 + if (mode == PHY_MODE_USB_HOST)
1899 + return 0;
1900 return -EINVAL;
1901 + }
1902
1903 switch (mode) {
1904 case PHY_MODE_USB_HOST:
1905 diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
1906 index 39d4100c60a2..a26f410800c2 100644
1907 --- a/drivers/platform/x86/intel_cht_int33fe.c
1908 +++ b/drivers/platform/x86/intel_cht_int33fe.c
1909 @@ -34,7 +34,7 @@ struct cht_int33fe_data {
1910 struct i2c_client *fusb302;
1911 struct i2c_client *pi3usb30532;
1912 /* Contain a list-head must be per device */
1913 - struct device_connection connections[3];
1914 + struct device_connection connections[5];
1915 };
1916
1917 /*
1918 @@ -174,19 +174,20 @@ static int cht_int33fe_probe(struct i2c_client *client)
1919 return -EPROBE_DEFER; /* Wait for i2c-adapter to load */
1920 }
1921
1922 - data->connections[0].endpoint[0] = "i2c-fusb302";
1923 + data->connections[0].endpoint[0] = "port0";
1924 data->connections[0].endpoint[1] = "i2c-pi3usb30532";
1925 data->connections[0].id = "typec-switch";
1926 - data->connections[1].endpoint[0] = "i2c-fusb302";
1927 + data->connections[1].endpoint[0] = "port0";
1928 data->connections[1].endpoint[1] = "i2c-pi3usb30532";
1929 data->connections[1].id = "typec-mux";
1930 - data->connections[2].endpoint[0] = "i2c-fusb302";
1931 - data->connections[2].endpoint[1] = "intel_xhci_usb_sw-role-switch";
1932 - data->connections[2].id = "usb-role-switch";
1933 + data->connections[2].endpoint[0] = "port0";
1934 + data->connections[2].endpoint[1] = "i2c-pi3usb30532";
1935 + data->connections[2].id = "idff01m01";
1936 + data->connections[3].endpoint[0] = "i2c-fusb302";
1937 + data->connections[3].endpoint[1] = "intel_xhci_usb_sw-role-switch";
1938 + data->connections[3].id = "usb-role-switch";
1939
1940 - device_connection_add(&data->connections[0]);
1941 - device_connection_add(&data->connections[1]);
1942 - device_connection_add(&data->connections[2]);
1943 + device_connections_add(data->connections);
1944
1945 memset(&board_info, 0, sizeof(board_info));
1946 strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
1947 @@ -217,9 +218,7 @@ out_unregister_max17047:
1948 if (data->max17047)
1949 i2c_unregister_device(data->max17047);
1950
1951 - device_connection_remove(&data->connections[2]);
1952 - device_connection_remove(&data->connections[1]);
1953 - device_connection_remove(&data->connections[0]);
1954 + device_connections_remove(data->connections);
1955
1956 return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
1957 }
1958 @@ -233,9 +232,7 @@ static int cht_int33fe_remove(struct i2c_client *i2c)
1959 if (data->max17047)
1960 i2c_unregister_device(data->max17047);
1961
1962 - device_connection_remove(&data->connections[2]);
1963 - device_connection_remove(&data->connections[1]);
1964 - device_connection_remove(&data->connections[0]);
1965 + device_connections_remove(data->connections);
1966
1967 return 0;
1968 }
1969 diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
1970 index f47d16b5810b..fabd9798e4c4 100644
1971 --- a/drivers/s390/cio/vfio_ccw_drv.c
1972 +++ b/drivers/s390/cio/vfio_ccw_drv.c
1973 @@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
1974 {
1975 struct vfio_ccw_private *private;
1976 struct irb *irb;
1977 + bool is_final;
1978
1979 private = container_of(work, struct vfio_ccw_private, io_work);
1980 irb = &private->irb;
1981
1982 + is_final = !(scsw_actl(&irb->scsw) &
1983 + (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
1984 if (scsw_is_solicited(&irb->scsw)) {
1985 cp_update_scsw(&private->cp, &irb->scsw);
1986 - cp_free(&private->cp);
1987 + if (is_final)
1988 + cp_free(&private->cp);
1989 }
1990 memcpy(private->io_region->irb_area, irb, sizeof(*irb));
1991
1992 if (private->io_trigger)
1993 eventfd_signal(private->io_trigger, 1);
1994
1995 - if (private->mdev)
1996 + if (private->mdev && is_final)
1997 private->state = VFIO_CCW_STATE_IDLE;
1998 }
1999
2000 diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
2001 index e7e6b63905e2..ebdbc457003f 100644
2002 --- a/drivers/s390/scsi/zfcp_erp.c
2003 +++ b/drivers/s390/scsi/zfcp_erp.c
2004 @@ -643,6 +643,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
2005 add_timer(&erp_action->timer);
2006 }
2007
2008 +void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
2009 + int clear, char *dbftag)
2010 +{
2011 + unsigned long flags;
2012 + struct zfcp_port *port;
2013 +
2014 + write_lock_irqsave(&adapter->erp_lock, flags);
2015 + read_lock(&adapter->port_list_lock);
2016 + list_for_each_entry(port, &adapter->port_list, list)
2017 + _zfcp_erp_port_forced_reopen(port, clear, dbftag);
2018 + read_unlock(&adapter->port_list_lock);
2019 + write_unlock_irqrestore(&adapter->erp_lock, flags);
2020 +}
2021 +
2022 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
2023 int clear, char *id)
2024 {
2025 @@ -1297,6 +1311,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
2026 struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
2027 int lun_status;
2028
2029 + if (sdev->sdev_state == SDEV_DEL ||
2030 + sdev->sdev_state == SDEV_CANCEL)
2031 + continue;
2032 if (zsdev->port != port)
2033 continue;
2034 /* LUN under port of interest */
2035 diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
2036 index bd0c5a9f04cb..1b4d6a3afb8f 100644
2037 --- a/drivers/s390/scsi/zfcp_ext.h
2038 +++ b/drivers/s390/scsi/zfcp_ext.h
2039 @@ -69,6 +69,8 @@ extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
2040 extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id);
2041 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
2042 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
2043 +extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
2044 + int clear, char *dbftag);
2045 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
2046 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
2047 extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
2048 diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
2049 index a8efcb330bc1..a4bbfa4ef653 100644
2050 --- a/drivers/s390/scsi/zfcp_scsi.c
2051 +++ b/drivers/s390/scsi/zfcp_scsi.c
2052 @@ -362,6 +362,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
2053 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2054 int ret = SUCCESS, fc_ret;
2055
2056 + if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
2057 + zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
2058 + zfcp_erp_wait(adapter);
2059 + }
2060 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
2061 zfcp_erp_wait(adapter);
2062 fc_ret = fc_block_scsi_eh(scpnt);
2063 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2064 index a3a5162fa60e..e925eda93191 100644
2065 --- a/drivers/scsi/sd.c
2066 +++ b/drivers/scsi/sd.c
2067 @@ -1408,11 +1408,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
2068 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
2069 }
2070
2071 - /*
2072 - * XXX and what if there are packets in flight and this close()
2073 - * XXX is followed by a "rmmod sd_mod"?
2074 - */
2075 -
2076 scsi_disk_put(sdkp);
2077 }
2078
2079 @@ -3078,6 +3073,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
2080 unsigned int opt_xfer_bytes =
2081 logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
2082
2083 + if (sdkp->opt_xfer_blocks == 0)
2084 + return false;
2085 +
2086 if (sdkp->opt_xfer_blocks > dev_max) {
2087 sd_first_printk(KERN_WARNING, sdkp,
2088 "Optimal transfer size %u logical blocks " \
2089 @@ -3509,9 +3507,21 @@ static void scsi_disk_release(struct device *dev)
2090 {
2091 struct scsi_disk *sdkp = to_scsi_disk(dev);
2092 struct gendisk *disk = sdkp->disk;
2093 -
2094 + struct request_queue *q = disk->queue;
2095 +
2096 ida_free(&sd_index_ida, sdkp->index);
2097
2098 + /*
2099 +	 * Wait until all requests that are in progress have completed.
2100 +	 * This is necessary to prevent e.g. scsi_end_request() from crashing
2101 +	 * when disk->private_data is cleared below. Wait from inside
2102 +	 * scsi_disk_release() instead of from sd_release() so that freezing
2103 +	 * and unfreezing the request queue does not affect user space I/O
2104 +	 * when multiple processes open a /dev/sd... node concurrently.
2105 + */
2106 + blk_mq_freeze_queue(q);
2107 + blk_mq_unfreeze_queue(q);
2108 +
2109 disk->private_data = NULL;
2110 put_disk(disk);
2111 put_device(&sdkp->device->sdev_gendev);
2112 diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
2113 index 5775a93917f4..fbbdf4b0f6c5 100644
2114 --- a/drivers/staging/comedi/comedidev.h
2115 +++ b/drivers/staging/comedi/comedidev.h
2116 @@ -987,6 +987,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
2117 unsigned int mask);
2118 unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
2119 unsigned int *data);
2120 +unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
2121 + struct comedi_cmd *cmd);
2122 unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
2123 unsigned int comedi_nscans_left(struct comedi_subdevice *s,
2124 unsigned int nscans);
2125 diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
2126 index 57dd63d548b7..5329a3955214 100644
2127 --- a/drivers/staging/comedi/drivers.c
2128 +++ b/drivers/staging/comedi/drivers.c
2129 @@ -381,11 +381,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
2130 EXPORT_SYMBOL_GPL(comedi_dio_update_state);
2131
2132 /**
2133 - * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
2134 + * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
2135 + * bytes
2136 * @s: COMEDI subdevice.
2137 + * @cmd: COMEDI command.
2138 *
2139 * Determines the overall scan length according to the subdevice type and the
2140 - * number of channels in the scan.
2141 + * number of channels in the scan for the specified command.
2142 *
2143 * For digital input, output or input/output subdevices, samples for
2144 * multiple channels are assumed to be packed into one or more unsigned
2145 @@ -395,9 +397,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
2146 *
2147 * Returns the overall scan length in bytes.
2148 */
2149 -unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
2150 +unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
2151 + struct comedi_cmd *cmd)
2152 {
2153 - struct comedi_cmd *cmd = &s->async->cmd;
2154 unsigned int num_samples;
2155 unsigned int bits_per_sample;
2156
2157 @@ -414,6 +416,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
2158 }
2159 return comedi_samples_to_bytes(s, num_samples);
2160 }
2161 +EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
2162 +
2163 +/**
2164 + * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
2165 + * @s: COMEDI subdevice.
2166 + *
2167 + * Determines the overall scan length according to the subdevice type and the
2168 + * number of channels in the scan for the current command.
2169 + *
2170 + * For digital input, output or input/output subdevices, samples for
2171 + * multiple channels are assumed to be packed into one or more unsigned
2172 + * short or unsigned int values according to the subdevice's %SDF_LSAMPL
2173 + * flag. For other types of subdevice, samples are assumed to occupy a
2174 + * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
2175 + *
2176 + * Returns the overall scan length in bytes.
2177 + */
2178 +unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
2179 +{
2180 + struct comedi_cmd *cmd = &s->async->cmd;
2181 +
2182 + return comedi_bytes_per_scan_cmd(s, cmd);
2183 +}
2184 EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
2185
2186 static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
2187 diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
2188 index 4dee2fc37aed..d799b1b55de3 100644
2189 --- a/drivers/staging/comedi/drivers/ni_mio_common.c
2190 +++ b/drivers/staging/comedi/drivers/ni_mio_common.c
2191 @@ -3516,6 +3516,7 @@ static int ni_cdio_check_chanlist(struct comedi_device *dev,
2192 static int ni_cdio_cmdtest(struct comedi_device *dev,
2193 struct comedi_subdevice *s, struct comedi_cmd *cmd)
2194 {
2195 + unsigned int bytes_per_scan;
2196 int err = 0;
2197 int tmp;
2198
2199 @@ -3545,9 +3546,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
2200 err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
2201 err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
2202 cmd->chanlist_len);
2203 - err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
2204 - s->async->prealloc_bufsz /
2205 - comedi_bytes_per_scan(s));
2206 + bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
2207 + if (bytes_per_scan) {
2208 + err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
2209 + s->async->prealloc_bufsz /
2210 + bytes_per_scan);
2211 + }
2212
2213 if (err)
2214 return 3;
2215 diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
2216 index 04b84ff31d03..0a089cf5c78f 100644
2217 --- a/drivers/staging/erofs/dir.c
2218 +++ b/drivers/staging/erofs/dir.c
2219 @@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
2220 [EROFS_FT_SYMLINK] = DT_LNK,
2221 };
2222
2223 +static void debug_one_dentry(unsigned char d_type, const char *de_name,
2224 + unsigned int de_namelen)
2225 +{
2226 +#ifdef CONFIG_EROFS_FS_DEBUG
2227 + /* since the on-disk name could not have the trailing '\0' */
2228 + unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
2229 +
2230 + memcpy(dbg_namebuf, de_name, de_namelen);
2231 + dbg_namebuf[de_namelen] = '\0';
2232 +
2233 + debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
2234 + de_namelen, d_type);
2235 +#endif
2236 +}
2237 +
2238 static int erofs_fill_dentries(struct dir_context *ctx,
2239 void *dentry_blk, unsigned *ofs,
2240 unsigned nameoff, unsigned maxsize)
2241 @@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
2242 de = dentry_blk + *ofs;
2243 while (de < end) {
2244 const char *de_name;
2245 - int de_namelen;
2246 + unsigned int de_namelen;
2247 unsigned char d_type;
2248 -#ifdef CONFIG_EROFS_FS_DEBUG
2249 - unsigned dbg_namelen;
2250 - unsigned char dbg_namebuf[EROFS_NAME_LEN];
2251 -#endif
2252
2253 - if (unlikely(de->file_type < EROFS_FT_MAX))
2254 + if (de->file_type < EROFS_FT_MAX)
2255 d_type = erofs_filetype_table[de->file_type];
2256 else
2257 d_type = DT_UNKNOWN;
2258 @@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
2259 nameoff = le16_to_cpu(de->nameoff);
2260 de_name = (char *)dentry_blk + nameoff;
2261
2262 - de_namelen = unlikely(de + 1 >= end) ?
2263 - /* last directory entry */
2264 - strnlen(de_name, maxsize - nameoff) :
2265 - le16_to_cpu(de[1].nameoff) - nameoff;
2266 + /* the last dirent in the block? */
2267 + if (de + 1 >= end)
2268 + de_namelen = strnlen(de_name, maxsize - nameoff);
2269 + else
2270 + de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
2271
2272 /* a corrupted entry is found */
2273 - if (unlikely(de_namelen < 0)) {
2274 + if (unlikely(nameoff + de_namelen > maxsize ||
2275 + de_namelen > EROFS_NAME_LEN)) {
2276 DBG_BUGON(1);
2277 return -EIO;
2278 }
2279
2280 -#ifdef CONFIG_EROFS_FS_DEBUG
2281 - dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
2282 - memcpy(dbg_namebuf, de_name, dbg_namelen);
2283 - dbg_namebuf[dbg_namelen] = '\0';
2284 -
2285 - debugln("%s, found de_name %s de_len %d d_type %d", __func__,
2286 - dbg_namebuf, de_namelen, d_type);
2287 -#endif
2288 -
2289 + debug_one_dentry(d_type, de_name, de_namelen);
2290 if (!dir_emit(ctx, de_name, de_namelen,
2291 le64_to_cpu(de->nid), d_type))
2292 /* stoped by some reason */
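
The dir.c hunk above moves the debug printout into debug_one_dentry() and sizes its scratch buffer EROFS_NAME_LEN + 1, since on-disk names carry no trailing '\0'; a maximum-length name no longer has to be truncated to make room for the terminator. A minimal userspace sketch of that buffer-sizing point, using an assumed NAME_LEN in place of EROFS_NAME_LEN:

#include <stdio.h>
#include <string.h>

#define NAME_LEN 8    /* assumed stand-in for EROFS_NAME_LEN */

/* On-disk names are not NUL-terminated, so the scratch buffer needs one
 * extra byte for the terminator even for a maximum-length name. */
static void print_dentry_name(const char *de_name, unsigned int de_namelen)
{
    char buf[NAME_LEN + 1];

    if (de_namelen > NAME_LEN)
        return;    /* treated as a corrupted entry */
    memcpy(buf, de_name, de_namelen);
    buf[de_namelen] = '\0';
    printf("found dirent %s de_len %u\n", buf, de_namelen);
}

int main(void)
{
    const char raw[NAME_LEN] = { 'm', 'a', 'x', 'l', 'e', 'n', '0', '1' };

    print_dentry_name(raw, NAME_LEN);    /* source buffer has no NUL */
    return 0;
}
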
2293 diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
2294 index f44662dd795c..ad6fe6d9d00a 100644
2295 --- a/drivers/staging/erofs/unzip_vle.c
2296 +++ b/drivers/staging/erofs/unzip_vle.c
2297 @@ -885,6 +885,7 @@ repeat:
2298 overlapped = false;
2299 compressed_pages = grp->compressed_pages;
2300
2301 + err = 0;
2302 for (i = 0; i < clusterpages; ++i) {
2303 unsigned pagenr;
2304
2305 @@ -894,26 +895,39 @@ repeat:
2306 DBG_BUGON(page == NULL);
2307 DBG_BUGON(page->mapping == NULL);
2308
2309 - if (z_erofs_is_stagingpage(page))
2310 - continue;
2311 + if (!z_erofs_is_stagingpage(page)) {
2312 #ifdef EROFS_FS_HAS_MANAGED_CACHE
2313 - if (page->mapping == mngda) {
2314 - DBG_BUGON(!PageUptodate(page));
2315 - continue;
2316 - }
2317 + if (page->mapping == mngda) {
2318 + if (unlikely(!PageUptodate(page)))
2319 + err = -EIO;
2320 + continue;
2321 + }
2322 #endif
2323
2324 - /* only non-head page could be reused as a compressed page */
2325 - pagenr = z_erofs_onlinepage_index(page);
2326 + /*
2327 + * only if non-head page can be selected
2328 + * for inplace decompression
2329 + */
2330 + pagenr = z_erofs_onlinepage_index(page);
2331
2332 - DBG_BUGON(pagenr >= nr_pages);
2333 - DBG_BUGON(pages[pagenr]);
2334 - ++sparsemem_pages;
2335 - pages[pagenr] = page;
2336 + DBG_BUGON(pagenr >= nr_pages);
2337 + DBG_BUGON(pages[pagenr]);
2338 + ++sparsemem_pages;
2339 + pages[pagenr] = page;
2340
2341 - overlapped = true;
2342 + overlapped = true;
2343 + }
2344 +
2345 + /* PG_error needs checking for inplaced and staging pages */
2346 + if (unlikely(PageError(page))) {
2347 + DBG_BUGON(PageUptodate(page));
2348 + err = -EIO;
2349 + }
2350 }
2351
2352 + if (unlikely(err))
2353 + goto out;
2354 +
2355 llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
2356
2357 if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
2358 @@ -942,6 +956,10 @@ repeat:
2359
2360 skip_allocpage:
2361 vout = erofs_vmap(pages, nr_pages);
2362 + if (!vout) {
2363 + err = -ENOMEM;
2364 + goto out;
2365 + }
2366
2367 err = z_erofs_vle_unzip_vmap(compressed_pages,
2368 clusterpages, vout, llen, work->pageofs, overlapped);
2369 @@ -1078,6 +1096,8 @@ static inline bool recover_managed_page(struct z_erofs_vle_workgroup *grp,
2370 return true;
2371
2372 lock_page(page);
2373 + ClearPageError(page);
2374 +
2375 if (unlikely(!PagePrivate(page))) {
2376 set_page_private(page, (unsigned long)grp);
2377 SetPagePrivate(page);
2378 diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
2379 index 055420e8af2c..3a7428317f0a 100644
2380 --- a/drivers/staging/erofs/unzip_vle_lz4.c
2381 +++ b/drivers/staging/erofs/unzip_vle_lz4.c
2382 @@ -116,10 +116,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
2383
2384 nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
2385
2386 - if (clusterpages == 1)
2387 + if (clusterpages == 1) {
2388 vin = kmap_atomic(compressed_pages[0]);
2389 - else
2390 + } else {
2391 vin = erofs_vmap(compressed_pages, clusterpages);
2392 + if (!vin)
2393 + return -ENOMEM;
2394 + }
2395
2396 preempt_disable();
2397 vout = erofs_pcpubuf[smp_processor_id()].data;
2398 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
2399 index 947c79532e10..d5383974d40e 100644
2400 --- a/drivers/staging/speakup/speakup_soft.c
2401 +++ b/drivers/staging/speakup/speakup_soft.c
2402 @@ -208,12 +208,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
2403 return -EINVAL;
2404
2405 spin_lock_irqsave(&speakup_info.spinlock, flags);
2406 + synth_soft.alive = 1;
2407 while (1) {
2408 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
2409 - if (!unicode)
2410 - synth_buffer_skip_nonlatin1();
2411 - if (!synth_buffer_empty() || speakup_info.flushing)
2412 - break;
2413 + if (synth_current() == &synth_soft) {
2414 + if (!unicode)
2415 + synth_buffer_skip_nonlatin1();
2416 + if (!synth_buffer_empty() || speakup_info.flushing)
2417 + break;
2418 + }
2419 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
2420 if (fp->f_flags & O_NONBLOCK) {
2421 finish_wait(&speakup_event, &wait);
2422 @@ -233,6 +236,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
2423
2424 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
2425 while (chars_sent <= count - bytes_per_ch) {
2426 + if (synth_current() != &synth_soft)
2427 + break;
2428 if (speakup_info.flushing) {
2429 speakup_info.flushing = 0;
2430 ch = '\x18';
2431 @@ -329,7 +334,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
2432 poll_wait(fp, &speakup_event, wait);
2433
2434 spin_lock_irqsave(&speakup_info.spinlock, flags);
2435 - if (!synth_buffer_empty() || speakup_info.flushing)
2436 + if (synth_current() == &synth_soft &&
2437 + (!synth_buffer_empty() || speakup_info.flushing))
2438 ret = EPOLLIN | EPOLLRDNORM;
2439 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
2440 return ret;
2441 diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
2442 index 7b3a16e1fa23..796ffcca43c1 100644
2443 --- a/drivers/staging/speakup/spk_priv.h
2444 +++ b/drivers/staging/speakup/spk_priv.h
2445 @@ -72,6 +72,7 @@ int synth_request_region(unsigned long start, unsigned long n);
2446 int synth_release_region(unsigned long start, unsigned long n);
2447 int synth_add(struct spk_synth *in_synth);
2448 void synth_remove(struct spk_synth *in_synth);
2449 +struct spk_synth *synth_current(void);
2450
2451 extern struct speakup_info_t speakup_info;
2452
2453 diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
2454 index 25f259ee4ffc..3568bfb89912 100644
2455 --- a/drivers/staging/speakup/synth.c
2456 +++ b/drivers/staging/speakup/synth.c
2457 @@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
2458 }
2459 EXPORT_SYMBOL_GPL(synth_remove);
2460
2461 +struct spk_synth *synth_current(void)
2462 +{
2463 + return synth;
2464 +}
2465 +EXPORT_SYMBOL_GPL(synth_current);
2466 +
2467 short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
2468 diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
2469 index 1ab0e8562d40..607804aa560d 100644
2470 --- a/drivers/staging/vt6655/device_main.c
2471 +++ b/drivers/staging/vt6655/device_main.c
2472 @@ -1040,8 +1040,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
2473 return;
2474 }
2475
2476 - MACvIntDisable(priv->PortOffset);
2477 -
2478 spin_lock_irqsave(&priv->lock, flags);
2479
2480 /* Read low level stats */
2481 @@ -1129,8 +1127,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
2482 }
2483
2484 spin_unlock_irqrestore(&priv->lock, flags);
2485 -
2486 - MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
2487 }
2488
2489 static void vnt_interrupt_work(struct work_struct *work)
2490 @@ -1140,14 +1136,17 @@ static void vnt_interrupt_work(struct work_struct *work)
2491
2492 if (priv->vif)
2493 vnt_interrupt_process(priv);
2494 +
2495 + MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
2496 }
2497
2498 static irqreturn_t vnt_interrupt(int irq, void *arg)
2499 {
2500 struct vnt_private *priv = arg;
2501
2502 - if (priv->vif)
2503 - schedule_work(&priv->interrupt_work);
2504 + schedule_work(&priv->interrupt_work);
2505 +
2506 + MACvIntDisable(priv->PortOffset);
2507
2508 return IRQ_HANDLED;
2509 }
2510 diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
2511 index 8e4428725848..bfdd5ad4116f 100644
2512 --- a/drivers/tty/serial/atmel_serial.c
2513 +++ b/drivers/tty/serial/atmel_serial.c
2514 @@ -1156,6 +1156,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
2515 sg_dma_len(&atmel_port->sg_rx)/2,
2516 DMA_DEV_TO_MEM,
2517 DMA_PREP_INTERRUPT);
2518 + if (!desc) {
2519 + dev_err(port->dev, "Preparing DMA cyclic failed\n");
2520 + goto chan_err;
2521 + }
2522 desc->callback = atmel_complete_rx_dma;
2523 desc->callback_param = port;
2524 atmel_port->desc_rx = desc;
2525 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
2526 index 93d3a0ec5e11..b0aa864f84a9 100644
2527 --- a/drivers/tty/serial/kgdboc.c
2528 +++ b/drivers/tty/serial/kgdboc.c
2529 @@ -145,8 +145,10 @@ static int configure_kgdboc(void)
2530 char *cptr = config;
2531 struct console *cons;
2532
2533 - if (!strlen(config) || isspace(config[0]))
2534 + if (!strlen(config) || isspace(config[0])) {
2535 + err = 0;
2536 goto noconfig;
2537 + }
2538
2539 kgdboc_io_ops.is_console = 0;
2540 kgdb_tty_driver = NULL;
2541 diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
2542 index 3db48fcd6068..4c4070a202fb 100644
2543 --- a/drivers/tty/serial/max310x.c
2544 +++ b/drivers/tty/serial/max310x.c
2545 @@ -1419,6 +1419,8 @@ static int max310x_spi_probe(struct spi_device *spi)
2546 if (spi->dev.of_node) {
2547 const struct of_device_id *of_id =
2548 of_match_device(max310x_dt_ids, &spi->dev);
2549 + if (!of_id)
2550 + return -ENODEV;
2551
2552 devtype = (struct max310x_devtype *)of_id->data;
2553 } else {
2554 diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
2555 index 170e446a2f62..7d26c9b57d8e 100644
2556 --- a/drivers/tty/serial/mvebu-uart.c
2557 +++ b/drivers/tty/serial/mvebu-uart.c
2558 @@ -799,6 +799,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
2559 return -EINVAL;
2560 }
2561
2562 + if (!match)
2563 + return -ENODEV;
2564 +
2565 /* Assume that all UART ports have a DT alias or none has */
2566 id = of_alias_get_id(pdev->dev.of_node, "serial");
2567 if (!pdev->dev.of_node || id < 0)
2568 diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
2569 index 76aa289652f7..34acdf29713d 100644
2570 --- a/drivers/tty/serial/mxs-auart.c
2571 +++ b/drivers/tty/serial/mxs-auart.c
2572 @@ -1685,6 +1685,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
2573
2574 s->port.mapbase = r->start;
2575 s->port.membase = ioremap(r->start, resource_size(r));
2576 + if (!s->port.membase) {
2577 + ret = -ENOMEM;
2578 + goto out_disable_clks;
2579 + }
2580 s->port.ops = &mxs_auart_ops;
2581 s->port.iotype = UPIO_MEM;
2582 s->port.fifosize = MXS_AUART_FIFO_SIZE;
2583 diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
2584 index 35d1f6fa0e3c..5b96df4ad5b3 100644
2585 --- a/drivers/tty/serial/qcom_geni_serial.c
2586 +++ b/drivers/tty/serial/qcom_geni_serial.c
2587 @@ -1052,7 +1052,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
2588 {
2589 struct uart_port *uport;
2590 struct qcom_geni_serial_port *port;
2591 - int baud;
2592 + int baud = 9600;
2593 int bits = 8;
2594 int parity = 'n';
2595 int flow = 'n';
2596 diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
2597 index 859b173e3b82..cbbf239aea0f 100644
2598 --- a/drivers/tty/serial/sh-sci.c
2599 +++ b/drivers/tty/serial/sh-sci.c
2600 @@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
2601
2602 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
2603 uart_write_wakeup(port);
2604 - if (uart_circ_empty(xmit)) {
2605 + if (uart_circ_empty(xmit))
2606 sci_stop_tx(port);
2607 - } else {
2608 - ctrl = serial_port_in(port, SCSCR);
2609 -
2610 - if (port->type != PORT_SCI) {
2611 - serial_port_in(port, SCxSR); /* Dummy read */
2612 - sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
2613 - }
2614
2615 - ctrl |= SCSCR_TIE;
2616 - serial_port_out(port, SCSCR, ctrl);
2617 - }
2618 }
2619
2620 /* On SH3, SCIF may read end-of-break as a space->mark char */
2621 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2622 index 08b8aa5299b5..32da5a4182ac 100644
2623 --- a/drivers/usb/class/cdc-acm.c
2624 +++ b/drivers/usb/class/cdc-acm.c
2625 @@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
2626 clear_bit(EVENT_RX_STALL, &acm->flags);
2627 }
2628
2629 - if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
2630 + if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
2631 tty_port_tty_wakeup(&acm->port);
2632 - clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
2633 - }
2634 }
2635
2636 /*
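
The cdc-acm hunk above folds a separate test_bit()/clear_bit() pair into a single test_and_clear_bit(), so a wakeup event set between the test and the clear can no longer be lost. A rough userspace sketch of the same atomic test-and-clear idea, assuming C11 <stdatomic.h> rather than the kernel's bitops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define EVENT_TTY_WAKEUP (1u << 0)    /* illustrative flag bit */

static atomic_uint flags;

/* Clear the bit and report whether it was set, in one atomic step, so a
 * concurrent setter cannot slip in between "test" and "clear". */
static bool test_and_clear_flag(atomic_uint *f, unsigned int bit)
{
    unsigned int old = atomic_fetch_and(f, ~bit);

    return (old & bit) != 0;
}

int main(void)
{
    atomic_fetch_or(&flags, EVENT_TTY_WAKEUP);

    if (test_and_clear_flag(&flags, EVENT_TTY_WAKEUP))
        puts("wakeup handled exactly once");
    return 0;
}
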
2637 diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
2638 index 48277bbc15e4..73c8e6591746 100644
2639 --- a/drivers/usb/common/common.c
2640 +++ b/drivers/usb/common/common.c
2641 @@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
2642
2643 do {
2644 controller = of_find_node_with_property(controller, "phys");
2645 + if (!of_device_is_available(controller))
2646 + continue;
2647 index = 0;
2648 do {
2649 if (arg0 == -1) {
2650 diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
2651 index 54e859dcb25c..492bb44153b3 100644
2652 --- a/drivers/usb/gadget/function/f_hid.c
2653 +++ b/drivers/usb/gadget/function/f_hid.c
2654 @@ -391,20 +391,20 @@ try_again:
2655 req->complete = f_hidg_req_complete;
2656 req->context = hidg;
2657
2658 + spin_unlock_irqrestore(&hidg->write_spinlock, flags);
2659 +
2660 status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
2661 if (status < 0) {
2662 ERROR(hidg->func.config->cdev,
2663 "usb_ep_queue error on int endpoint %zd\n", status);
2664 - goto release_write_pending_unlocked;
2665 + goto release_write_pending;
2666 } else {
2667 status = count;
2668 }
2669 - spin_unlock_irqrestore(&hidg->write_spinlock, flags);
2670
2671 return status;
2672 release_write_pending:
2673 spin_lock_irqsave(&hidg->write_spinlock, flags);
2674 -release_write_pending_unlocked:
2675 hidg->write_pending = 0;
2676 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
2677
2678 diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
2679 index 86cff5c28eff..ba841c569c48 100644
2680 --- a/drivers/usb/host/xhci-dbgcap.c
2681 +++ b/drivers/usb/host/xhci-dbgcap.c
2682 @@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
2683 return -1;
2684
2685 writel(0, &dbc->regs->control);
2686 - xhci_dbc_mem_cleanup(xhci);
2687 dbc->state = DS_DISABLED;
2688
2689 return 0;
2690 @@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
2691 ret = xhci_do_dbc_stop(xhci);
2692 spin_unlock_irqrestore(&dbc->lock, flags);
2693
2694 - if (!ret)
2695 + if (!ret) {
2696 + xhci_dbc_mem_cleanup(xhci);
2697 pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
2698 + }
2699 }
2700
2701 static void
2702 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
2703 index 01b5818a4be5..333f9202ec8b 100644
2704 --- a/drivers/usb/host/xhci-hub.c
2705 +++ b/drivers/usb/host/xhci-hub.c
2706 @@ -1501,20 +1501,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
2707 port_index = max_ports;
2708 while (port_index--) {
2709 u32 t1, t2;
2710 -
2711 + int retries = 10;
2712 +retry:
2713 t1 = readl(ports[port_index]->addr);
2714 t2 = xhci_port_state_to_neutral(t1);
2715 portsc_buf[port_index] = 0;
2716
2717 - /* Bail out if a USB3 port has a new device in link training */
2718 - if ((hcd->speed >= HCD_USB3) &&
2719 + /*
2720 + * Give a USB3 port in link training time to finish, but don't
2721 + * prevent suspend as port might be stuck
2722 + */
2723 + if ((hcd->speed >= HCD_USB3) && retries-- &&
2724 (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
2725 - bus_state->bus_suspended = 0;
2726 spin_unlock_irqrestore(&xhci->lock, flags);
2727 - xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
2728 - return -EBUSY;
2729 + msleep(XHCI_PORT_POLLING_LFPS_TIME);
2730 + spin_lock_irqsave(&xhci->lock, flags);
2731 + xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
2732 + port_index);
2733 + goto retry;
2734 }
2735 -
2736 /* suspend ports in U0, or bail out for new connect changes */
2737 if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
2738 if ((t1 & PORT_CSC) && wake_enabled) {
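
The xhci-hub.c hunk above turns the immediate -EBUSY bailout into a bounded retry: a port still in link training gets up to ten 36ms waits (XHCI_PORT_POLLING_LFPS_TIME, added to xhci.h later in this patch) before bus suspend proceeds anyway. A small userspace sketch of that bounded-poll pattern, with a made-up port_still_polling() condition standing in for the PORTSC check:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define POLL_SLEEP_MS 36    /* mirrors XHCI_PORT_POLLING_LFPS_TIME */
#define POLL_RETRIES  10

/* Placeholder for "is the port still in the Polling link state?" */
static bool port_still_polling(int attempt)
{
    return attempt < 3;    /* pretend training finishes on the 4th check */
}

int main(void)
{
    struct timespec ts = { .tv_sec = 0, .tv_nsec = POLL_SLEEP_MS * 1000000L };
    int retries = POLL_RETRIES;
    int attempt = 0;

    /* Give link training time to finish, but never wait forever in case
     * the port is stuck: at most POLL_RETRIES bounded sleeps. */
    while (retries-- && port_still_polling(attempt)) {
        nanosleep(&ts, NULL);
        attempt++;
    }

    printf("continuing after %d extra check(s)\n", attempt);
    return 0;
}
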
2739 diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
2740 index a6e463715779..671bce18782c 100644
2741 --- a/drivers/usb/host/xhci-rcar.c
2742 +++ b/drivers/usb/host/xhci-rcar.c
2743 @@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
2744 if (!xhci_rcar_wait_for_pll_active(hcd))
2745 return -ETIMEDOUT;
2746
2747 + xhci->quirks |= XHCI_TRUST_TX_LENGTH;
2748 return xhci_rcar_download_firmware(hcd);
2749 }
2750
2751 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2752 index 9ae17a666bdb..f054464347c9 100644
2753 --- a/drivers/usb/host/xhci-ring.c
2754 +++ b/drivers/usb/host/xhci-ring.c
2755 @@ -1643,10 +1643,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
2756 }
2757 }
2758
2759 - if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
2760 - DEV_SUPERSPEED_ANY(portsc)) {
2761 + if ((portsc & PORT_PLC) &&
2762 + DEV_SUPERSPEED_ANY(portsc) &&
2763 + ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
2764 + (portsc & PORT_PLS_MASK) == XDEV_U1 ||
2765 + (portsc & PORT_PLS_MASK) == XDEV_U2)) {
2766 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
2767 - /* We've just brought the device into U0 through either the
2768 + /* We've just brought the device into U0/1/2 through either the
2769 * Resume state after a device remote wakeup, or through the
2770 * U3Exit state after a host-initiated resume. If it's a device
2771 * initiated remote wake, don't pass up the link state change,
2772 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2773 index e88060ea1e33..dc00f59c8e69 100644
2774 --- a/drivers/usb/host/xhci.h
2775 +++ b/drivers/usb/host/xhci.h
2776 @@ -452,6 +452,14 @@ struct xhci_op_regs {
2777 */
2778 #define XHCI_DEFAULT_BESL 4
2779
2780 +/*
2781 + * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports
2782 + * to complete link training. Usually link training completes much faster,
2783 + * so check status 10 times with a 36ms sleep in places we need to wait for
2784 + * polling to complete.
2785 + */
2786 +#define XHCI_PORT_POLLING_LFPS_TIME 36
2787 +
2788 /**
2789 * struct xhci_intr_reg - Interrupt Register Set
2790 * @irq_pending: IMAN - Interrupt Management Register. Used to enable
2791 diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
2792 index 40bbf1f53337..fe58904f350b 100644
2793 --- a/drivers/usb/mtu3/Kconfig
2794 +++ b/drivers/usb/mtu3/Kconfig
2795 @@ -4,6 +4,7 @@ config USB_MTU3
2796 tristate "MediaTek USB3 Dual Role controller"
2797 depends on USB || USB_GADGET
2798 depends on ARCH_MEDIATEK || COMPILE_TEST
2799 + depends on EXTCON || !EXTCON
2800 select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
2801 help
2802 Say Y or M here if your system runs on MediaTek SoCs with
2803 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2804 index 4c66edf533fe..e732949f6567 100644
2805 --- a/drivers/usb/serial/cp210x.c
2806 +++ b/drivers/usb/serial/cp210x.c
2807 @@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
2808 { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
2809 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
2810 { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
2811 + { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
2812 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
2813 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
2814 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
2815 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2816 index 1d8077e880a0..c0dc4bc776db 100644
2817 --- a/drivers/usb/serial/ftdi_sio.c
2818 +++ b/drivers/usb/serial/ftdi_sio.c
2819 @@ -599,6 +599,8 @@ static const struct usb_device_id id_table_combined[] = {
2820 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2821 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
2822 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2823 + { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
2824 + { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
2825 { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
2826 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
2827 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
2828 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2829 index b863bedb55a1..5755f0df0025 100644
2830 --- a/drivers/usb/serial/ftdi_sio_ids.h
2831 +++ b/drivers/usb/serial/ftdi_sio_ids.h
2832 @@ -567,7 +567,9 @@
2833 /*
2834 * NovaTech product ids (FTDI_VID)
2835 */
2836 -#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
2837 +#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
2838 +#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
2839 +#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
2840
2841 /*
2842 * Synapse Wireless product ids (FTDI_VID)
2843 diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
2844 index 27109522fd8b..e8f275a0326d 100644
2845 --- a/drivers/usb/serial/mos7720.c
2846 +++ b/drivers/usb/serial/mos7720.c
2847 @@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
2848 if (!urbtrack)
2849 return -ENOMEM;
2850
2851 - kref_get(&mos_parport->ref_count);
2852 - urbtrack->mos_parport = mos_parport;
2853 urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
2854 if (!urbtrack->urb) {
2855 kfree(urbtrack);
2856 @@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
2857 usb_sndctrlpipe(usbdev, 0),
2858 (unsigned char *)urbtrack->setup,
2859 NULL, 0, async_complete, urbtrack);
2860 + kref_get(&mos_parport->ref_count);
2861 + urbtrack->mos_parport = mos_parport;
2862 kref_init(&urbtrack->ref_count);
2863 INIT_LIST_HEAD(&urbtrack->urblist_entry);
2864
2865 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2866 index faf833e8f557..d8c474b386a8 100644
2867 --- a/drivers/usb/serial/option.c
2868 +++ b/drivers/usb/serial/option.c
2869 @@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
2870 #define QUECTEL_PRODUCT_EC25 0x0125
2871 #define QUECTEL_PRODUCT_BG96 0x0296
2872 #define QUECTEL_PRODUCT_EP06 0x0306
2873 +#define QUECTEL_PRODUCT_EM12 0x0512
2874
2875 #define CMOTECH_VENDOR_ID 0x16d8
2876 #define CMOTECH_PRODUCT_6001 0x6001
2877 @@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
2878 .driver_info = RSVD(3) },
2879 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
2880 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
2881 - { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
2882 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
2883 + .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
2884 /* Quectel products using Qualcomm vendor ID */
2885 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
2886 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
2887 @@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
2888 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
2889 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
2890 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
2891 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
2892 + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
2893 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
2894 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
2895 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
2896 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
2897 @@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
2898 .driver_info = RSVD(4) },
2899 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
2900 .driver_info = RSVD(4) },
2901 - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
2902 - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
2903 - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
2904 - { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
2905 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
2906 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
2907 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
2908 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
2909 + .driver_info = RSVD(4) },
2910 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
2911 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
2912 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
2913 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
2914 diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
2915 index e61dffb27a0c..00141e05bc72 100644
2916 --- a/drivers/usb/typec/class.c
2917 +++ b/drivers/usb/typec/class.c
2918 @@ -1500,7 +1500,7 @@ typec_port_register_altmode(struct typec_port *port,
2919
2920 sprintf(id, "id%04xm%02x", desc->svid, desc->mode);
2921
2922 - mux = typec_mux_get(port->dev.parent, id);
2923 + mux = typec_mux_get(&port->dev, id);
2924 if (IS_ERR(mux))
2925 return ERR_CAST(mux);
2926
2927 @@ -1540,18 +1540,6 @@ struct typec_port *typec_register_port(struct device *parent,
2928 return ERR_PTR(id);
2929 }
2930
2931 - port->sw = typec_switch_get(cap->fwnode ? &port->dev : parent);
2932 - if (IS_ERR(port->sw)) {
2933 - ret = PTR_ERR(port->sw);
2934 - goto err_switch;
2935 - }
2936 -
2937 - port->mux = typec_mux_get(parent, "typec-mux");
2938 - if (IS_ERR(port->mux)) {
2939 - ret = PTR_ERR(port->mux);
2940 - goto err_mux;
2941 - }
2942 -
2943 switch (cap->type) {
2944 case TYPEC_PORT_SRC:
2945 port->pwr_role = TYPEC_SOURCE;
2946 @@ -1592,13 +1580,26 @@ struct typec_port *typec_register_port(struct device *parent,
2947 port->port_type = cap->type;
2948 port->prefer_role = cap->prefer_role;
2949
2950 + device_initialize(&port->dev);
2951 port->dev.class = typec_class;
2952 port->dev.parent = parent;
2953 port->dev.fwnode = cap->fwnode;
2954 port->dev.type = &typec_port_dev_type;
2955 dev_set_name(&port->dev, "port%d", id);
2956
2957 - ret = device_register(&port->dev);
2958 + port->sw = typec_switch_get(&port->dev);
2959 + if (IS_ERR(port->sw)) {
2960 + put_device(&port->dev);
2961 + return ERR_CAST(port->sw);
2962 + }
2963 +
2964 + port->mux = typec_mux_get(&port->dev, "typec-mux");
2965 + if (IS_ERR(port->mux)) {
2966 + put_device(&port->dev);
2967 + return ERR_CAST(port->mux);
2968 + }
2969 +
2970 + ret = device_add(&port->dev);
2971 if (ret) {
2972 dev_err(parent, "failed to register port (%d)\n", ret);
2973 put_device(&port->dev);
2974 @@ -1606,15 +1607,6 @@ struct typec_port *typec_register_port(struct device *parent,
2975 }
2976
2977 return port;
2978 -
2979 -err_mux:
2980 - typec_switch_put(port->sw);
2981 -
2982 -err_switch:
2983 - ida_simple_remove(&typec_index_ida, port->id);
2984 - kfree(port);
2985 -
2986 - return ERR_PTR(ret);
2987 }
2988 EXPORT_SYMBOL_GPL(typec_register_port);
2989
2990 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2991 index a16760b410b1..c0db7785cede 100644
2992 --- a/fs/btrfs/extent-tree.c
2993 +++ b/fs/btrfs/extent-tree.c
2994 @@ -5872,7 +5872,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
2995 *
2996 * This is overestimating in most cases.
2997 */
2998 - qgroup_rsv_size = outstanding_extents * fs_info->nodesize;
2999 + qgroup_rsv_size = (u64)outstanding_extents * fs_info->nodesize;
3000
3001 spin_lock(&block_rsv->lock);
3002 block_rsv->size = reserve_size;
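
The extent-tree.c hunk above casts one operand to u64 so the product is computed in 64-bit arithmetic; without the cast, two 32-bit values are multiplied in 32 bits and the result can wrap before it is assigned to the 64-bit reserve size. A standalone illustration with made-up numbers, assuming a platform where unsigned int is 32 bits:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t outstanding_extents = 300000;    /* illustrative count */
    uint32_t nodesize = 16384;                /* illustrative node size */

    /* The multiplication happens in 32 bits and wraps... */
    uint64_t wrapped = outstanding_extents * nodesize;
    /* ...unless one operand is widened first, as the patch does. */
    uint64_t full = (uint64_t)outstanding_extents * nodesize;

    printf("wrapped=%" PRIu64 " full=%" PRIu64 "\n", wrapped, full);
    return 0;
}
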
3003 diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
3004 index df41d7049936..927f9f3daddb 100644
3005 --- a/fs/btrfs/raid56.c
3006 +++ b/fs/btrfs/raid56.c
3007 @@ -2429,8 +2429,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
3008 bitmap_clear(rbio->dbitmap, pagenr, 1);
3009 kunmap(p);
3010
3011 - for (stripe = 0; stripe < rbio->real_stripes; stripe++)
3012 + for (stripe = 0; stripe < nr_data; stripe++)
3013 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
3014 + kunmap(p_page);
3015 }
3016
3017 __free_page(p_page);
3018 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3019 index 0805f8c5e72d..2f4f0958e5f2 100644
3020 --- a/fs/btrfs/tree-log.c
3021 +++ b/fs/btrfs/tree-log.c
3022 @@ -3532,9 +3532,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3023 }
3024 btrfs_release_path(path);
3025
3026 - /* find the first key from this transaction again */
3027 + /*
3028 + * Find the first key from this transaction again. See the note for
3029 + * log_new_dir_dentries, if we're logging a directory recursively we
3030 + * won't be holding its i_mutex, which means we can modify the directory
3031 + * while we're logging it. If we remove an entry between our first
3032 + * search and this search we'll not find the key again and can just
3033 + * bail.
3034 + */
3035 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3036 - if (WARN_ON(ret != 0))
3037 + if (ret != 0)
3038 goto done;
3039
3040 /*
3041 @@ -4504,6 +4511,19 @@ static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
3042 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3043 struct btrfs_inode_item);
3044 *size_ret = btrfs_inode_size(path->nodes[0], item);
3045 + /*
3046 + * If the in-memory inode's i_size is smaller then the inode
3047 + * size stored in the btree, return the inode's i_size, so
3048 + * that we get a correct inode size after replaying the log
3049 + * when before a power failure we had a shrinking truncate
3050 + * followed by addition of a new name (rename / new hard link).
3051 + * Otherwise return the inode size from the btree, to avoid
3052 + * data loss when replaying a log due to previously doing a
3053 + * write that expands the inode's size and logging a new name
3054 + * immediately after.
3055 + */
3056 + if (*size_ret > inode->vfs_inode.i_size)
3057 + *size_ret = inode->vfs_inode.i_size;
3058 }
3059
3060 btrfs_release_path(path);
3061 @@ -4665,15 +4685,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
3062 struct btrfs_file_extent_item);
3063
3064 if (btrfs_file_extent_type(leaf, extent) ==
3065 - BTRFS_FILE_EXTENT_INLINE) {
3066 - len = btrfs_file_extent_ram_bytes(leaf, extent);
3067 - ASSERT(len == i_size ||
3068 - (len == fs_info->sectorsize &&
3069 - btrfs_file_extent_compression(leaf, extent) !=
3070 - BTRFS_COMPRESS_NONE) ||
3071 - (len < i_size && i_size < fs_info->sectorsize));
3072 + BTRFS_FILE_EXTENT_INLINE)
3073 return 0;
3074 - }
3075
3076 len = btrfs_file_extent_num_bytes(leaf, extent);
3077 /* Last extent goes beyond i_size, no need to log a hole. */
3078 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
3079 index c13f62182513..207f4e87445d 100644
3080 --- a/fs/btrfs/volumes.c
3081 +++ b/fs/btrfs/volumes.c
3082 @@ -6051,7 +6051,7 @@ static void btrfs_end_bio(struct bio *bio)
3083 if (bio_op(bio) == REQ_OP_WRITE)
3084 btrfs_dev_stat_inc_and_print(dev,
3085 BTRFS_DEV_STAT_WRITE_ERRS);
3086 - else
3087 + else if (!(bio->bi_opf & REQ_RAHEAD))
3088 btrfs_dev_stat_inc_and_print(dev,
3089 BTRFS_DEV_STAT_READ_ERRS);
3090 if (bio->bi_opf & REQ_PREFLUSH)
3091 diff --git a/fs/lockd/host.c b/fs/lockd/host.c
3092 index 93fb7cf0b92b..f0b5c987d6ae 100644
3093 --- a/fs/lockd/host.c
3094 +++ b/fs/lockd/host.c
3095 @@ -290,12 +290,11 @@ void nlmclnt_release_host(struct nlm_host *host)
3096
3097 WARN_ON_ONCE(host->h_server);
3098
3099 - if (refcount_dec_and_test(&host->h_count)) {
3100 + if (refcount_dec_and_mutex_lock(&host->h_count, &nlm_host_mutex)) {
3101 WARN_ON_ONCE(!list_empty(&host->h_lockowners));
3102 WARN_ON_ONCE(!list_empty(&host->h_granted));
3103 WARN_ON_ONCE(!list_empty(&host->h_reclaim));
3104
3105 - mutex_lock(&nlm_host_mutex);
3106 nlm_destroy_host_locked(host);
3107 mutex_unlock(&nlm_host_mutex);
3108 }
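The fs/lockd/host.c hunk above folds the final reference drop and the nlm_host_mutex acquisition into a single refcount_dec_and_mutex_lock() call, so no other thread can look the host up again between the count reaching zero and teardown. A minimal sketch of the same pattern on a hypothetical object (struct foo, foo_mutex and foo_put() are invented for illustration, not lockd code):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refcnt;
	struct list_head node;	/* linked on a lookup list */
};

static DEFINE_MUTEX(foo_mutex);	/* serializes lookup and teardown */

static void foo_put(struct foo *f)
{
	/*
	 * Returns true only when the count dropped to zero, and in that
	 * case the mutex is already held, so a concurrent lookup cannot
	 * grab a new reference before the object is unlinked.
	 */
	if (refcount_dec_and_mutex_lock(&f->refcnt, &foo_mutex)) {
		list_del(&f->node);
		mutex_unlock(&foo_mutex);
		kfree(f);
	}
}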
3109 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3110 index e7abcf7629b3..580e37bc3fe2 100644
3111 --- a/fs/nfs/nfs4proc.c
3112 +++ b/fs/nfs/nfs4proc.c
3113 @@ -2909,7 +2909,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
3114 }
3115
3116 out:
3117 - nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3118 + if (!opendata->cancelled)
3119 + nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3120 return ret;
3121 }
3122
3123 diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
3124 index 7a5ee145c733..fc197e599e8c 100644
3125 --- a/fs/ocfs2/refcounttree.c
3126 +++ b/fs/ocfs2/refcounttree.c
3127 @@ -4716,22 +4716,23 @@ out:
3128
3129 /* Lock an inode and grab a bh pointing to the inode. */
3130 static int ocfs2_reflink_inodes_lock(struct inode *s_inode,
3131 - struct buffer_head **bh1,
3132 + struct buffer_head **bh_s,
3133 struct inode *t_inode,
3134 - struct buffer_head **bh2)
3135 + struct buffer_head **bh_t)
3136 {
3137 - struct inode *inode1;
3138 - struct inode *inode2;
3139 + struct inode *inode1 = s_inode;
3140 + struct inode *inode2 = t_inode;
3141 struct ocfs2_inode_info *oi1;
3142 struct ocfs2_inode_info *oi2;
3143 + struct buffer_head *bh1 = NULL;
3144 + struct buffer_head *bh2 = NULL;
3145 bool same_inode = (s_inode == t_inode);
3146 + bool need_swap = (inode1->i_ino > inode2->i_ino);
3147 int status;
3148
3149 /* First grab the VFS and rw locks. */
3150 lock_two_nondirectories(s_inode, t_inode);
3151 - inode1 = s_inode;
3152 - inode2 = t_inode;
3153 - if (inode1->i_ino > inode2->i_ino)
3154 + if (need_swap)
3155 swap(inode1, inode2);
3156
3157 status = ocfs2_rw_lock(inode1, 1);
3158 @@ -4754,17 +4755,13 @@ static int ocfs2_reflink_inodes_lock(struct inode *s_inode,
3159 trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
3160 (unsigned long long)oi2->ip_blkno);
3161
3162 - if (*bh1)
3163 - *bh1 = NULL;
3164 - if (*bh2)
3165 - *bh2 = NULL;
3166 -
3167 /* We always want to lock the one with the lower lockid first. */
3168 if (oi1->ip_blkno > oi2->ip_blkno)
3169 mlog_errno(-ENOLCK);
3170
3171 /* lock id1 */
3172 - status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET);
3173 + status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
3174 + OI_LS_REFLINK_TARGET);
3175 if (status < 0) {
3176 if (status != -ENOENT)
3177 mlog_errno(status);
3178 @@ -4773,15 +4770,25 @@ static int ocfs2_reflink_inodes_lock(struct inode *s_inode,
3179
3180 /* lock id2 */
3181 if (!same_inode) {
3182 - status = ocfs2_inode_lock_nested(inode2, bh2, 1,
3183 + status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
3184 OI_LS_REFLINK_TARGET);
3185 if (status < 0) {
3186 if (status != -ENOENT)
3187 mlog_errno(status);
3188 goto out_cl1;
3189 }
3190 - } else
3191 - *bh2 = *bh1;
3192 + } else {
3193 + bh2 = bh1;
3194 + }
3195 +
3196 + /*
3197 + * If we swapped inode order above, we have to swap the buffer heads
3198 + * before passing them back to the caller.
3199 + */
3200 + if (need_swap)
3201 + swap(bh1, bh2);
3202 + *bh_s = bh1;
3203 + *bh_t = bh2;
3204
3205 trace_ocfs2_double_lock_end(
3206 (unsigned long long)oi1->ip_blkno,
3207 @@ -4791,8 +4798,7 @@ static int ocfs2_reflink_inodes_lock(struct inode *s_inode,
3208
3209 out_cl1:
3210 ocfs2_inode_unlock(inode1, 1);
3211 - brelse(*bh1);
3212 - *bh1 = NULL;
3213 + brelse(bh1);
3214 out_rw2:
3215 ocfs2_rw_unlock(inode2, 1);
3216 out_i2:
3217 diff --git a/fs/open.c b/fs/open.c
3218 index 0285ce7dbd51..f1c2f855fd43 100644
3219 --- a/fs/open.c
3220 +++ b/fs/open.c
3221 @@ -733,6 +733,12 @@ static int do_dentry_open(struct file *f,
3222 return 0;
3223 }
3224
3225 + /* Any file opened for execve()/uselib() has to be a regular file. */
3226 + if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
3227 + error = -EACCES;
3228 + goto cleanup_file;
3229 + }
3230 +
3231 if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
3232 error = get_write_access(inode);
3233 if (unlikely(error))
3234 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
3235 index 4d598a399bbf..d65390727541 100644
3236 --- a/fs/proc/proc_sysctl.c
3237 +++ b/fs/proc/proc_sysctl.c
3238 @@ -1626,7 +1626,8 @@ static void drop_sysctl_table(struct ctl_table_header *header)
3239 if (--header->nreg)
3240 return;
3241
3242 - put_links(header);
3243 + if (parent)
3244 + put_links(header);
3245 start_unregistering(header);
3246 if (!--header->count)
3247 kfree_rcu(header, rcu);
3248 diff --git a/include/linux/device.h b/include/linux/device.h
3249 index 8f882549edee..3f1066a9e1c3 100644
3250 --- a/include/linux/device.h
3251 +++ b/include/linux/device.h
3252 @@ -773,6 +773,30 @@ struct device *device_connection_find(struct device *dev, const char *con_id);
3253 void device_connection_add(struct device_connection *con);
3254 void device_connection_remove(struct device_connection *con);
3255
3256 +/**
3257 + * device_connections_add - Add multiple device connections at once
3258 + * @cons: Zero terminated array of device connection descriptors
3259 + */
3260 +static inline void device_connections_add(struct device_connection *cons)
3261 +{
3262 + struct device_connection *c;
3263 +
3264 + for (c = cons; c->endpoint[0]; c++)
3265 + device_connection_add(c);
3266 +}
3267 +
3268 +/**
3269 + * device_connections_remove - Remove multiple device connections at once
3270 + * @cons: Zero terminated array of device connection descriptors
3271 + */
3272 +static inline void device_connections_remove(struct device_connection *cons)
3273 +{
3274 + struct device_connection *c;
3275 +
3276 + for (c = cons; c->endpoint[0]; c++)
3277 + device_connection_remove(c);
3278 +}
3279 +
3280 /**
3281 * enum device_link_state - Device link states.
3282 * @DL_STATE_NONE: The presence of the drivers is not being tracked.
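The include/linux/device.h hunk above adds helpers that walk a zero-terminated array of struct device_connection, stopping at the first entry whose endpoint[0] is empty. A sketch of how a board file might use them (the device names and con_ids are invented, and the field names assume the struct device_connection layout used by this kernel series):

#include <linux/device.h>

static struct device_connection board_typec_connections[] = {
	{
		.endpoint = { "i2c-tcpc0", "typec-mux0" },	/* hypothetical devices */
		.id = "typec-mux",
	},
	{
		.endpoint = { "i2c-tcpc0", "typec-switch0" },
		.id = "typec-switch",
	},
	{ }	/* zero terminator checked by the helpers above */
};

static void board_add_typec_connections(void)
{
	device_connections_add(board_typec_connections);
}

static void board_remove_typec_connections(void)
{
	device_connections_remove(board_typec_connections);
}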
3283 diff --git a/include/linux/slab.h b/include/linux/slab.h
3284 index ed9cbddeb4a6..d6393413ef09 100644
3285 --- a/include/linux/slab.h
3286 +++ b/include/linux/slab.h
3287 @@ -32,6 +32,8 @@
3288 #define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
3289 /* Use GFP_DMA memory */
3290 #define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
3291 +/* Use GFP_DMA32 memory */
3292 +#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
3293 /* DEBUG: Store the last owner for bug hunting */
3294 #define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
3295 /* Panic if kmem_cache_create() fails */
3296 diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
3297 index 32ee65a30aff..1c6e6c0766ca 100644
3298 --- a/include/net/sctp/checksum.h
3299 +++ b/include/net/sctp/checksum.h
3300 @@ -61,7 +61,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
3301 static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
3302 unsigned int offset)
3303 {
3304 - struct sctphdr *sh = sctp_hdr(skb);
3305 + struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
3306 const struct skb_checksum_ops ops = {
3307 .update = sctp_csum_update,
3308 .combine = sctp_csum_combine,
3309 diff --git a/include/net/sock.h b/include/net/sock.h
3310 index 6cb5a545df7d..1ece7736c49c 100644
3311 --- a/include/net/sock.h
3312 +++ b/include/net/sock.h
3313 @@ -710,6 +710,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
3314 hlist_add_head_rcu(&sk->sk_node, list);
3315 }
3316
3317 +static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
3318 +{
3319 + sock_hold(sk);
3320 + hlist_add_tail_rcu(&sk->sk_node, list);
3321 +}
3322 +
3323 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
3324 {
3325 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
3326 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3327 index bcb42aaf1b3a..acc2305ad895 100644
3328 --- a/kernel/bpf/verifier.c
3329 +++ b/kernel/bpf/verifier.c
3330 @@ -2815,7 +2815,7 @@ do_sim:
3331 *dst_reg = *ptr_reg;
3332 }
3333 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3334 - if (!ptr_is_dst_reg)
3335 + if (!ptr_is_dst_reg && ret)
3336 *dst_reg = tmp;
3337 return !ret ? -EFAULT : 0;
3338 }
3339 diff --git a/kernel/cpu.c b/kernel/cpu.c
3340 index 56f657adcf03..9d0ecc4a0e79 100644
3341 --- a/kernel/cpu.c
3342 +++ b/kernel/cpu.c
3343 @@ -533,6 +533,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
3344 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
3345 }
3346
3347 +static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
3348 +{
3349 + if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
3350 + return true;
3351 + /*
3352 + * When CPU hotplug is disabled, then taking the CPU down is not
3353 + * possible because takedown_cpu() and the architecture and
3354 + * subsystem specific mechanisms are not available. So the CPU
3355 + * which would be completely unplugged again needs to stay around
3356 + * in the current state.
3357 + */
3358 + return st->state <= CPUHP_BRINGUP_CPU;
3359 +}
3360 +
3361 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
3362 enum cpuhp_state target)
3363 {
3364 @@ -543,8 +557,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
3365 st->state++;
3366 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
3367 if (ret) {
3368 - st->target = prev_state;
3369 - undo_cpu_up(cpu, st);
3370 + if (can_rollback_cpu(st)) {
3371 + st->target = prev_state;
3372 + undo_cpu_up(cpu, st);
3373 + }
3374 break;
3375 }
3376 }
3377 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
3378 index 977918d5d350..bbc4940f21af 100644
3379 --- a/kernel/watchdog.c
3380 +++ b/kernel/watchdog.c
3381 @@ -547,13 +547,15 @@ static void softlockup_start_all(void)
3382
3383 int lockup_detector_online_cpu(unsigned int cpu)
3384 {
3385 - watchdog_enable(cpu);
3386 + if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
3387 + watchdog_enable(cpu);
3388 return 0;
3389 }
3390
3391 int lockup_detector_offline_cpu(unsigned int cpu)
3392 {
3393 - watchdog_disable(cpu);
3394 + if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
3395 + watchdog_disable(cpu);
3396 return 0;
3397 }
3398
3399 diff --git a/lib/rhashtable.c b/lib/rhashtable.c
3400 index 30526afa8343..6410c857b048 100644
3401 --- a/lib/rhashtable.c
3402 +++ b/lib/rhashtable.c
3403 @@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
3404 else if (tbl->nest)
3405 err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
3406
3407 - if (!err)
3408 - err = rhashtable_rehash_table(ht);
3409 + if (!err || err == -EEXIST) {
3410 + int nerr;
3411 +
3412 + nerr = rhashtable_rehash_table(ht);
3413 + err = err ?: nerr;
3414 + }
3415
3416 mutex_unlock(&ht->mutex);
3417
3418 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
3419 index 89d4439516f6..f32d0a5be4fb 100644
3420 --- a/mm/mempolicy.c
3421 +++ b/mm/mempolicy.c
3422 @@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
3423 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
3424 }
3425
3426 +/*
3427 + * queue_pages_pmd() has three possible return values:
3428 + * 1 - pages are placed on the right node or queued successfully.
3429 + * 0 - THP was split.
3430 + * -EIO - the pmd is a migration entry or MPOL_MF_STRICT was specified and an existing
3431 + * page was already on a node that does not follow the policy.
3432 + */
3433 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
3434 unsigned long end, struct mm_walk *walk)
3435 {
3436 @@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
3437 unsigned long flags;
3438
3439 if (unlikely(is_pmd_migration_entry(*pmd))) {
3440 - ret = 1;
3441 + ret = -EIO;
3442 goto unlock;
3443 }
3444 page = pmd_page(*pmd);
3445 @@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
3446 ret = 1;
3447 flags = qp->flags;
3448 /* go to thp migration */
3449 - if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
3450 + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
3451 + if (!vma_migratable(walk->vma)) {
3452 + ret = -EIO;
3453 + goto unlock;
3454 + }
3455 +
3456 migrate_page_add(page, qp->pagelist, flags);
3457 + } else
3458 + ret = -EIO;
3459 unlock:
3460 spin_unlock(ptl);
3461 out:
3462 @@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
3463 ptl = pmd_trans_huge_lock(pmd, vma);
3464 if (ptl) {
3465 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
3466 - if (ret)
3467 + if (ret > 0)
3468 return 0;
3469 + else if (ret < 0)
3470 + return ret;
3471 }
3472
3473 if (pmd_trans_unstable(pmd))
3474 @@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
3475 continue;
3476 if (!queue_pages_required(page, qp))
3477 continue;
3478 - migrate_page_add(page, qp->pagelist, flags);
3479 + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
3480 + if (!vma_migratable(vma))
3481 + break;
3482 + migrate_page_add(page, qp->pagelist, flags);
3483 + } else
3484 + break;
3485 }
3486 pte_unmap_unlock(pte - 1, ptl);
3487 cond_resched();
3488 - return 0;
3489 + return addr != end ? -EIO : 0;
3490 }
3491
3492 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
3493 @@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
3494 unsigned long endvma = vma->vm_end;
3495 unsigned long flags = qp->flags;
3496
3497 - if (!vma_migratable(vma))
3498 + /*
3499 + * Need to check MPOL_MF_STRICT to return -EIO if possible
3500 + * regardless of vma_migratable
3501 + */
3502 + if (!vma_migratable(vma) &&
3503 + !(flags & MPOL_MF_STRICT))
3504 return 1;
3505
3506 if (endvma > end)
3507 @@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
3508 }
3509
3510 /* queue pages from current vma */
3511 - if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
3512 + if (flags & MPOL_MF_VALID)
3513 return 0;
3514 return 1;
3515 }
3516 diff --git a/mm/migrate.c b/mm/migrate.c
3517 index 14779c4f9a60..b2ea7d1e6f24 100644
3518 --- a/mm/migrate.c
3519 +++ b/mm/migrate.c
3520 @@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
3521 pte = swp_entry_to_pte(entry);
3522 } else if (is_device_public_page(new)) {
3523 pte = pte_mkdevmap(pte);
3524 - flush_dcache_page(new);
3525 }
3526 - } else
3527 - flush_dcache_page(new);
3528 + }
3529
3530 #ifdef CONFIG_HUGETLB_PAGE
3531 if (PageHuge(new)) {
3532 @@ -983,6 +981,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
3533 */
3534 if (!PageMappingFlags(page))
3535 page->mapping = NULL;
3536 +
3537 + if (unlikely(is_zone_device_page(newpage))) {
3538 + if (is_device_public_page(newpage))
3539 + flush_dcache_page(newpage);
3540 + } else
3541 + flush_dcache_page(newpage);
3542 +
3543 }
3544 out:
3545 return rc;
3546 diff --git a/mm/slab.c b/mm/slab.c
3547 index fad6839e8eab..364e42d5a399 100644
3548 --- a/mm/slab.c
3549 +++ b/mm/slab.c
3550 @@ -2124,6 +2124,8 @@ done:
3551 cachep->allocflags = __GFP_COMP;
3552 if (flags & SLAB_CACHE_DMA)
3553 cachep->allocflags |= GFP_DMA;
3554 + if (flags & SLAB_CACHE_DMA32)
3555 + cachep->allocflags |= GFP_DMA32;
3556 if (flags & SLAB_RECLAIM_ACCOUNT)
3557 cachep->allocflags |= __GFP_RECLAIMABLE;
3558 cachep->size = size;
3559 diff --git a/mm/slab.h b/mm/slab.h
3560 index 58c6c1c2a78e..9632772e14be 100644
3561 --- a/mm/slab.h
3562 +++ b/mm/slab.h
3563 @@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
3564
3565
3566 /* Legal flag mask for kmem_cache_create(), for various configurations */
3567 -#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
3568 +#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
3569 + SLAB_CACHE_DMA32 | SLAB_PANIC | \
3570 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
3571
3572 #if defined(CONFIG_DEBUG_SLAB)
3573 diff --git a/mm/slab_common.c b/mm/slab_common.c
3574 index 3a7ac4f15194..4d3c2e76d1ba 100644
3575 --- a/mm/slab_common.c
3576 +++ b/mm/slab_common.c
3577 @@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
3578 SLAB_FAILSLAB | SLAB_KASAN)
3579
3580 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
3581 - SLAB_ACCOUNT)
3582 + SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
3583
3584 /*
3585 * Merge control. If this is set then no merging of slab caches will occur.
3586 diff --git a/mm/slub.c b/mm/slub.c
3587 index 8da34a8af53d..09c0e24a06d8 100644
3588 --- a/mm/slub.c
3589 +++ b/mm/slub.c
3590 @@ -3539,6 +3539,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
3591 if (s->flags & SLAB_CACHE_DMA)
3592 s->allocflags |= GFP_DMA;
3593
3594 + if (s->flags & SLAB_CACHE_DMA32)
3595 + s->allocflags |= GFP_DMA32;
3596 +
3597 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3598 s->allocflags |= __GFP_RECLAIMABLE;
3599
3600 @@ -5633,6 +5636,8 @@ static char *create_unique_id(struct kmem_cache *s)
3601 */
3602 if (s->flags & SLAB_CACHE_DMA)
3603 *p++ = 'd';
3604 + if (s->flags & SLAB_CACHE_DMA32)
3605 + *p++ = 'D';
3606 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3607 *p++ = 'a';
3608 if (s->flags & SLAB_CONSISTENCY_CHECKS)
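The slab/slub hunks above plumb the new SLAB_CACHE_DMA32 flag through to GFP_DMA32 allocations, the cache-merge mask and the sysfs cache name. A sketch of how a caller would request such a cache (the cache name, object size and error handling are illustrative, not taken from the patch):

#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *dma32_pt_cache;

static int dma32_pt_cache_init(void)
{
	/* objects come from GFP_DMA32 pages, i.e. physically below 4 GiB */
	dma32_pt_cache = kmem_cache_create("hypothetical-dma32-cache",
					   1024, 1024,
					   SLAB_CACHE_DMA32, NULL);
	return dma32_pt_cache ? 0 : -ENOMEM;
}

static void dma32_pt_cache_exit(void)
{
	kmem_cache_destroy(dma32_pt_cache);
}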
3609 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
3610 index d17a4736e47c..2c6eabf294b3 100644
3611 --- a/net/bluetooth/l2cap_core.c
3612 +++ b/net/bluetooth/l2cap_core.c
3613 @@ -3336,16 +3336,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3614
3615 while (len >= L2CAP_CONF_OPT_SIZE) {
3616 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3617 + if (len < 0)
3618 + break;
3619
3620 hint = type & L2CAP_CONF_HINT;
3621 type &= L2CAP_CONF_MASK;
3622
3623 switch (type) {
3624 case L2CAP_CONF_MTU:
3625 + if (olen != 2)
3626 + break;
3627 mtu = val;
3628 break;
3629
3630 case L2CAP_CONF_FLUSH_TO:
3631 + if (olen != 2)
3632 + break;
3633 chan->flush_to = val;
3634 break;
3635
3636 @@ -3353,26 +3359,30 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3637 break;
3638
3639 case L2CAP_CONF_RFC:
3640 - if (olen == sizeof(rfc))
3641 - memcpy(&rfc, (void *) val, olen);
3642 + if (olen != sizeof(rfc))
3643 + break;
3644 + memcpy(&rfc, (void *) val, olen);
3645 break;
3646
3647 case L2CAP_CONF_FCS:
3648 + if (olen != 1)
3649 + break;
3650 if (val == L2CAP_FCS_NONE)
3651 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3652 break;
3653
3654 case L2CAP_CONF_EFS:
3655 - if (olen == sizeof(efs)) {
3656 - remote_efs = 1;
3657 - memcpy(&efs, (void *) val, olen);
3658 - }
3659 + if (olen != sizeof(efs))
3660 + break;
3661 + remote_efs = 1;
3662 + memcpy(&efs, (void *) val, olen);
3663 break;
3664
3665 case L2CAP_CONF_EWS:
3666 + if (olen != 2)
3667 + break;
3668 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3669 return -ECONNREFUSED;
3670 -
3671 set_bit(FLAG_EXT_CTRL, &chan->flags);
3672 set_bit(CONF_EWS_RECV, &chan->conf_state);
3673 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3674 @@ -3382,7 +3392,6 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3675 default:
3676 if (hint)
3677 break;
3678 -
3679 result = L2CAP_CONF_UNKNOWN;
3680 *((u8 *) ptr++) = type;
3681 break;
3682 @@ -3547,58 +3556,65 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3683
3684 while (len >= L2CAP_CONF_OPT_SIZE) {
3685 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3686 + if (len < 0)
3687 + break;
3688
3689 switch (type) {
3690 case L2CAP_CONF_MTU:
3691 + if (olen != 2)
3692 + break;
3693 if (val < L2CAP_DEFAULT_MIN_MTU) {
3694 *result = L2CAP_CONF_UNACCEPT;
3695 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3696 } else
3697 chan->imtu = val;
3698 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3699 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3700 + endptr - ptr);
3701 break;
3702
3703 case L2CAP_CONF_FLUSH_TO:
3704 + if (olen != 2)
3705 + break;
3706 chan->flush_to = val;
3707 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3708 - 2, chan->flush_to, endptr - ptr);
3709 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3710 + chan->flush_to, endptr - ptr);
3711 break;
3712
3713 case L2CAP_CONF_RFC:
3714 - if (olen == sizeof(rfc))
3715 - memcpy(&rfc, (void *)val, olen);
3716 -
3717 + if (olen != sizeof(rfc))
3718 + break;
3719 + memcpy(&rfc, (void *)val, olen);
3720 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3721 rfc.mode != chan->mode)
3722 return -ECONNREFUSED;
3723 -
3724 chan->fcs = 0;
3725 -
3726 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3727 - sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3728 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3729 + (unsigned long) &rfc, endptr - ptr);
3730 break;
3731
3732 case L2CAP_CONF_EWS:
3733 + if (olen != 2)
3734 + break;
3735 chan->ack_win = min_t(u16, val, chan->ack_win);
3736 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3737 chan->tx_win, endptr - ptr);
3738 break;
3739
3740 case L2CAP_CONF_EFS:
3741 - if (olen == sizeof(efs)) {
3742 - memcpy(&efs, (void *)val, olen);
3743 -
3744 - if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3745 - efs.stype != L2CAP_SERV_NOTRAFIC &&
3746 - efs.stype != chan->local_stype)
3747 - return -ECONNREFUSED;
3748 -
3749 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3750 - (unsigned long) &efs, endptr - ptr);
3751 - }
3752 + if (olen != sizeof(efs))
3753 + break;
3754 + memcpy(&efs, (void *)val, olen);
3755 + if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3756 + efs.stype != L2CAP_SERV_NOTRAFIC &&
3757 + efs.stype != chan->local_stype)
3758 + return -ECONNREFUSED;
3759 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3760 + (unsigned long) &efs, endptr - ptr);
3761 break;
3762
3763 case L2CAP_CONF_FCS:
3764 + if (olen != 1)
3765 + break;
3766 if (*result == L2CAP_CONF_PENDING)
3767 if (val == L2CAP_FCS_NONE)
3768 set_bit(CONF_RECV_NO_FCS,
3769 @@ -3727,13 +3743,18 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3770
3771 while (len >= L2CAP_CONF_OPT_SIZE) {
3772 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3773 + if (len < 0)
3774 + break;
3775
3776 switch (type) {
3777 case L2CAP_CONF_RFC:
3778 - if (olen == sizeof(rfc))
3779 - memcpy(&rfc, (void *)val, olen);
3780 + if (olen != sizeof(rfc))
3781 + break;
3782 + memcpy(&rfc, (void *)val, olen);
3783 break;
3784 case L2CAP_CONF_EWS:
3785 + if (olen != 2)
3786 + break;
3787 txwin_ext = val;
3788 break;
3789 }
3790 diff --git a/net/core/datagram.c b/net/core/datagram.c
3791 index 57f3a6fcfc1e..a487df53a453 100644
3792 --- a/net/core/datagram.c
3793 +++ b/net/core/datagram.c
3794 @@ -279,7 +279,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
3795 break;
3796
3797 sk_busy_loop(sk, flags & MSG_DONTWAIT);
3798 - } while (!skb_queue_empty(&sk->sk_receive_queue));
3799 + } while (sk->sk_receive_queue.prev != *last);
3800
3801 error = -EAGAIN;
3802
3803 diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
3804 index 2aabb7eb0854..bf9a3b6ac885 100644
3805 --- a/net/core/net-sysfs.c
3806 +++ b/net/core/net-sysfs.c
3807 @@ -934,6 +934,8 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
3808 if (error)
3809 return error;
3810
3811 + dev_hold(queue->dev);
3812 +
3813 if (dev->sysfs_rx_queue_group) {
3814 error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
3815 if (error) {
3816 @@ -943,7 +945,6 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
3817 }
3818
3819 kobject_uevent(kobj, KOBJ_ADD);
3820 - dev_hold(queue->dev);
3821
3822 return error;
3823 }
3824 @@ -1472,6 +1473,8 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
3825 if (error)
3826 return error;
3827
3828 + dev_hold(queue->dev);
3829 +
3830 #ifdef CONFIG_BQL
3831 error = sysfs_create_group(kobj, &dql_group);
3832 if (error) {
3833 @@ -1481,7 +1484,6 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
3834 #endif
3835
3836 kobject_uevent(kobj, KOBJ_ADD);
3837 - dev_hold(queue->dev);
3838
3839 return 0;
3840 }
3841 diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
3842 index 6344f1b18a6a..58a401e9cf09 100644
3843 --- a/net/dccp/ipv6.c
3844 +++ b/net/dccp/ipv6.c
3845 @@ -433,8 +433,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
3846 newnp->ipv6_mc_list = NULL;
3847 newnp->ipv6_ac_list = NULL;
3848 newnp->ipv6_fl_list = NULL;
3849 - newnp->mcast_oif = inet6_iif(skb);
3850 - newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
3851 + newnp->mcast_oif = inet_iif(skb);
3852 + newnp->mcast_hops = ip_hdr(skb)->ttl;
3853
3854 /*
3855 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
3856 diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
3857 index 17c455ff69ff..7858fa9ea103 100644
3858 --- a/net/ipv6/ila/ila_xlat.c
3859 +++ b/net/ipv6/ila/ila_xlat.c
3860 @@ -420,6 +420,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
3861
3862 done:
3863 rhashtable_walk_stop(&iter);
3864 + rhashtable_walk_exit(&iter);
3865 return ret;
3866 }
3867
3868 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3869 index 66cc94427437..9006bb3c9e72 100644
3870 --- a/net/ipv6/route.c
3871 +++ b/net/ipv6/route.c
3872 @@ -1048,14 +1048,20 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
3873 struct rt6_info *nrt;
3874
3875 if (!fib6_info_hold_safe(rt))
3876 - return NULL;
3877 + goto fallback;
3878
3879 nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
3880 - if (nrt)
3881 - ip6_rt_copy_init(nrt, rt);
3882 - else
3883 + if (!nrt) {
3884 fib6_info_release(rt);
3885 + goto fallback;
3886 + }
3887
3888 + ip6_rt_copy_init(nrt, rt);
3889 + return nrt;
3890 +
3891 +fallback:
3892 + nrt = dev_net(dev)->ipv6.ip6_null_entry;
3893 + dst_hold(&nrt->dst);
3894 return nrt;
3895 }
3896
3897 @@ -1104,10 +1110,6 @@ restart:
3898 dst_hold(&rt->dst);
3899 } else {
3900 rt = ip6_create_rt_rcu(f6i);
3901 - if (!rt) {
3902 - rt = net->ipv6.ip6_null_entry;
3903 - dst_hold(&rt->dst);
3904 - }
3905 }
3906
3907 rcu_read_unlock();
3908 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3909 index 03e6b7a2bc53..e7cdfa92c382 100644
3910 --- a/net/ipv6/tcp_ipv6.c
3911 +++ b/net/ipv6/tcp_ipv6.c
3912 @@ -1108,11 +1108,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
3913 newnp->ipv6_fl_list = NULL;
3914 newnp->pktoptions = NULL;
3915 newnp->opt = NULL;
3916 - newnp->mcast_oif = tcp_v6_iif(skb);
3917 - newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
3918 - newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
3919 + newnp->mcast_oif = inet_iif(skb);
3920 + newnp->mcast_hops = ip_hdr(skb)->ttl;
3921 + newnp->rcv_flowinfo = 0;
3922 if (np->repflow)
3923 - newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
3924 + newnp->flow_label = 0;
3925
3926 /*
3927 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
3928 diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
3929 index 25eeb6d2a75a..f0ec068e1d02 100644
3930 --- a/net/netlink/genetlink.c
3931 +++ b/net/netlink/genetlink.c
3932 @@ -366,7 +366,7 @@ int genl_register_family(struct genl_family *family)
3933 start, end + 1, GFP_KERNEL);
3934 if (family->id < 0) {
3935 err = family->id;
3936 - goto errout_locked;
3937 + goto errout_free;
3938 }
3939
3940 err = genl_validate_assign_mc_groups(family);
3941 @@ -385,6 +385,7 @@ int genl_register_family(struct genl_family *family)
3942
3943 errout_remove:
3944 idr_remove(&genl_fam_idr, family->id);
3945 +errout_free:
3946 kfree(family->attrbuf);
3947 errout_locked:
3948 genl_unlock_all();
3949 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3950 index fd16fb836df2..a0d295478e69 100644
3951 --- a/net/packet/af_packet.c
3952 +++ b/net/packet/af_packet.c
3953 @@ -3245,7 +3245,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
3954 }
3955
3956 mutex_lock(&net->packet.sklist_lock);
3957 - sk_add_node_rcu(sk, &net->packet.sklist);
3958 + sk_add_node_tail_rcu(sk, &net->packet.sklist);
3959 mutex_unlock(&net->packet.sklist_lock);
3960
3961 preempt_disable();
3962 @@ -4194,7 +4194,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3963 struct pgv *pg_vec;
3964 int i;
3965
3966 - pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3967 + pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
3968 if (unlikely(!pg_vec))
3969 goto out;
3970
3971 diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
3972 index 7ca57741b2fb..7849f286bb93 100644
3973 --- a/net/rose/rose_subr.c
3974 +++ b/net/rose/rose_subr.c
3975 @@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype)
3976 struct sk_buff *skb;
3977 unsigned char *dptr;
3978 unsigned char lci1, lci2;
3979 - char buffer[100];
3980 - int len, faclen = 0;
3981 + int maxfaclen = 0;
3982 + int len, faclen;
3983 + int reserve;
3984
3985 - len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
3986 + reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
3987 + len = ROSE_MIN_LEN;
3988
3989 switch (frametype) {
3990 case ROSE_CALL_REQUEST:
3991 len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
3992 - faclen = rose_create_facilities(buffer, rose);
3993 - len += faclen;
3994 + maxfaclen = 256;
3995 break;
3996 case ROSE_CALL_ACCEPTED:
3997 case ROSE_CLEAR_REQUEST:
3998 @@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype)
3999 break;
4000 }
4001
4002 - if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
4003 + skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
4004 + if (!skb)
4005 return;
4006
4007 /*
4008 * Space for AX.25 header and PID.
4009 */
4010 - skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
4011 + skb_reserve(skb, reserve);
4012
4013 - dptr = skb_put(skb, skb_tailroom(skb));
4014 + dptr = skb_put(skb, len);
4015
4016 lci1 = (rose->lci >> 8) & 0x0F;
4017 lci2 = (rose->lci >> 0) & 0xFF;
4018 @@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype)
4019 dptr += ROSE_ADDR_LEN;
4020 memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
4021 dptr += ROSE_ADDR_LEN;
4022 - memcpy(dptr, buffer, faclen);
4023 + faclen = rose_create_facilities(dptr, rose);
4024 + skb_put(skb, faclen);
4025 dptr += faclen;
4026 break;
4027
4028 diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
4029 index 8bf66d0a6800..f767e78e38c9 100644
4030 --- a/net/sched/act_mirred.c
4031 +++ b/net/sched/act_mirred.c
4032 @@ -159,6 +159,9 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
4033 }
4034 m = to_mirred(*a);
4035
4036 + if (ret == ACT_P_CREATED)
4037 + INIT_LIST_HEAD(&m->tcfm_list);
4038 +
4039 spin_lock_bh(&m->tcf_lock);
4040 m->tcf_action = parm->action;
4041 m->tcfm_eaction = parm->eaction;
4042 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
4043 index 1b16250c5718..8c00a7ef1bcd 100644
4044 --- a/net/sctp/socket.c
4045 +++ b/net/sctp/socket.c
4046 @@ -1017,7 +1017,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
4047 if (unlikely(addrs_size <= 0))
4048 return -EINVAL;
4049
4050 - kaddrs = vmemdup_user(addrs, addrs_size);
4051 + kaddrs = memdup_user(addrs, addrs_size);
4052 if (unlikely(IS_ERR(kaddrs)))
4053 return PTR_ERR(kaddrs);
4054
4055 @@ -1025,7 +1025,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
4056 addr_buf = kaddrs;
4057 while (walk_size < addrs_size) {
4058 if (walk_size + sizeof(sa_family_t) > addrs_size) {
4059 - kvfree(kaddrs);
4060 + kfree(kaddrs);
4061 return -EINVAL;
4062 }
4063
4064 @@ -1036,7 +1036,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
4065 * causes the address buffer to overflow return EINVAL.
4066 */
4067 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
4068 - kvfree(kaddrs);
4069 + kfree(kaddrs);
4070 return -EINVAL;
4071 }
4072 addrcnt++;
4073 @@ -1072,7 +1072,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
4074 }
4075
4076 out:
4077 - kvfree(kaddrs);
4078 + kfree(kaddrs);
4079
4080 return err;
4081 }
4082 @@ -1347,7 +1347,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
4083 if (unlikely(addrs_size <= 0))
4084 return -EINVAL;
4085
4086 - kaddrs = vmemdup_user(addrs, addrs_size);
4087 + kaddrs = memdup_user(addrs, addrs_size);
4088 if (unlikely(IS_ERR(kaddrs)))
4089 return PTR_ERR(kaddrs);
4090
4091 @@ -1367,7 +1367,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
4092 err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
4093
4094 out_free:
4095 - kvfree(kaddrs);
4096 + kfree(kaddrs);
4097
4098 return err;
4099 }
4100 diff --git a/net/tipc/net.c b/net/tipc/net.c
4101 index f076edb74338..7ce1e86b024f 100644
4102 --- a/net/tipc/net.c
4103 +++ b/net/tipc/net.c
4104 @@ -163,12 +163,9 @@ void tipc_sched_net_finalize(struct net *net, u32 addr)
4105
4106 void tipc_net_stop(struct net *net)
4107 {
4108 - u32 self = tipc_own_addr(net);
4109 -
4110 - if (!self)
4111 + if (!tipc_own_id(net))
4112 return;
4113
4114 - tipc_nametbl_withdraw(net, TIPC_CFG_SRV, self, self, self);
4115 rtnl_lock();
4116 tipc_bearer_stop(net);
4117 tipc_node_stop(net);
4118 diff --git a/net/tipc/socket.c b/net/tipc/socket.c
4119 index 88c307ef1318..67a7b312a499 100644
4120 --- a/net/tipc/socket.c
4121 +++ b/net/tipc/socket.c
4122 @@ -2310,6 +2310,16 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
4123 return 0;
4124 }
4125
4126 +static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
4127 +{
4128 + if (addr->family != AF_TIPC)
4129 + return false;
4130 + if (addr->addrtype == TIPC_SERVICE_RANGE)
4131 + return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
4132 + return (addr->addrtype == TIPC_SERVICE_ADDR ||
4133 + addr->addrtype == TIPC_SOCKET_ADDR);
4134 +}
4135 +
4136 /**
4137 * tipc_connect - establish a connection to another TIPC port
4138 * @sock: socket structure
4139 @@ -2345,18 +2355,18 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
4140 if (!tipc_sk_type_connectionless(sk))
4141 res = -EINVAL;
4142 goto exit;
4143 - } else if (dst->family != AF_TIPC) {
4144 - res = -EINVAL;
4145 }
4146 - if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
4147 + if (!tipc_sockaddr_is_sane(dst)) {
4148 res = -EINVAL;
4149 - if (res)
4150 goto exit;
4151 -
4152 + }
4153 /* DGRAM/RDM connect(), just save the destaddr */
4154 if (tipc_sk_type_connectionless(sk)) {
4155 memcpy(&tsk->peer, dest, destlen);
4156 goto exit;
4157 + } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
4158 + res = -EINVAL;
4159 + goto exit;
4160 }
4161
4162 previous = sk->sk_state;
4163 diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
4164 index d65eed88c495..2301b09df234 100644
4165 --- a/net/tipc/topsrv.c
4166 +++ b/net/tipc/topsrv.c
4167 @@ -371,6 +371,7 @@ static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
4168 struct tipc_subscription *sub;
4169
4170 if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
4171 + s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL);
4172 tipc_conn_delete_sub(con, s);
4173 return 0;
4174 }
4175 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
4176 index 5a77efd39b3f..858cbe56b100 100644
4177 --- a/scripts/mod/modpost.c
4178 +++ b/scripts/mod/modpost.c
4179 @@ -640,7 +640,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
4180 info->sechdrs[sym->st_shndx].sh_offset -
4181 (info->hdr->e_type != ET_REL ?
4182 info->sechdrs[sym->st_shndx].sh_addr : 0);
4183 - crc = *crcp;
4184 + crc = TO_NATIVE(*crcp);
4185 }
4186 sym_update_crc(symname + strlen("__crc_"), mod, crc,
4187 export);
4188 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
4189 index 467039b342b5..41abb8bd466a 100644
4190 --- a/sound/core/oss/pcm_oss.c
4191 +++ b/sound/core/oss/pcm_oss.c
4192 @@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
4193 oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
4194 params_channels(params) / 8;
4195
4196 + err = snd_pcm_oss_period_size(substream, params, sparams);
4197 + if (err < 0)
4198 + goto failure;
4199 +
4200 + n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
4201 + err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
4202 + if (err < 0)
4203 + goto failure;
4204 +
4205 + err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
4206 + runtime->oss.periods, NULL);
4207 + if (err < 0)
4208 + goto failure;
4209 +
4210 + snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
4211 +
4212 + err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
4213 + if (err < 0) {
4214 + pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
4215 + goto failure;
4216 + }
4217 +
4218 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
4219 snd_pcm_oss_plugin_clear(substream);
4220 if (!direct) {
4221 @@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
4222 }
4223 #endif
4224
4225 - err = snd_pcm_oss_period_size(substream, params, sparams);
4226 - if (err < 0)
4227 - goto failure;
4228 -
4229 - n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
4230 - err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
4231 - if (err < 0)
4232 - goto failure;
4233 -
4234 - err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
4235 - runtime->oss.periods, NULL);
4236 - if (err < 0)
4237 - goto failure;
4238 -
4239 - snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
4240 -
4241 - if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
4242 - pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
4243 - goto failure;
4244 - }
4245 -
4246 if (runtime->oss.trigger) {
4247 sw_params->start_threshold = 1;
4248 } else {
4249 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
4250 index 818dff1de545..b67f6fe08a1b 100644
4251 --- a/sound/core/pcm_native.c
4252 +++ b/sound/core/pcm_native.c
4253 @@ -1426,8 +1426,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
4254 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
4255 {
4256 struct snd_pcm_runtime *runtime = substream->runtime;
4257 - if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
4258 + switch (runtime->status->state) {
4259 + case SNDRV_PCM_STATE_SUSPENDED:
4260 return -EBUSY;
4261 + /* unresumable PCM state; return -EBUSY for skipping suspend */
4262 + case SNDRV_PCM_STATE_OPEN:
4263 + case SNDRV_PCM_STATE_SETUP:
4264 + case SNDRV_PCM_STATE_DISCONNECTED:
4265 + return -EBUSY;
4266 + }
4267 runtime->trigger_master = substream;
4268 return 0;
4269 }
4270 diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
4271 index 08d5662039e3..a52d6d16efc4 100644
4272 --- a/sound/core/rawmidi.c
4273 +++ b/sound/core/rawmidi.c
4274 @@ -30,6 +30,7 @@
4275 #include <linux/module.h>
4276 #include <linux/delay.h>
4277 #include <linux/mm.h>
4278 +#include <linux/nospec.h>
4279 #include <sound/rawmidi.h>
4280 #include <sound/info.h>
4281 #include <sound/control.h>
4282 @@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
4283 return -ENXIO;
4284 if (info->stream < 0 || info->stream > 1)
4285 return -EINVAL;
4286 + info->stream = array_index_nospec(info->stream, 2);
4287 pstr = &rmidi->streams[info->stream];
4288 if (pstr->substream_count == 0)
4289 return -ENOENT;
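The sound/core/rawmidi.c hunk above keeps the existing bounds check but additionally clamps info->stream with array_index_nospec(), so a mispredicted branch cannot index streams[] out of range under speculation. A minimal sketch of the same check-then-clamp pattern (read_entry() is an invented example, not the driver code):

#include <linux/errno.h>
#include <linux/nospec.h>

static int read_entry(const int *table, int nents, int idx)
{
	if (idx < 0 || idx >= nents)
		return -EINVAL;
	/* clamp idx under speculative execution before the array access */
	idx = array_index_nospec(idx, nents);
	return table[idx];
}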
4290 diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
4291 index 278ebb993122..c93945917235 100644
4292 --- a/sound/core/seq/oss/seq_oss_synth.c
4293 +++ b/sound/core/seq/oss/seq_oss_synth.c
4294 @@ -617,13 +617,14 @@ int
4295 snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
4296 {
4297 struct seq_oss_synth *rec;
4298 + struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
4299
4300 - if (dev < 0 || dev >= dp->max_synthdev)
4301 + if (!info)
4302 return -ENXIO;
4303
4304 - if (dp->synths[dev].is_midi) {
4305 + if (info->is_midi) {
4306 struct midi_info minf;
4307 - snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
4308 + snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
4309 inf->synth_type = SYNTH_TYPE_MIDI;
4310 inf->synth_subtype = 0;
4311 inf->nr_voices = 16;
4312 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4313 index 877293149e3a..4c6321ec844d 100644
4314 --- a/sound/pci/hda/patch_realtek.c
4315 +++ b/sound/pci/hda/patch_realtek.c
4316 @@ -5613,6 +5613,12 @@ enum {
4317 ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
4318 ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
4319 ALC255_FIXUP_ACER_HEADSET_MIC,
4320 + ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
4321 + ALC225_FIXUP_WYSE_AUTO_MUTE,
4322 + ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
4323 + ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
4324 + ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
4325 + ALC299_FIXUP_PREDATOR_SPK,
4326 };
4327
4328 static const struct hda_fixup alc269_fixups[] = {
4329 @@ -6567,6 +6573,54 @@ static const struct hda_fixup alc269_fixups[] = {
4330 .chained = true,
4331 .chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC
4332 },
4333 + [ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE] = {
4334 + .type = HDA_FIXUP_PINS,
4335 + .v.pins = (const struct hda_pintbl[]) {
4336 + { 0x16, 0x01011020 }, /* Rear Line out */
4337 + { 0x19, 0x01a1913c }, /* use as Front headset mic, without its own jack detect */
4338 + { }
4339 + },
4340 + .chained = true,
4341 + .chain_id = ALC225_FIXUP_WYSE_AUTO_MUTE
4342 + },
4343 + [ALC225_FIXUP_WYSE_AUTO_MUTE] = {
4344 + .type = HDA_FIXUP_FUNC,
4345 + .v.func = alc_fixup_auto_mute_via_amp,
4346 + .chained = true,
4347 + .chain_id = ALC225_FIXUP_WYSE_DISABLE_MIC_VREF
4348 + },
4349 + [ALC225_FIXUP_WYSE_DISABLE_MIC_VREF] = {
4350 + .type = HDA_FIXUP_FUNC,
4351 + .v.func = alc_fixup_disable_mic_vref,
4352 + .chained = true,
4353 + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
4354 + },
4355 + [ALC286_FIXUP_ACER_AIO_HEADSET_MIC] = {
4356 + .type = HDA_FIXUP_VERBS,
4357 + .v.verbs = (const struct hda_verb[]) {
4358 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x4f },
4359 + { 0x20, AC_VERB_SET_PROC_COEF, 0x5029 },
4360 + { }
4361 + },
4362 + .chained = true,
4363 + .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
4364 + },
4365 + [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
4366 + .type = HDA_FIXUP_PINS,
4367 + .v.pins = (const struct hda_pintbl[]) {
4368 + { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
4369 + { }
4370 + },
4371 + .chained = true,
4372 + .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
4373 + },
4374 + [ALC299_FIXUP_PREDATOR_SPK] = {
4375 + .type = HDA_FIXUP_PINS,
4376 + .v.pins = (const struct hda_pintbl[]) {
4377 + { 0x21, 0x90170150 }, /* use as headset mic, without its own jack detect */
4378 + { }
4379 + }
4380 + },
4381 };
4382
4383 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4384 @@ -6583,9 +6637,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4385 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
4386 SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4387 SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
4388 - SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4389 - SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4390 - SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4391 + SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
4392 + SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
4393 + SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
4394 + SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
4395 + SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
4396 + SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
4397 + SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
4398 SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
4399 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
4400 SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
4401 @@ -6631,6 +6689,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4402 SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
4403 SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
4404 SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
4405 + SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
4406 + SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
4407 SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
4408 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4409 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4410 @@ -6976,6 +7036,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4411 {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
4412 {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
4413 {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
4414 + {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
4415 {}
4416 };
4417 #define ALC225_STANDARD_PINS \
4418 @@ -7196,6 +7257,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4419 {0x14, 0x90170110},
4420 {0x1b, 0x90a70130},
4421 {0x21, 0x03211020}),
4422 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
4423 + {0x12, 0x90a60130},
4424 + {0x14, 0x90170110},
4425 + {0x21, 0x03211020}),
4426 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
4427 + {0x12, 0x90a60130},
4428 + {0x14, 0x90170110},
4429 + {0x21, 0x04211020}),
4430 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
4431 + {0x1a, 0x90a70130},
4432 + {0x1b, 0x90170110},
4433 + {0x21, 0x03211020}),
4434 SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
4435 {0x12, 0xb7a60130},
4436 {0x13, 0xb8a61140},
4437 diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
4438 index c9d038f91af6..53f8be0f4a1f 100644
4439 --- a/tools/objtool/Makefile
4440 +++ b/tools/objtool/Makefile
4441 @@ -25,14 +25,17 @@ LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a
4442 OBJTOOL := $(OUTPUT)objtool
4443 OBJTOOL_IN := $(OBJTOOL)-in.o
4444
4445 +LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
4446 +LIBELF_LIBS := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
4447 +
4448 all: $(OBJTOOL)
4449
4450 INCLUDES := -I$(srctree)/tools/include \
4451 -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
4452 -I$(srctree)/tools/objtool/arch/$(ARCH)/include
4453 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
4454 -CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES)
4455 -LDFLAGS += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
4456 +CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
4457 +LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
4458
4459 # Allow old libelf to be used:
4460 elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
4461 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4462 index f3db68abbd9a..0bc3e6e93c31 100644
4463 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4464 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4465 @@ -251,19 +251,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
4466 if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
4467 decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
4468 decoder->tsc_ctc_ratio_d;
4469 -
4470 - /*
4471 - * Allow for timestamps appearing to backwards because a TSC
4472 - * packet has slipped past a MTC packet, so allow 2 MTC ticks
4473 - * or ...
4474 - */
4475 - decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
4476 - decoder->tsc_ctc_ratio_n,
4477 - decoder->tsc_ctc_ratio_d);
4478 }
4479 - /* ... or 0x100 paranoia */
4480 - if (decoder->tsc_slip < 0x100)
4481 - decoder->tsc_slip = 0x100;
4482 +
4483 + /*
4484 + * A TSC packet can slip past MTC packets so that the timestamp appears
4485 + * to go backwards. One estimate is that it can be up to about 40 CPU
4486 + * cycles, which is certainly less than 0x1000 TSC ticks, but accept
4487 + * slippage an order of magnitude more to be on the safe side.
4488 + */
4489 + decoder->tsc_slip = 0x10000;
4490
4491 intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
4492 intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
4493 diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
4494 index 7348eea0248f..36cfc64c3824 100644
4495 --- a/tools/perf/util/pmu.c
4496 +++ b/tools/perf/util/pmu.c
4497 @@ -773,10 +773,20 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
4498
4499 if (!is_arm_pmu_core(name)) {
4500 pname = pe->pmu ? pe->pmu : "cpu";
4501 +
4502 + /*
4503 + * uncore alias may be from different PMU
4504 + * with common prefix
4505 + */
4506 + if (pmu_is_uncore(name) &&
4507 + !strncmp(pname, name, strlen(pname)))
4508 + goto new_alias;
4509 +
4510 if (strcmp(pname, name))
4511 continue;
4512 }
4513
4514 +new_alias:
4515 /* need type casts to override 'const' */
4516 __perf_pmu__new_alias(head, NULL, (char *)pe->name,
4517 (char *)pe->desc, (char *)pe->event,
4518 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4519 index c436d95fd7aa..6a79df88b546 100644
4520 --- a/virt/kvm/kvm_main.c
4521 +++ b/virt/kvm/kvm_main.c
4522 @@ -2815,6 +2815,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4523 {
4524 struct kvm_device *dev = filp->private_data;
4525
4526 + if (dev->kvm->mm != current->mm)
4527 + return -EIO;
4528 +
4529 switch (ioctl) {
4530 case KVM_SET_DEVICE_ATTR:
4531 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);