Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.15/0109-4.15.10-all-fixes.patch

Revision 3093
Wed Mar 21 14:52:52 2018 UTC by niro
File size: 211061 bytes
-linux-4.15.10
1 diff --git a/Documentation/devicetree/bindings/power/mti,mips-cpc.txt b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt
2 new file mode 100644
3 index 000000000000..c6b82511ae8a
4 --- /dev/null
5 +++ b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt
6 @@ -0,0 +1,8 @@
7 +Binding for MIPS Cluster Power Controller (CPC).
8 +
9 +This binding allows a system to specify where the CPC registers are
10 +located.
11 +
12 +Required properties:
13 +compatible : Should be "mti,mips-cpc".
14 +regs: Should describe the address & size of the CPC register region.
15 diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
16 index 39aa9e8697cc..fbedcc39460b 100644
17 --- a/Documentation/sphinx/kerneldoc.py
18 +++ b/Documentation/sphinx/kerneldoc.py
19 @@ -36,8 +36,7 @@ import glob
20
21 from docutils import nodes, statemachine
22 from docutils.statemachine import ViewList
23 -from docutils.parsers.rst import directives
24 -from sphinx.util.compat import Directive
25 +from docutils.parsers.rst import directives, Directive
26 from sphinx.ext.autodoc import AutodocReporter
27
28 __version__ = '1.0'
29 diff --git a/MAINTAINERS b/MAINTAINERS
30 index 845fc25812f1..8e5d2e5d85bf 100644
31 --- a/MAINTAINERS
32 +++ b/MAINTAINERS
33 @@ -9107,6 +9107,7 @@ MIPS GENERIC PLATFORM
34 M: Paul Burton <paul.burton@mips.com>
35 L: linux-mips@linux-mips.org
36 S: Supported
37 +F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt
38 F: arch/mips/generic/
39 F: arch/mips/tools/generic-board-config.sh
40
41 diff --git a/Makefile b/Makefile
42 index 0420f9a0c70f..7eed0f168b13 100644
43 --- a/Makefile
44 +++ b/Makefile
45 @@ -1,7 +1,7 @@
46 # SPDX-License-Identifier: GPL-2.0
47 VERSION = 4
48 PATCHLEVEL = 15
49 -SUBLEVEL = 9
50 +SUBLEVEL = 10
51 EXTRAVERSION =
52 NAME = Fearless Coyote
53
54 @@ -487,6 +487,11 @@ KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
55 KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
56 endif
57
58 +RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
59 +RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
60 +RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
61 +export RETPOLINE_CFLAGS
62 +
63 ifeq ($(config-targets),1)
64 # ===========================================================================
65 # *config targets only - make sure prerequisites are updated, and descend
66 diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
67 index 451f96f3377c..5bdc2c4db9ad 100644
68 --- a/arch/arm64/mm/mmu.c
69 +++ b/arch/arm64/mm/mmu.c
70 @@ -107,7 +107,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
71 * The following mapping attributes may be updated in live
72 * kernel mappings without the need for break-before-make.
73 */
74 - static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
75 + static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
76
77 /* creating or taking down mappings is always safe */
78 if (old == 0 || new == 0)
79 @@ -117,9 +117,9 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
80 if ((old | new) & PTE_CONT)
81 return false;
82
83 - /* Transitioning from Global to Non-Global is safe */
84 - if (((old ^ new) == PTE_NG) && (new & PTE_NG))
85 - return true;
86 + /* Transitioning from Non-Global to Global is unsafe */
87 + if (old & ~new & PTE_NG)
88 + return false;
89
90 return ((old ^ new) & ~mask) == 0;
91 }
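
The arm64 hunk above inverts the old special case: instead of whitelisting the Global-to-Non-Global transition, PTE_NG joins the safe mask and any transition that clears nG (Non-Global back to Global) is rejected. A standalone C sketch of the new predicate, with mock bit positions standing in for the real PTE_* definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

/* Mock bit positions for illustration; not the authoritative layout. */
#define PTE_RDONLY (1ULL << 7)
#define PTE_NG     (1ULL << 11)
#define PTE_WRITE  (1ULL << 51)
#define PTE_CONT   (1ULL << 52)
#define PTE_PXN    (1ULL << 53)

static bool pgattr_change_is_safe(pteval_t old, pteval_t new)
{
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	return ((old ^ new) & ~mask) == 0;
}

int main(void)
{
	/* nG set in old, cleared in new: now refused (prints 0) */
	printf("%d\n", pgattr_change_is_safe(PTE_NG | PTE_RDONLY, PTE_RDONLY));
	/* Global to Non-Global: still allowed (prints 1) */
	printf("%d\n", pgattr_change_is_safe(PTE_RDONLY, PTE_RDONLY | PTE_NG));
	return 0;
}
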
92 diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
93 index 9ab48ff80c1c..6d11ae581ea7 100644
94 --- a/arch/mips/ath25/board.c
95 +++ b/arch/mips/ath25/board.c
96 @@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
97 }
98
99 board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
100 + if (!board_data)
101 + goto error;
102 ath25_board.config = (struct ath25_boarddata *)board_data;
103 memcpy_fromio(board_data, bcfg, 0x100);
104 if (broken_boarddata) {
105 diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
106 index 5b3a3f6a9ad3..d99f5242169e 100644
107 --- a/arch/mips/cavium-octeon/octeon-irq.c
108 +++ b/arch/mips/cavium-octeon/octeon-irq.c
109 @@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
110 }
111
112 host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
113 + if (!host_data)
114 + return -ENOMEM;
115 raw_spin_lock_init(&host_data->lock);
116
117 addr = of_get_address(ciu_node, 0, NULL, NULL);
118 diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
119 index 19c88d770054..fcf9af492d60 100644
120 --- a/arch/mips/kernel/mips-cpc.c
121 +++ b/arch/mips/kernel/mips-cpc.c
122 @@ -10,6 +10,8 @@
123
124 #include <linux/errno.h>
125 #include <linux/percpu.h>
126 +#include <linux/of.h>
127 +#include <linux/of_address.h>
128 #include <linux/spinlock.h>
129
130 #include <asm/mips-cps.h>
131 @@ -22,6 +24,17 @@ static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
132
133 phys_addr_t __weak mips_cpc_default_phys_base(void)
134 {
135 + struct device_node *cpc_node;
136 + struct resource res;
137 + int err;
138 +
139 + cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
140 + if (cpc_node) {
141 + err = of_address_to_resource(cpc_node, 0, &res);
142 + if (!err)
143 + return res.start;
144 + }
145 +
146 return 0;
147 }
148
149 diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
150 index 87dcac2447c8..382d12eb88f0 100644
151 --- a/arch/mips/kernel/smp-bmips.c
152 +++ b/arch/mips/kernel/smp-bmips.c
153 @@ -168,11 +168,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
154 return;
155 }
156
157 - if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
158 - "smp_ipi0", NULL))
159 + if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
160 + IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
161 panic("Can't request IPI0 interrupt");
162 - if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
163 - "smp_ipi1", NULL))
164 + if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
165 + IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
166 panic("Can't request IPI1 interrupt");
167 }
168
169 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
170 index 5c03e371b7b8..004684eaa827 100644
171 --- a/arch/s390/kvm/kvm-s390.c
172 +++ b/arch/s390/kvm/kvm-s390.c
173 @@ -2118,6 +2118,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
174 /* we still need the basic sca for the ipte control */
175 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
176 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
177 + return;
178 }
179 read_lock(&vcpu->kvm->arch.sca_lock);
180 if (vcpu->kvm->arch.use_esca) {
181 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
182 index 20da391b5f32..7bb4eb14a2e0 100644
183 --- a/arch/x86/Kconfig
184 +++ b/arch/x86/Kconfig
185 @@ -432,6 +432,7 @@ config GOLDFISH
186 config RETPOLINE
187 bool "Avoid speculative indirect branches in kernel"
188 default y
189 + select STACK_VALIDATION if HAVE_STACK_VALIDATION
190 help
191 Compile kernel with the retpoline compiler options to guard against
192 kernel-to-user data leaks by avoiding speculative indirect
193 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
194 index fad55160dcb9..498c1b812300 100644
195 --- a/arch/x86/Makefile
196 +++ b/arch/x86/Makefile
197 @@ -232,10 +232,9 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
198
199 # Avoid indirect branches in kernel to deal with Spectre
200 ifdef CONFIG_RETPOLINE
201 - RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
202 - ifneq ($(RETPOLINE_CFLAGS),)
203 - KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
204 - endif
205 +ifneq ($(RETPOLINE_CFLAGS),)
206 + KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
207 +endif
208 endif
209
210 archscripts: scripts_basic
211 diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
212 index dce7092ab24a..5d10b7a85cad 100644
213 --- a/arch/x86/entry/calling.h
214 +++ b/arch/x86/entry/calling.h
215 @@ -97,7 +97,7 @@ For 32-bit we have the following conventions - kernel is built with
216
217 #define SIZEOF_PTREGS 21*8
218
219 -.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
220 +.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
221 /*
222 * Push registers and sanitize registers of values that a
223 * speculation attack might otherwise want to exploit. The
224 @@ -105,32 +105,41 @@ For 32-bit we have the following conventions - kernel is built with
225 * could be put to use in a speculative execution gadget.
226 * Interleave XOR with PUSH for better uop scheduling:
227 */
228 + .if \save_ret
229 + pushq %rsi /* pt_regs->si */
230 + movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
231 + movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */
232 + .else
233 pushq %rdi /* pt_regs->di */
234 pushq %rsi /* pt_regs->si */
235 + .endif
236 pushq \rdx /* pt_regs->dx */
237 pushq %rcx /* pt_regs->cx */
238 pushq \rax /* pt_regs->ax */
239 pushq %r8 /* pt_regs->r8 */
240 - xorq %r8, %r8 /* nospec r8 */
241 + xorl %r8d, %r8d /* nospec r8 */
242 pushq %r9 /* pt_regs->r9 */
243 - xorq %r9, %r9 /* nospec r9 */
244 + xorl %r9d, %r9d /* nospec r9 */
245 pushq %r10 /* pt_regs->r10 */
246 - xorq %r10, %r10 /* nospec r10 */
247 + xorl %r10d, %r10d /* nospec r10 */
248 pushq %r11 /* pt_regs->r11 */
249 - xorq %r11, %r11 /* nospec r11*/
250 + xorl %r11d, %r11d /* nospec r11*/
251 pushq %rbx /* pt_regs->rbx */
252 xorl %ebx, %ebx /* nospec rbx*/
253 pushq %rbp /* pt_regs->rbp */
254 xorl %ebp, %ebp /* nospec rbp*/
255 pushq %r12 /* pt_regs->r12 */
256 - xorq %r12, %r12 /* nospec r12*/
257 + xorl %r12d, %r12d /* nospec r12*/
258 pushq %r13 /* pt_regs->r13 */
259 - xorq %r13, %r13 /* nospec r13*/
260 + xorl %r13d, %r13d /* nospec r13*/
261 pushq %r14 /* pt_regs->r14 */
262 - xorq %r14, %r14 /* nospec r14*/
263 + xorl %r14d, %r14d /* nospec r14*/
264 pushq %r15 /* pt_regs->r15 */
265 - xorq %r15, %r15 /* nospec r15*/
266 + xorl %r15d, %r15d /* nospec r15*/
267 UNWIND_HINT_REGS
268 + .if \save_ret
269 + pushq %rsi /* return address on top of stack */
270 + .endif
271 .endm
272
273 .macro POP_REGS pop_rdi=1 skip_r11rcx=0
274 diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
275 index 2a35b1e0fb90..60c4c342316c 100644
276 --- a/arch/x86/entry/entry_32.S
277 +++ b/arch/x86/entry/entry_32.S
278 @@ -252,8 +252,7 @@ ENTRY(__switch_to_asm)
279 * exist, overwrite the RSB with entries which capture
280 * speculative execution to prevent attack.
281 */
282 - /* Clobbers %ebx */
283 - FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
284 + FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
285 #endif
286
287 /* restore callee-saved registers */
288 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
289 index 4fd9044e72e7..50dcbf640850 100644
290 --- a/arch/x86/entry/entry_64.S
291 +++ b/arch/x86/entry/entry_64.S
292 @@ -364,8 +364,7 @@ ENTRY(__switch_to_asm)
293 * exist, overwrite the RSB with entries which capture
294 * speculative execution to prevent attack.
295 */
296 - /* Clobbers %rbx */
297 - FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
298 + FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
299 #endif
300
301 /* restore callee-saved registers */
302 @@ -871,12 +870,8 @@ ENTRY(\sym)
303 pushq $-1 /* ORIG_RAX: no syscall to restart */
304 .endif
305
306 - /* Save all registers in pt_regs */
307 - PUSH_AND_CLEAR_REGS
308 - ENCODE_FRAME_POINTER
309 -
310 .if \paranoid < 2
311 - testb $3, CS(%rsp) /* If coming from userspace, switch stacks */
312 + testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */
313 jnz .Lfrom_usermode_switch_stack_\@
314 .endif
315
316 @@ -1123,13 +1118,15 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
317 #endif
318
319 /*
320 - * Switch gs if needed.
321 + * Save all registers in pt_regs, and switch gs if needed.
322 * Use slow, but surefire "are we in kernel?" check.
323 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
324 */
325 ENTRY(paranoid_entry)
326 UNWIND_HINT_FUNC
327 cld
328 + PUSH_AND_CLEAR_REGS save_ret=1
329 + ENCODE_FRAME_POINTER 8
330 movl $1, %ebx
331 movl $MSR_GS_BASE, %ecx
332 rdmsr
333 @@ -1174,12 +1171,14 @@ ENTRY(paranoid_exit)
334 END(paranoid_exit)
335
336 /*
337 - * Switch gs if needed.
338 + * Save all registers in pt_regs, and switch GS if needed.
339 * Return: EBX=0: came from user mode; EBX=1: otherwise
340 */
341 ENTRY(error_entry)
342 - UNWIND_HINT_REGS offset=8
343 + UNWIND_HINT_FUNC
344 cld
345 + PUSH_AND_CLEAR_REGS save_ret=1
346 + ENCODE_FRAME_POINTER 8
347 testb $3, CS+8(%rsp)
348 jz .Lerror_kernelspace
349
350 @@ -1570,8 +1569,6 @@ end_repeat_nmi:
351 * frame to point back to repeat_nmi.
352 */
353 pushq $-1 /* ORIG_RAX: no syscall to restart */
354 - PUSH_AND_CLEAR_REGS
355 - ENCODE_FRAME_POINTER
356
357 /*
358 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
359 diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
360 index fd65e016e413..364ea4a207be 100644
361 --- a/arch/x86/entry/entry_64_compat.S
362 +++ b/arch/x86/entry/entry_64_compat.S
363 @@ -85,25 +85,25 @@ ENTRY(entry_SYSENTER_compat)
364 pushq %rcx /* pt_regs->cx */
365 pushq $-ENOSYS /* pt_regs->ax */
366 pushq $0 /* pt_regs->r8 = 0 */
367 - xorq %r8, %r8 /* nospec r8 */
368 + xorl %r8d, %r8d /* nospec r8 */
369 pushq $0 /* pt_regs->r9 = 0 */
370 - xorq %r9, %r9 /* nospec r9 */
371 + xorl %r9d, %r9d /* nospec r9 */
372 pushq $0 /* pt_regs->r10 = 0 */
373 - xorq %r10, %r10 /* nospec r10 */
374 + xorl %r10d, %r10d /* nospec r10 */
375 pushq $0 /* pt_regs->r11 = 0 */
376 - xorq %r11, %r11 /* nospec r11 */
377 + xorl %r11d, %r11d /* nospec r11 */
378 pushq %rbx /* pt_regs->rbx */
379 xorl %ebx, %ebx /* nospec rbx */
380 pushq %rbp /* pt_regs->rbp (will be overwritten) */
381 xorl %ebp, %ebp /* nospec rbp */
382 pushq $0 /* pt_regs->r12 = 0 */
383 - xorq %r12, %r12 /* nospec r12 */
384 + xorl %r12d, %r12d /* nospec r12 */
385 pushq $0 /* pt_regs->r13 = 0 */
386 - xorq %r13, %r13 /* nospec r13 */
387 + xorl %r13d, %r13d /* nospec r13 */
388 pushq $0 /* pt_regs->r14 = 0 */
389 - xorq %r14, %r14 /* nospec r14 */
390 + xorl %r14d, %r14d /* nospec r14 */
391 pushq $0 /* pt_regs->r15 = 0 */
392 - xorq %r15, %r15 /* nospec r15 */
393 + xorl %r15d, %r15d /* nospec r15 */
394 cld
395
396 /*
397 @@ -224,25 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
398 pushq %rbp /* pt_regs->cx (stashed in bp) */
399 pushq $-ENOSYS /* pt_regs->ax */
400 pushq $0 /* pt_regs->r8 = 0 */
401 - xorq %r8, %r8 /* nospec r8 */
402 + xorl %r8d, %r8d /* nospec r8 */
403 pushq $0 /* pt_regs->r9 = 0 */
404 - xorq %r9, %r9 /* nospec r9 */
405 + xorl %r9d, %r9d /* nospec r9 */
406 pushq $0 /* pt_regs->r10 = 0 */
407 - xorq %r10, %r10 /* nospec r10 */
408 + xorl %r10d, %r10d /* nospec r10 */
409 pushq $0 /* pt_regs->r11 = 0 */
410 - xorq %r11, %r11 /* nospec r11 */
411 + xorl %r11d, %r11d /* nospec r11 */
412 pushq %rbx /* pt_regs->rbx */
413 xorl %ebx, %ebx /* nospec rbx */
414 pushq %rbp /* pt_regs->rbp (will be overwritten) */
415 xorl %ebp, %ebp /* nospec rbp */
416 pushq $0 /* pt_regs->r12 = 0 */
417 - xorq %r12, %r12 /* nospec r12 */
418 + xorl %r12d, %r12d /* nospec r12 */
419 pushq $0 /* pt_regs->r13 = 0 */
420 - xorq %r13, %r13 /* nospec r13 */
421 + xorl %r13d, %r13d /* nospec r13 */
422 pushq $0 /* pt_regs->r14 = 0 */
423 - xorq %r14, %r14 /* nospec r14 */
424 + xorl %r14d, %r14d /* nospec r14 */
425 pushq $0 /* pt_regs->r15 = 0 */
426 - xorq %r15, %r15 /* nospec r15 */
427 + xorl %r15d, %r15d /* nospec r15 */
428
429 /*
430 * User mode is traced as though IRQs are on, and SYSENTER
431 @@ -298,9 +298,9 @@ sysret32_from_system_call:
432 */
433 SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9
434
435 - xorq %r8, %r8
436 - xorq %r9, %r9
437 - xorq %r10, %r10
438 + xorl %r8d, %r8d
439 + xorl %r9d, %r9d
440 + xorl %r10d, %r10d
441 swapgs
442 sysretl
443 END(entry_SYSCALL_compat)
444 @@ -358,25 +358,25 @@ ENTRY(entry_INT80_compat)
445 pushq %rcx /* pt_regs->cx */
446 pushq $-ENOSYS /* pt_regs->ax */
447 pushq $0 /* pt_regs->r8 = 0 */
448 - xorq %r8, %r8 /* nospec r8 */
449 + xorl %r8d, %r8d /* nospec r8 */
450 pushq $0 /* pt_regs->r9 = 0 */
451 - xorq %r9, %r9 /* nospec r9 */
452 + xorl %r9d, %r9d /* nospec r9 */
453 pushq $0 /* pt_regs->r10 = 0 */
454 - xorq %r10, %r10 /* nospec r10 */
455 + xorl %r10d, %r10d /* nospec r10 */
456 pushq $0 /* pt_regs->r11 = 0 */
457 - xorq %r11, %r11 /* nospec r11 */
458 + xorl %r11d, %r11d /* nospec r11 */
459 pushq %rbx /* pt_regs->rbx */
460 xorl %ebx, %ebx /* nospec rbx */
461 pushq %rbp /* pt_regs->rbp */
462 xorl %ebp, %ebp /* nospec rbp */
463 pushq %r12 /* pt_regs->r12 */
464 - xorq %r12, %r12 /* nospec r12 */
465 + xorl %r12d, %r12d /* nospec r12 */
466 pushq %r13 /* pt_regs->r13 */
467 - xorq %r13, %r13 /* nospec r13 */
468 + xorl %r13d, %r13d /* nospec r13 */
469 pushq %r14 /* pt_regs->r14 */
470 - xorq %r14, %r14 /* nospec r14 */
471 + xorl %r14d, %r14d /* nospec r14 */
472 pushq %r15 /* pt_regs->r15 */
473 - xorq %r15, %r15 /* nospec r15 */
474 + xorl %r15d, %r15d /* nospec r15 */
475 cld
476
477 /*
478 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
479 index 4d4015ddcf26..c356098b6fb9 100644
480 --- a/arch/x86/include/asm/apm.h
481 +++ b/arch/x86/include/asm/apm.h
482 @@ -7,6 +7,8 @@
483 #ifndef _ASM_X86_MACH_DEFAULT_APM_H
484 #define _ASM_X86_MACH_DEFAULT_APM_H
485
486 +#include <asm/nospec-branch.h>
487 +
488 #ifdef APM_ZERO_SEGS
489 # define APM_DO_ZERO_SEGS \
490 "pushl %%ds\n\t" \
491 @@ -32,6 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
492 * N.B. We do NOT need a cld after the BIOS call
493 * because we always save and restore the flags.
494 */
495 + firmware_restrict_branch_speculation_start();
496 __asm__ __volatile__(APM_DO_ZERO_SEGS
497 "pushl %%edi\n\t"
498 "pushl %%ebp\n\t"
499 @@ -44,6 +47,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
500 "=S" (*esi)
501 : "a" (func), "b" (ebx_in), "c" (ecx_in)
502 : "memory", "cc");
503 + firmware_restrict_branch_speculation_end();
504 }
505
506 static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
507 @@ -56,6 +60,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
508 * N.B. We do NOT need a cld after the BIOS call
509 * because we always save and restore the flags.
510 */
511 + firmware_restrict_branch_speculation_start();
512 __asm__ __volatile__(APM_DO_ZERO_SEGS
513 "pushl %%edi\n\t"
514 "pushl %%ebp\n\t"
515 @@ -68,6 +73,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
516 "=S" (si)
517 : "a" (func), "b" (ebx_in), "c" (ecx_in)
518 : "memory", "cc");
519 + firmware_restrict_branch_speculation_end();
520 return error;
521 }
522
523 diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
524 index 4d111616524b..1908214b9125 100644
525 --- a/arch/x86/include/asm/asm-prototypes.h
526 +++ b/arch/x86/include/asm/asm-prototypes.h
527 @@ -38,7 +38,4 @@ INDIRECT_THUNK(dx)
528 INDIRECT_THUNK(si)
529 INDIRECT_THUNK(di)
530 INDIRECT_THUNK(bp)
531 -asmlinkage void __fill_rsb(void);
532 -asmlinkage void __clear_rsb(void);
533 -
534 #endif /* CONFIG_RETPOLINE */
535 diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
536 index 73b5fff159a4..66c14347c502 100644
537 --- a/arch/x86/include/asm/cpufeatures.h
538 +++ b/arch/x86/include/asm/cpufeatures.h
539 @@ -211,6 +211,7 @@
540 #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
541
542 #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
543 +#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
544
545 /* Virtualization flags: Linux defined, word 8 */
546 #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
547 diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
548 index 85f6ccb80b91..a399c1ebf6f0 100644
549 --- a/arch/x86/include/asm/efi.h
550 +++ b/arch/x86/include/asm/efi.h
551 @@ -6,6 +6,7 @@
552 #include <asm/pgtable.h>
553 #include <asm/processor-flags.h>
554 #include <asm/tlb.h>
555 +#include <asm/nospec-branch.h>
556
557 /*
558 * We map the EFI regions needed for runtime services non-contiguously,
559 @@ -36,8 +37,18 @@
560
561 extern asmlinkage unsigned long efi_call_phys(void *, ...);
562
563 -#define arch_efi_call_virt_setup() kernel_fpu_begin()
564 -#define arch_efi_call_virt_teardown() kernel_fpu_end()
565 +#define arch_efi_call_virt_setup() \
566 +({ \
567 + kernel_fpu_begin(); \
568 + firmware_restrict_branch_speculation_start(); \
569 +})
570 +
571 +#define arch_efi_call_virt_teardown() \
572 +({ \
573 + firmware_restrict_branch_speculation_end(); \
574 + kernel_fpu_end(); \
575 +})
576 +
577
578 /*
579 * Wrap all the virtual calls in a way that forces the parameters on the stack.
580 @@ -73,6 +84,7 @@ struct efi_scratch {
581 efi_sync_low_kernel_mappings(); \
582 preempt_disable(); \
583 __kernel_fpu_begin(); \
584 + firmware_restrict_branch_speculation_start(); \
585 \
586 if (efi_scratch.use_pgd) { \
587 efi_scratch.prev_cr3 = __read_cr3(); \
588 @@ -91,6 +103,7 @@ struct efi_scratch {
589 __flush_tlb_all(); \
590 } \
591 \
592 + firmware_restrict_branch_speculation_end(); \
593 __kernel_fpu_end(); \
594 preempt_enable(); \
595 })
596 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
597 index c931b88982a0..1de72ce514cd 100644
598 --- a/arch/x86/include/asm/mmu_context.h
599 +++ b/arch/x86/include/asm/mmu_context.h
600 @@ -74,6 +74,7 @@ static inline void *ldt_slot_va(int slot)
601 return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
602 #else
603 BUG();
604 + return (void *)fix_to_virt(FIX_HOLE);
605 #endif
606 }
607
608 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
609 index 81a1be326571..d0dabeae0505 100644
610 --- a/arch/x86/include/asm/nospec-branch.h
611 +++ b/arch/x86/include/asm/nospec-branch.h
612 @@ -8,6 +8,50 @@
613 #include <asm/cpufeatures.h>
614 #include <asm/msr-index.h>
615
616 +/*
617 + * Fill the CPU return stack buffer.
618 + *
619 + * Each entry in the RSB, if used for a speculative 'ret', contains an
620 + * infinite 'pause; lfence; jmp' loop to capture speculative execution.
621 + *
622 + * This is required in various cases for retpoline and IBRS-based
623 + * mitigations for the Spectre variant 2 vulnerability. Sometimes to
624 + * eliminate potentially bogus entries from the RSB, and sometimes
625 + * purely to ensure that it doesn't get empty, which on some CPUs would
626 + * allow predictions from other (unwanted!) sources to be used.
627 + *
628 + * We define a CPP macro such that it can be used from both .S files and
629 + * inline assembly. It's possible to do a .macro and then include that
630 + * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
631 + */
632 +
633 +#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
634 +#define RSB_FILL_LOOPS 16 /* To avoid underflow */
635 +
636 +/*
637 + * Google experimented with loop-unrolling and this turned out to be
638 + * the optimal version — two calls, each with their own speculation
639 + * trap should their return address end up getting used, in a loop.
640 + */
641 +#define __FILL_RETURN_BUFFER(reg, nr, sp) \
642 + mov $(nr/2), reg; \
643 +771: \
644 + call 772f; \
645 +773: /* speculation trap */ \
646 + pause; \
647 + lfence; \
648 + jmp 773b; \
649 +772: \
650 + call 774f; \
651 +775: /* speculation trap */ \
652 + pause; \
653 + lfence; \
654 + jmp 775b; \
655 +774: \
656 + dec reg; \
657 + jnz 771b; \
658 + add $(BITS_PER_LONG/8) * nr, sp;
659 +
660 #ifdef __ASSEMBLY__
661
662 /*
663 @@ -23,6 +67,18 @@
664 .popsection
665 .endm
666
667 +/*
668 + * This should be used immediately before an indirect jump/call. It tells
669 + * objtool the subsequent indirect jump/call is vouched safe for retpoline
670 + * builds.
671 + */
672 +.macro ANNOTATE_RETPOLINE_SAFE
673 + .Lannotate_\@:
674 + .pushsection .discard.retpoline_safe
675 + _ASM_PTR .Lannotate_\@
676 + .popsection
677 +.endm
678 +
679 /*
680 * These are the bare retpoline primitives for indirect jmp and call.
681 * Do not use these directly; they only exist to make the ALTERNATIVE
682 @@ -59,9 +115,9 @@
683 .macro JMP_NOSPEC reg:req
684 #ifdef CONFIG_RETPOLINE
685 ANNOTATE_NOSPEC_ALTERNATIVE
686 - ALTERNATIVE_2 __stringify(jmp *\reg), \
687 + ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \
688 __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
689 - __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
690 + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
691 #else
692 jmp *\reg
693 #endif
694 @@ -70,18 +126,25 @@
695 .macro CALL_NOSPEC reg:req
696 #ifdef CONFIG_RETPOLINE
697 ANNOTATE_NOSPEC_ALTERNATIVE
698 - ALTERNATIVE_2 __stringify(call *\reg), \
699 + ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \
700 __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
701 - __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
702 + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
703 #else
704 call *\reg
705 #endif
706 .endm
707
708 -/* This clobbers the BX register */
709 -.macro FILL_RETURN_BUFFER nr:req ftr:req
710 + /*
711 + * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
712 + * monstrosity above, manually.
713 + */
714 +.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
715 #ifdef CONFIG_RETPOLINE
716 - ALTERNATIVE "", "call __clear_rsb", \ftr
717 + ANNOTATE_NOSPEC_ALTERNATIVE
718 + ALTERNATIVE "jmp .Lskip_rsb_\@", \
719 + __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
720 + \ftr
721 +.Lskip_rsb_\@:
722 #endif
723 .endm
724
725 @@ -93,6 +156,12 @@
726 ".long 999b - .\n\t" \
727 ".popsection\n\t"
728
729 +#define ANNOTATE_RETPOLINE_SAFE \
730 + "999:\n\t" \
731 + ".pushsection .discard.retpoline_safe\n\t" \
732 + _ASM_PTR " 999b\n\t" \
733 + ".popsection\n\t"
734 +
735 #if defined(CONFIG_X86_64) && defined(RETPOLINE)
736
737 /*
738 @@ -102,6 +171,7 @@
739 # define CALL_NOSPEC \
740 ANNOTATE_NOSPEC_ALTERNATIVE \
741 ALTERNATIVE( \
742 + ANNOTATE_RETPOLINE_SAFE \
743 "call *%[thunk_target]\n", \
744 "call __x86_indirect_thunk_%V[thunk_target]\n", \
745 X86_FEATURE_RETPOLINE)
746 @@ -156,26 +226,54 @@ extern char __indirect_thunk_end[];
747 static inline void vmexit_fill_RSB(void)
748 {
749 #ifdef CONFIG_RETPOLINE
750 - alternative_input("",
751 - "call __fill_rsb",
752 - X86_FEATURE_RETPOLINE,
753 - ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
754 + unsigned long loops;
755 +
756 + asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
757 + ALTERNATIVE("jmp 910f",
758 + __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
759 + X86_FEATURE_RETPOLINE)
760 + "910:"
761 + : "=r" (loops), ASM_CALL_CONSTRAINT
762 + : : "memory" );
763 #endif
764 }
765
766 +#define alternative_msr_write(_msr, _val, _feature) \
767 + asm volatile(ALTERNATIVE("", \
768 + "movl %[msr], %%ecx\n\t" \
769 + "movl %[val], %%eax\n\t" \
770 + "movl $0, %%edx\n\t" \
771 + "wrmsr", \
772 + _feature) \
773 + : : [msr] "i" (_msr), [val] "i" (_val) \
774 + : "eax", "ecx", "edx", "memory")
775 +
776 static inline void indirect_branch_prediction_barrier(void)
777 {
778 - asm volatile(ALTERNATIVE("",
779 - "movl %[msr], %%ecx\n\t"
780 - "movl %[val], %%eax\n\t"
781 - "movl $0, %%edx\n\t"
782 - "wrmsr",
783 - X86_FEATURE_USE_IBPB)
784 - : : [msr] "i" (MSR_IA32_PRED_CMD),
785 - [val] "i" (PRED_CMD_IBPB)
786 - : "eax", "ecx", "edx", "memory");
787 + alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
788 + X86_FEATURE_USE_IBPB);
789 }
790
791 +/*
792 + * With retpoline, we must use IBRS to restrict branch prediction
793 + * before calling into firmware.
794 + *
795 + * (Implemented as CPP macros due to header hell.)
796 + */
797 +#define firmware_restrict_branch_speculation_start() \
798 +do { \
799 + preempt_disable(); \
800 + alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
801 + X86_FEATURE_USE_IBRS_FW); \
802 +} while (0)
803 +
804 +#define firmware_restrict_branch_speculation_end() \
805 +do { \
806 + alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
807 + X86_FEATURE_USE_IBRS_FW); \
808 + preempt_enable(); \
809 +} while (0)
810 +
811 #endif /* __ASSEMBLY__ */
812
813 /*
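
The firmware_restrict_branch_speculation_start()/end() pair defined here is what the apm.h and efi.h hunks above wrap around BIOS and EFI runtime calls. A minimal userspace mock of the bracketing pattern; the stub bodies only trace what the real alternatives-patched MSR writes and preemption toggles would do:

#include <stdio.h>

/* Stubs: the real macros disable preemption and, when IBRS_FW is set,
 * toggle the IBRS bit in MSR_IA32_SPEC_CTRL via alternative_msr_write(). */
static void firmware_restrict_branch_speculation_start(void)
{
	puts("preempt off, IBRS set for firmware");
}

static void firmware_restrict_branch_speculation_end(void)
{
	puts("IBRS cleared, preempt on");
}

/* Stands in for an APM BIOS or EFI runtime service call. */
static long firmware_call(long arg)
{
	return arg + 1;
}

int main(void)
{
	long ret;

	firmware_restrict_branch_speculation_start();
	ret = firmware_call(41);
	firmware_restrict_branch_speculation_end();

	printf("firmware returned %ld\n", ret);
	return 0;
}
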
814 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
815 index 554841fab717..c83a2f418cea 100644
816 --- a/arch/x86/include/asm/paravirt.h
817 +++ b/arch/x86/include/asm/paravirt.h
818 @@ -7,6 +7,7 @@
819 #ifdef CONFIG_PARAVIRT
820 #include <asm/pgtable_types.h>
821 #include <asm/asm.h>
822 +#include <asm/nospec-branch.h>
823
824 #include <asm/paravirt_types.h>
825
826 @@ -879,23 +880,27 @@ extern void default_banner(void);
827
828 #define INTERRUPT_RETURN \
829 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
830 - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
831 + ANNOTATE_RETPOLINE_SAFE; \
832 + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
833
834 #define DISABLE_INTERRUPTS(clobbers) \
835 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
836 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
837 + ANNOTATE_RETPOLINE_SAFE; \
838 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
839 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
840
841 #define ENABLE_INTERRUPTS(clobbers) \
842 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
843 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
844 + ANNOTATE_RETPOLINE_SAFE; \
845 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
846 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
847
848 #ifdef CONFIG_X86_32
849 #define GET_CR0_INTO_EAX \
850 push %ecx; push %edx; \
851 + ANNOTATE_RETPOLINE_SAFE; \
852 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
853 pop %edx; pop %ecx
854 #else /* !CONFIG_X86_32 */
855 @@ -917,21 +922,25 @@ extern void default_banner(void);
856 */
857 #define SWAPGS \
858 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
859 - call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
860 + ANNOTATE_RETPOLINE_SAFE; \
861 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
862 )
863
864 #define GET_CR2_INTO_RAX \
865 - call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
866 + ANNOTATE_RETPOLINE_SAFE; \
867 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
868
869 #define USERGS_SYSRET64 \
870 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
871 CLBR_NONE, \
872 - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
873 + ANNOTATE_RETPOLINE_SAFE; \
874 + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
875
876 #ifdef CONFIG_DEBUG_ENTRY
877 #define SAVE_FLAGS(clobbers) \
878 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
879 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
880 + ANNOTATE_RETPOLINE_SAFE; \
881 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \
882 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
883 #endif
884 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
885 index f624f1f10316..180bc0bff0fb 100644
886 --- a/arch/x86/include/asm/paravirt_types.h
887 +++ b/arch/x86/include/asm/paravirt_types.h
888 @@ -43,6 +43,7 @@
889 #include <asm/desc_defs.h>
890 #include <asm/kmap_types.h>
891 #include <asm/pgtable_types.h>
892 +#include <asm/nospec-branch.h>
893
894 struct page;
895 struct thread_struct;
896 @@ -392,7 +393,9 @@ int paravirt_disable_iospace(void);
897 * offset into the paravirt_patch_template structure, and can therefore be
898 * freely converted back into a structure offset.
899 */
900 -#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
901 +#define PARAVIRT_CALL \
902 + ANNOTATE_RETPOLINE_SAFE \
903 + "call *%c[paravirt_opptr];"
904
905 /*
906 * These macros are intended to wrap calls through one of the paravirt
907 diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
908 index 4e44250e7d0d..d65171120e90 100644
909 --- a/arch/x86/include/asm/refcount.h
910 +++ b/arch/x86/include/asm/refcount.h
911 @@ -67,13 +67,13 @@ static __always_inline __must_check
912 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
913 {
914 GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
915 - r->refs.counter, "er", i, "%0", e);
916 + r->refs.counter, "er", i, "%0", e, "cx");
917 }
918
919 static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
920 {
921 GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
922 - r->refs.counter, "%0", e);
923 + r->refs.counter, "%0", e, "cx");
924 }
925
926 static __always_inline __must_check
927 diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
928 index f91c365e57c3..4914a3e7c803 100644
929 --- a/arch/x86/include/asm/rmwcc.h
930 +++ b/arch/x86/include/asm/rmwcc.h
931 @@ -2,8 +2,7 @@
932 #ifndef _ASM_X86_RMWcc
933 #define _ASM_X86_RMWcc
934
935 -#define __CLOBBERS_MEM "memory"
936 -#define __CLOBBERS_MEM_CC_CX "memory", "cc", "cx"
937 +#define __CLOBBERS_MEM(clb...) "memory", ## clb
938
939 #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
940
941 @@ -40,18 +39,19 @@ do { \
942 #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
943
944 #define GEN_UNARY_RMWcc(op, var, arg0, cc) \
945 - __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM)
946 + __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
947
948 -#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc) \
949 +#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\
950 __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \
951 - __CLOBBERS_MEM_CC_CX)
952 + __CLOBBERS_MEM(clobbers))
953
954 #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
955 __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \
956 - __CLOBBERS_MEM, vcon (val))
957 + __CLOBBERS_MEM(), vcon (val))
958
959 -#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc) \
960 +#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc, \
961 + clobbers...) \
962 __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \
963 - __CLOBBERS_MEM_CC_CX, vcon (val))
964 + __CLOBBERS_MEM(clobbers), vcon (val))
965
966 #endif /* _ASM_X86_RMWcc */
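
The rmwcc.h rework above leans on a GNU C preprocessor feature: with a named variadic parameter, ", ## clb" swallows the comma when no extra clobbers are passed. A standalone demo of that behavior (compile with gcc; the comma-deleting ## is a GNU extension, as in the kernel):

#include <stdio.h>

#define CLOBBERS(clb...) "memory", ## clb

int main(void)
{
	/* Expands to { "memory" }: ## removed the stray comma. */
	const char *bare[] = { CLOBBERS() };
	/* Expands to { "memory", "cc", "cx" }. */
	const char *extra[] = { CLOBBERS("cc", "cx") };

	printf("bare has %zu entries, extra has %zu\n",
	       sizeof(bare) / sizeof(*bare),
	       sizeof(extra) / sizeof(*extra));
	return 0;
}
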
967 diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
968 index d6baf23782bc..5c019d23d06b 100644
969 --- a/arch/x86/include/asm/sections.h
970 +++ b/arch/x86/include/asm/sections.h
971 @@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[];
972
973 #if defined(CONFIG_X86_64)
974 extern char __end_rodata_hpage_align[];
975 +extern char __entry_trampoline_start[], __entry_trampoline_end[];
976 #endif
977
978 #endif /* _ASM_X86_SECTIONS_H */
979 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
980 index 461f53d27708..a4189762b266 100644
981 --- a/arch/x86/include/asm/smp.h
982 +++ b/arch/x86/include/asm/smp.h
983 @@ -129,6 +129,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
984 void cpu_disable_common(void);
985 void native_smp_prepare_boot_cpu(void);
986 void native_smp_prepare_cpus(unsigned int max_cpus);
987 +void calculate_max_logical_packages(void);
988 void native_smp_cpus_done(unsigned int max_cpus);
989 void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
990 int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
991 diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
992 index 91723461dc1f..435db58a7bad 100644
993 --- a/arch/x86/include/uapi/asm/mce.h
994 +++ b/arch/x86/include/uapi/asm/mce.h
995 @@ -30,6 +30,7 @@ struct mce {
996 __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */
997 __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
998 __u64 ppin; /* Protected Processor Inventory Number */
999 + __u32 microcode;/* Microcode revision */
1000 };
1001
1002 #define MCE_GET_RECORD_LEN _IOR('M', 1, int)
1003 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
1004 index 8a7963421460..93d5f55cd8b6 100644
1005 --- a/arch/x86/kernel/apic/io_apic.c
1006 +++ b/arch/x86/kernel/apic/io_apic.c
1007 @@ -1603,7 +1603,7 @@ static void __init delay_with_tsc(void)
1008 do {
1009 rep_nop();
1010 now = rdtsc();
1011 - } while ((now - start) < 40000000000UL / HZ &&
1012 + } while ((now - start) < 40000000000ULL / HZ &&
1013 time_before_eq(jiffies, end));
1014 }
1015
1016 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
1017 index d71c8b54b696..bfca937bdcc3 100644
1018 --- a/arch/x86/kernel/cpu/bugs.c
1019 +++ b/arch/x86/kernel/cpu/bugs.c
1020 @@ -300,6 +300,15 @@ static void __init spectre_v2_select_mitigation(void)
1021 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
1022 pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
1023 }
1024 +
1025 + /*
1026 + * Retpoline means the kernel is safe because it has no indirect
1027 + * branches. But firmware isn't, so use IBRS to protect that.
1028 + */
1029 + if (boot_cpu_has(X86_FEATURE_IBRS)) {
1030 + setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
1031 + pr_info("Enabling Restricted Speculation for firmware calls\n");
1032 + }
1033 }
1034
1035 #undef pr_fmt
1036 @@ -326,8 +335,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
1037 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1038 return sprintf(buf, "Not affected\n");
1039
1040 - return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1041 + return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1042 boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
1043 + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1044 spectre_v2_module_string());
1045 }
1046 #endif
1047 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
1048 index d19e903214b4..4aa9fd379390 100644
1049 --- a/arch/x86/kernel/cpu/intel.c
1050 +++ b/arch/x86/kernel/cpu/intel.c
1051 @@ -144,6 +144,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
1052 {
1053 int i;
1054
1055 + /*
1056 + * We know that the hypervisor lie to us on the microcode version so
1057 + * we may as well hope that it is running the correct version.
1058 + */
1059 + if (cpu_has(c, X86_FEATURE_HYPERVISOR))
1060 + return false;
1061 +
1062 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
1063 if (c->x86_model == spectre_bad_microcodes[i].model &&
1064 c->x86_stepping == spectre_bad_microcodes[i].stepping)
1065 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
1066 index 2fe482f6ecd8..7a16a0fd1cb1 100644
1067 --- a/arch/x86/kernel/cpu/mcheck/mce.c
1068 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
1069 @@ -57,6 +57,9 @@
1070
1071 static DEFINE_MUTEX(mce_log_mutex);
1072
1073 +/* sysfs synchronization */
1074 +static DEFINE_MUTEX(mce_sysfs_mutex);
1075 +
1076 #define CREATE_TRACE_POINTS
1077 #include <trace/events/mce.h>
1078
1079 @@ -131,6 +134,8 @@ void mce_setup(struct mce *m)
1080
1081 if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
1082 rdmsrl(MSR_PPIN, m->ppin);
1083 +
1084 + m->microcode = boot_cpu_data.microcode;
1085 }
1086
1087 DEFINE_PER_CPU(struct mce, injectm);
1088 @@ -263,7 +268,7 @@ static void __print_mce(struct mce *m)
1089 */
1090 pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
1091 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
1092 - cpu_data(m->extcpu).microcode);
1093 + m->microcode);
1094 }
1095
1096 static void print_mce(struct mce *m)
1097 @@ -2078,6 +2083,7 @@ static ssize_t set_ignore_ce(struct device *s,
1098 if (kstrtou64(buf, 0, &new) < 0)
1099 return -EINVAL;
1100
1101 + mutex_lock(&mce_sysfs_mutex);
1102 if (mca_cfg.ignore_ce ^ !!new) {
1103 if (new) {
1104 /* disable ce features */
1105 @@ -2090,6 +2096,8 @@ static ssize_t set_ignore_ce(struct device *s,
1106 on_each_cpu(mce_enable_ce, (void *)1, 1);
1107 }
1108 }
1109 + mutex_unlock(&mce_sysfs_mutex);
1110 +
1111 return size;
1112 }
1113
1114 @@ -2102,6 +2110,7 @@ static ssize_t set_cmci_disabled(struct device *s,
1115 if (kstrtou64(buf, 0, &new) < 0)
1116 return -EINVAL;
1117
1118 + mutex_lock(&mce_sysfs_mutex);
1119 if (mca_cfg.cmci_disabled ^ !!new) {
1120 if (new) {
1121 /* disable cmci */
1122 @@ -2113,6 +2122,8 @@ static ssize_t set_cmci_disabled(struct device *s,
1123 on_each_cpu(mce_enable_ce, NULL, 1);
1124 }
1125 }
1126 + mutex_unlock(&mce_sysfs_mutex);
1127 +
1128 return size;
1129 }
1130
1131 @@ -2120,8 +2131,19 @@ static ssize_t store_int_with_restart(struct device *s,
1132 struct device_attribute *attr,
1133 const char *buf, size_t size)
1134 {
1135 - ssize_t ret = device_store_int(s, attr, buf, size);
1136 + unsigned long old_check_interval = check_interval;
1137 + ssize_t ret = device_store_ulong(s, attr, buf, size);
1138 +
1139 + if (check_interval == old_check_interval)
1140 + return ret;
1141 +
1142 + if (check_interval < 1)
1143 + check_interval = 1;
1144 +
1145 + mutex_lock(&mce_sysfs_mutex);
1146 mce_restart();
1147 + mutex_unlock(&mce_sysfs_mutex);
1148 +
1149 return ret;
1150 }
1151
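
The reworked store_int_with_restart() above parses the value as an unsigned long, bails out when it is unchanged, clamps the interval to at least one second, and serializes mce_restart() behind the new mce_sysfs_mutex. A simplified userspace mock of that flow, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t sysfs_mutex = PTHREAD_MUTEX_INITIALIZER;
static long check_interval = 300;	/* seconds, like the MCE default */

static void mce_restart(void)
{
	puts("timers rearmed");
}

static void store_check_interval(const char *buf)
{
	long old = check_interval;

	check_interval = strtol(buf, NULL, 0);
	if (check_interval == old)
		return;			/* unchanged: nothing to do */
	if (check_interval < 1)
		check_interval = 1;	/* clamp, as the patch does */

	pthread_mutex_lock(&sysfs_mutex);
	mce_restart();
	pthread_mutex_unlock(&sysfs_mutex);
}

int main(void)
{
	store_check_interval("300");	/* unchanged: no restart */
	store_check_interval("0");	/* clamped to 1, restarts once */
	printf("check_interval = %ld\n", check_interval);
	return 0;
}
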
1152 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
1153 index 04a625f0fcda..0f545b3cf926 100644
1154 --- a/arch/x86/kernel/head_64.S
1155 +++ b/arch/x86/kernel/head_64.S
1156 @@ -23,6 +23,7 @@
1157 #include <asm/nops.h>
1158 #include "../entry/calling.h"
1159 #include <asm/export.h>
1160 +#include <asm/nospec-branch.h>
1161
1162 #ifdef CONFIG_PARAVIRT
1163 #include <asm/asm-offsets.h>
1164 @@ -134,6 +135,7 @@ ENTRY(secondary_startup_64)
1165
1166 /* Ensure I am executing from virtual addresses */
1167 movq $1f, %rax
1168 + ANNOTATE_RETPOLINE_SAFE
1169 jmp *%rax
1170 1:
1171 UNWIND_HINT_EMPTY
1172 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
1173 index bd36f3c33cd0..0715f827607c 100644
1174 --- a/arch/x86/kernel/kprobes/core.c
1175 +++ b/arch/x86/kernel/kprobes/core.c
1176 @@ -1168,10 +1168,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler);
1177
1178 bool arch_within_kprobe_blacklist(unsigned long addr)
1179 {
1180 + bool is_in_entry_trampoline_section = false;
1181 +
1182 +#ifdef CONFIG_X86_64
1183 + is_in_entry_trampoline_section =
1184 + (addr >= (unsigned long)__entry_trampoline_start &&
1185 + addr < (unsigned long)__entry_trampoline_end);
1186 +#endif
1187 return (addr >= (unsigned long)__kprobes_text_start &&
1188 addr < (unsigned long)__kprobes_text_end) ||
1189 (addr >= (unsigned long)__entry_text_start &&
1190 - addr < (unsigned long)__entry_text_end);
1191 + addr < (unsigned long)__entry_text_end) ||
1192 + is_in_entry_trampoline_section;
1193 }
1194
1195 int __init arch_init_kprobes(void)
1196 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
1197 index 844279c3ff4a..d0829a6e1bf5 100644
1198 --- a/arch/x86/kernel/smpboot.c
1199 +++ b/arch/x86/kernel/smpboot.c
1200 @@ -1282,11 +1282,10 @@ void __init native_smp_prepare_boot_cpu(void)
1201 cpu_set_state_online(me);
1202 }
1203
1204 -void __init native_smp_cpus_done(unsigned int max_cpus)
1205 +void __init calculate_max_logical_packages(void)
1206 {
1207 int ncpus;
1208
1209 - pr_debug("Boot done\n");
1210 /*
1211 * Today neither Intel nor AMD support heterogenous systems so
1212 * extrapolate the boot cpu's data to all packages.
1213 @@ -1294,6 +1293,13 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
1214 ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
1215 __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
1216 pr_info("Max logical packages: %u\n", __max_logical_packages);
1217 +}
1218 +
1219 +void __init native_smp_cpus_done(unsigned int max_cpus)
1220 +{
1221 + pr_debug("Boot done\n");
1222 +
1223 + calculate_max_logical_packages();
1224
1225 if (x86_has_numa_in_package)
1226 set_sched_topology(x86_numa_in_package_topology);
1227 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
1228 index 9b138a06c1a4..b854ebf5851b 100644
1229 --- a/arch/x86/kernel/vmlinux.lds.S
1230 +++ b/arch/x86/kernel/vmlinux.lds.S
1231 @@ -118,9 +118,11 @@ SECTIONS
1232
1233 #ifdef CONFIG_X86_64
1234 . = ALIGN(PAGE_SIZE);
1235 + VMLINUX_SYMBOL(__entry_trampoline_start) = .;
1236 _entry_trampoline = .;
1237 *(.entry_trampoline)
1238 . = ALIGN(PAGE_SIZE);
1239 + VMLINUX_SYMBOL(__entry_trampoline_end) = .;
1240 ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
1241 #endif
1242
1243 diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
1244 index 69a473919260..f23934bbaf4e 100644
1245 --- a/arch/x86/lib/Makefile
1246 +++ b/arch/x86/lib/Makefile
1247 @@ -27,7 +27,6 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
1248 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
1249 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
1250 lib-$(CONFIG_RETPOLINE) += retpoline.o
1251 -OBJECT_FILES_NON_STANDARD_retpoline.o :=y
1252
1253 obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
1254
1255 diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
1256 index 480edc3a5e03..c909961e678a 100644
1257 --- a/arch/x86/lib/retpoline.S
1258 +++ b/arch/x86/lib/retpoline.S
1259 @@ -7,7 +7,6 @@
1260 #include <asm/alternative-asm.h>
1261 #include <asm/export.h>
1262 #include <asm/nospec-branch.h>
1263 -#include <asm/bitsperlong.h>
1264
1265 .macro THUNK reg
1266 .section .text.__x86.indirect_thunk
1267 @@ -47,58 +46,3 @@ GENERATE_THUNK(r13)
1268 GENERATE_THUNK(r14)
1269 GENERATE_THUNK(r15)
1270 #endif
1271 -
1272 -/*
1273 - * Fill the CPU return stack buffer.
1274 - *
1275 - * Each entry in the RSB, if used for a speculative 'ret', contains an
1276 - * infinite 'pause; lfence; jmp' loop to capture speculative execution.
1277 - *
1278 - * This is required in various cases for retpoline and IBRS-based
1279 - * mitigations for the Spectre variant 2 vulnerability. Sometimes to
1280 - * eliminate potentially bogus entries from the RSB, and sometimes
1281 - * purely to ensure that it doesn't get empty, which on some CPUs would
1282 - * allow predictions from other (unwanted!) sources to be used.
1283 - *
1284 - * Google experimented with loop-unrolling and this turned out to be
1285 - * the optimal version - two calls, each with their own speculation
1286 - * trap should their return address end up getting used, in a loop.
1287 - */
1288 -.macro STUFF_RSB nr:req sp:req
1289 - mov $(\nr / 2), %_ASM_BX
1290 - .align 16
1291 -771:
1292 - call 772f
1293 -773: /* speculation trap */
1294 - pause
1295 - lfence
1296 - jmp 773b
1297 - .align 16
1298 -772:
1299 - call 774f
1300 -775: /* speculation trap */
1301 - pause
1302 - lfence
1303 - jmp 775b
1304 - .align 16
1305 -774:
1306 - dec %_ASM_BX
1307 - jnz 771b
1308 - add $((BITS_PER_LONG/8) * \nr), \sp
1309 -.endm
1310 -
1311 -#define RSB_FILL_LOOPS 16 /* To avoid underflow */
1312 -
1313 -ENTRY(__fill_rsb)
1314 - STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
1315 - ret
1316 -END(__fill_rsb)
1317 -EXPORT_SYMBOL_GPL(__fill_rsb)
1318 -
1319 -#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
1320 -
1321 -ENTRY(__clear_rsb)
1322 - STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
1323 - ret
1324 -END(__clear_rsb)
1325 -EXPORT_SYMBOL_GPL(__clear_rsb)
1326 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
1327 index 800de815519c..c88573d90f3e 100644
1328 --- a/arch/x86/mm/fault.c
1329 +++ b/arch/x86/mm/fault.c
1330 @@ -1248,10 +1248,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
1331 tsk = current;
1332 mm = tsk->mm;
1333
1334 - /*
1335 - * Detect and handle instructions that would cause a page fault for
1336 - * both a tracked kernel page and a userspace page.
1337 - */
1338 prefetchw(&mm->mmap_sem);
1339
1340 if (unlikely(kmmio_fault(regs, address)))
1341 diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
1342 index 01f682cf77a8..40a6085063d6 100644
1343 --- a/arch/x86/mm/mem_encrypt_boot.S
1344 +++ b/arch/x86/mm/mem_encrypt_boot.S
1345 @@ -15,6 +15,7 @@
1346 #include <asm/page.h>
1347 #include <asm/processor-flags.h>
1348 #include <asm/msr-index.h>
1349 +#include <asm/nospec-branch.h>
1350
1351 .text
1352 .code64
1353 @@ -59,6 +60,7 @@ ENTRY(sme_encrypt_execute)
1354 movq %rax, %r8 /* Workarea encryption routine */
1355 addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */
1356
1357 + ANNOTATE_RETPOLINE_SAFE
1358 call *%rax /* Call the encryption routine */
1359
1360 pop %r12
1361 diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
1362 index de53bd15df5a..24bb7598774e 100644
1363 --- a/arch/x86/realmode/rm/trampoline_64.S
1364 +++ b/arch/x86/realmode/rm/trampoline_64.S
1365 @@ -102,7 +102,7 @@ ENTRY(startup_32)
1366 * don't we'll eventually crash trying to execute encrypted
1367 * instructions.
1368 */
1369 - bt $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
1370 + btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
1371 jnc .Ldone
1372 movl $MSR_K8_SYSCFG, %ecx
1373 rdmsr
1374 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
1375 index 77c959cf81e7..7a43b2ae19f1 100644
1376 --- a/arch/x86/xen/smp.c
1377 +++ b/arch/x86/xen/smp.c
1378 @@ -122,6 +122,8 @@ void __init xen_smp_cpus_done(unsigned int max_cpus)
1379
1380 if (xen_hvm_domain())
1381 native_smp_cpus_done(max_cpus);
1382 + else
1383 + calculate_max_logical_packages();
1384
1385 if (xen_have_vcpu_info_placement)
1386 return;
1387 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
1388 index d5fe720cf149..89d2ee00cced 100644
1389 --- a/drivers/block/loop.c
1390 +++ b/drivers/block/loop.c
1391 @@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
1392 struct iov_iter i;
1393 ssize_t bw;
1394
1395 - iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
1396 + iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
1397
1398 file_start_write(file);
1399 bw = vfs_iter_write(file, &i, ppos, 0);
1400 diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
1401 index 3cec403a80b3..5294442505cb 100644
1402 --- a/drivers/char/tpm/tpm-interface.c
1403 +++ b/drivers/char/tpm/tpm-interface.c
1404 @@ -413,6 +413,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
1405 if (chip->dev.parent)
1406 pm_runtime_get_sync(chip->dev.parent);
1407
1408 + if (chip->ops->clk_enable != NULL)
1409 + chip->ops->clk_enable(chip, true);
1410 +
1411 /* Store the decision as chip->locality will be changed. */
1412 need_locality = chip->locality == -1;
1413
1414 @@ -489,6 +492,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
1415 chip->locality = -1;
1416 }
1417 out_no_locality:
1418 + if (chip->ops->clk_enable != NULL)
1419 + chip->ops->clk_enable(chip, false);
1420 +
1421 if (chip->dev.parent)
1422 pm_runtime_put_sync(chip->dev.parent);
1423
1424 diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
1425 index e2d1055fb814..f08949a5f678 100644
1426 --- a/drivers/char/tpm/tpm_tis.c
1427 +++ b/drivers/char/tpm/tpm_tis.c
1428 @@ -133,93 +133,14 @@ static int check_acpi_tpm2(struct device *dev)
1429 }
1430 #endif
1431
1432 -#ifdef CONFIG_X86
1433 -#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
1434 -#define ILB_REMAP_SIZE 0x100
1435 -#define LPC_CNTRL_REG_OFFSET 0x84
1436 -#define LPC_CLKRUN_EN (1 << 2)
1437 -
1438 -static void __iomem *ilb_base_addr;
1439 -
1440 -static inline bool is_bsw(void)
1441 -{
1442 - return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
1443 -}
1444 -
1445 -/**
1446 - * tpm_platform_begin_xfer() - clear LPC CLKRUN_EN i.e. clocks will be running
1447 - */
1448 -static void tpm_platform_begin_xfer(void)
1449 -{
1450 - u32 clkrun_val;
1451 -
1452 - if (!is_bsw())
1453 - return;
1454 -
1455 - clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
1456 -
1457 - /* Disable LPC CLKRUN# */
1458 - clkrun_val &= ~LPC_CLKRUN_EN;
1459 - iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
1460 -
1461 - /*
1462 - * Write any random value on port 0x80 which is on LPC, to make
1463 - * sure LPC clock is running before sending any TPM command.
1464 - */
1465 - outb(0xCC, 0x80);
1466 -
1467 -}
1468 -
1469 -/**
1470 - * tpm_platform_end_xfer() - set LPC CLKRUN_EN i.e. clocks can be turned off
1471 - */
1472 -static void tpm_platform_end_xfer(void)
1473 -{
1474 - u32 clkrun_val;
1475 -
1476 - if (!is_bsw())
1477 - return;
1478 -
1479 - clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
1480 -
1481 - /* Enable LPC CLKRUN# */
1482 - clkrun_val |= LPC_CLKRUN_EN;
1483 - iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
1484 -
1485 - /*
1486 - * Write any random value on port 0x80 which is on LPC, to make
1487 - * sure LPC clock is running before sending any TPM command.
1488 - */
1489 - outb(0xCC, 0x80);
1490 -
1491 -}
1492 -#else
1493 -static inline bool is_bsw(void)
1494 -{
1495 - return false;
1496 -}
1497 -
1498 -static void tpm_platform_begin_xfer(void)
1499 -{
1500 -}
1501 -
1502 -static void tpm_platform_end_xfer(void)
1503 -{
1504 -}
1505 -#endif
1506 -
1507 static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
1508 u8 *result)
1509 {
1510 struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
1511
1512 - tpm_platform_begin_xfer();
1513 -
1514 while (len--)
1515 *result++ = ioread8(phy->iobase + addr);
1516
1517 - tpm_platform_end_xfer();
1518 -
1519 return 0;
1520 }
1521
1522 @@ -228,13 +149,9 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
1523 {
1524 struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
1525
1526 - tpm_platform_begin_xfer();
1527 -
1528 while (len--)
1529 iowrite8(*value++, phy->iobase + addr);
1530
1531 - tpm_platform_end_xfer();
1532 -
1533 return 0;
1534 }
1535
1536 @@ -242,12 +159,8 @@ static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
1537 {
1538 struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
1539
1540 - tpm_platform_begin_xfer();
1541 -
1542 *result = ioread16(phy->iobase + addr);
1543
1544 - tpm_platform_end_xfer();
1545 -
1546 return 0;
1547 }
1548
1549 @@ -255,12 +168,8 @@ static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
1550 {
1551 struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
1552
1553 - tpm_platform_begin_xfer();
1554 -
1555 *result = ioread32(phy->iobase + addr);
1556
1557 - tpm_platform_end_xfer();
1558 -
1559 return 0;
1560 }
1561
1562 @@ -268,12 +177,8 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
1563 {
1564 struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
1565
1566 - tpm_platform_begin_xfer();
1567 -
1568 iowrite32(value, phy->iobase + addr);
1569
1570 - tpm_platform_end_xfer();
1571 -
1572 return 0;
1573 }
1574
1575 @@ -461,11 +366,6 @@ static int __init init_tis(void)
1576 if (rc)
1577 goto err_force;
1578
1579 -#ifdef CONFIG_X86
1580 - if (is_bsw())
1581 - ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
1582 - ILB_REMAP_SIZE);
1583 -#endif
1584 rc = platform_driver_register(&tis_drv);
1585 if (rc)
1586 goto err_platform;
1587 @@ -484,10 +384,6 @@ static int __init init_tis(void)
1588 err_platform:
1589 if (force_pdev)
1590 platform_device_unregister(force_pdev);
1591 -#ifdef CONFIG_X86
1592 - if (is_bsw())
1593 - iounmap(ilb_base_addr);
1594 -#endif
1595 err_force:
1596 return rc;
1597 }
1598 @@ -497,10 +393,6 @@ static void __exit cleanup_tis(void)
1599 pnp_unregister_driver(&tis_pnp_driver);
1600 platform_driver_unregister(&tis_drv);
1601
1602 -#ifdef CONFIG_X86
1603 - if (is_bsw())
1604 - iounmap(ilb_base_addr);
1605 -#endif
1606 if (force_pdev)
1607 platform_device_unregister(force_pdev);
1608 }
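
With the Braswell CLKRUN handling deleted from tpm_tis.c, the logic moves into tpm_tis_core.c below as a refcounted clk_enable operation: only the first enable and the last disable actually touch the LPC control register, so nested calls during a single TPM command are cheap. A standalone sketch of that counting pattern, with the register access mocked by a printf:

#include <stdbool.h>
#include <stdio.h>

static unsigned int clkrun_enabled;

/* Mock for the ioread32/iowrite32 dance on the LPC CNTRL register. */
static void lpc_set_clkrun_disabled(bool disabled)
{
	printf("CLKRUN# %s\n",
	       disabled ? "disabled (clocks free running)" : "re-enabled");
}

static void clkrun_enable(bool value)
{
	if (value) {
		if (++clkrun_enabled > 1)
			return;		/* nested user: hardware already set up */
		lpc_set_clkrun_disabled(true);
	} else {
		if (--clkrun_enabled)
			return;		/* inner user done, outer still active */
		lpc_set_clkrun_disabled(false);
	}
}

int main(void)
{
	clkrun_enable(true);	/* first user: touches the register */
	clkrun_enable(true);	/* nested: no hardware access */
	clkrun_enable(false);
	clkrun_enable(false);	/* last user: restores CLKRUN# */
	return 0;
}
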
1609 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
1610 index 7561922bc8f8..08ae49dee8b1 100644
1611 --- a/drivers/char/tpm/tpm_tis_core.c
1612 +++ b/drivers/char/tpm/tpm_tis_core.c
1613 @@ -31,6 +31,8 @@
1614 #include "tpm.h"
1615 #include "tpm_tis_core.h"
1616
1617 +static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);
1618 +
1619 /* Before we attempt to access the TPM we must see that the valid bit is set.
1620 * The specification says that this bit is 0 at reset and remains 0 until the
1621 * 'TPM has gone through its self test and initialization and has established
1622 @@ -422,19 +424,28 @@ static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
1623 int i, rc;
1624 u32 did_vid;
1625
1626 + if (chip->ops->clk_enable != NULL)
1627 + chip->ops->clk_enable(chip, true);
1628 +
1629 rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
1630 if (rc < 0)
1631 - return rc;
1632 + goto out;
1633
1634 for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
1635 if (vendor_timeout_overrides[i].did_vid != did_vid)
1636 continue;
1637 memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
1638 sizeof(vendor_timeout_overrides[i].timeout_us));
1639 - return true;
1640 + rc = true;
1641 }
1642
1643 - return false;
1644 + rc = false;
1645 +
1646 +out:
1647 + if (chip->ops->clk_enable != NULL)
1648 + chip->ops->clk_enable(chip, false);
1649 +
1650 + return rc;
1651 }
1652
1653 /*
1654 @@ -654,14 +665,73 @@ void tpm_tis_remove(struct tpm_chip *chip)
1655 u32 interrupt;
1656 int rc;
1657
1658 + tpm_tis_clkrun_enable(chip, true);
1659 +
1660 rc = tpm_tis_read32(priv, reg, &interrupt);
1661 if (rc < 0)
1662 interrupt = 0;
1663
1664 tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
1665 +
1666 + tpm_tis_clkrun_enable(chip, false);
1667 +
1668 + if (priv->ilb_base_addr)
1669 + iounmap(priv->ilb_base_addr);
1670 }
1671 EXPORT_SYMBOL_GPL(tpm_tis_remove);
1672
1673 +/**
1674 + * tpm_tis_clkrun_enable() - Keep clkrun protocol disabled for entire duration
1675 + * of a single TPM command
1676 + * @chip: TPM chip to use
1677 + * @value: 1 - Disable CLKRUN protocol, so that clocks are free running
1678 + * 0 - Enable CLKRUN protocol
1679 + * Call this function directly in tpm_tis_remove() in error or driver removal
1680 + * path, since the chip->ops is set to NULL in tpm_chip_unregister().
1681 + */
1682 +static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
1683 +{
1684 + struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
1685 + u32 clkrun_val;
1686 +
1687 + if (!IS_ENABLED(CONFIG_X86) || !is_bsw() ||
1688 + !data->ilb_base_addr)
1689 + return;
1690 +
1691 + if (value) {
1692 + data->clkrun_enabled++;
1693 + if (data->clkrun_enabled > 1)
1694 + return;
1695 + clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
1696 +
1697 + /* Disable LPC CLKRUN# */
1698 + clkrun_val &= ~LPC_CLKRUN_EN;
1699 + iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
1700 +
1701 + /*
1702 + * Write any random value on port 0x80 which is on LPC, to make
1703 + * sure LPC clock is running before sending any TPM command.
1704 + */
1705 + outb(0xCC, 0x80);
1706 + } else {
1707 + data->clkrun_enabled--;
1708 + if (data->clkrun_enabled)
1709 + return;
1710 +
1711 + clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
1712 +
1713 + /* Enable LPC CLKRUN# */
1714 + clkrun_val |= LPC_CLKRUN_EN;
1715 + iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
1716 +
1717 + /*
1718 + * Write any random value on port 0x80 which is on LPC, to make
1719 + * sure LPC clock is running before sending any TPM command.
1720 + */
1721 + outb(0xCC, 0x80);
1722 + }
1723 +}
1724 +
1725 static const struct tpm_class_ops tpm_tis = {
1726 .flags = TPM_OPS_AUTO_STARTUP,
1727 .status = tpm_tis_status,
1728 @@ -674,6 +744,7 @@ static const struct tpm_class_ops tpm_tis = {
1729 .req_canceled = tpm_tis_req_canceled,
1730 .request_locality = request_locality,
1731 .relinquish_locality = release_locality,
1732 + .clk_enable = tpm_tis_clkrun_enable,
1733 };
1734
1735 int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
1736 @@ -681,6 +752,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
1737 acpi_handle acpi_dev_handle)
1738 {
1739 u32 vendor, intfcaps, intmask;
1740 + u32 clkrun_val;
1741 u8 rid;
1742 int rc, probe;
1743 struct tpm_chip *chip;
1744 @@ -701,6 +773,23 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
1745 priv->phy_ops = phy_ops;
1746 dev_set_drvdata(&chip->dev, priv);
1747
1748 + if (is_bsw()) {
1749 + priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
1750 + ILB_REMAP_SIZE);
1751 + if (!priv->ilb_base_addr)
1752 + return -ENOMEM;
1753 +
1754 + clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET);
1755 + /* Check if CLKRUN# is already not enabled in the LPC bus */
1756 + if (!(clkrun_val & LPC_CLKRUN_EN)) {
1757 + iounmap(priv->ilb_base_addr);
1758 + priv->ilb_base_addr = NULL;
1759 + }
1760 + }
1761 +
1762 + if (chip->ops->clk_enable != NULL)
1763 + chip->ops->clk_enable(chip, true);
1764 +
1765 if (wait_startup(chip, 0) != 0) {
1766 rc = -ENODEV;
1767 goto out_err;
1768 @@ -791,9 +880,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
1769 }
1770 }
1771
1772 - return tpm_chip_register(chip);
1773 + rc = tpm_chip_register(chip);
1774 + if (rc)
1775 + goto out_err;
1776 +
1777 + if (chip->ops->clk_enable != NULL)
1778 + chip->ops->clk_enable(chip, false);
1779 +
1780 + return 0;
1781 out_err:
1782 + if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
1783 + chip->ops->clk_enable(chip, false);
1784 +
1785 tpm_tis_remove(chip);
1786 +
1787 return rc;
1788 }
1789 EXPORT_SYMBOL_GPL(tpm_tis_core_init);
1790 @@ -805,22 +905,31 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
1791 u32 intmask;
1792 int rc;
1793
1794 + if (chip->ops->clk_enable != NULL)
1795 + chip->ops->clk_enable(chip, true);
1796 +
1797 /* reenable interrupts that device may have lost or
1798 * BIOS/firmware may have disabled
1799 */
1800 rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
1801 if (rc < 0)
1802 - return;
1803 + goto out;
1804
1805 rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
1806 if (rc < 0)
1807 - return;
1808 + goto out;
1809
1810 intmask |= TPM_INTF_CMD_READY_INT
1811 | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
1812 | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
1813
1814 tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
1815 +
1816 +out:
1817 + if (chip->ops->clk_enable != NULL)
1818 + chip->ops->clk_enable(chip, false);
1819 +
1820 + return;
1821 }
1822
1823 int tpm_tis_resume(struct device *dev)
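The CLKRUN handling added above only touches the LPC control register on the first disable and the last enable; every nested caller just adjusts the counter. A minimal user-space sketch of that counting scheme (a hypothetical analogue of tpm_tis_clkrun_enable(), not part of the patch; puts() stands in for the register writes):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int clkrun_disabled;      /* mirrors data->clkrun_enabled */

    static void clkrun_ctl(bool disable)
    {
        if (disable) {
            if (clkrun_disabled++ > 0)        /* nested caller: just count */
                return;
            puts("clear LPC_CLKRUN_EN");      /* first caller writes the register */
        } else {
            if (--clkrun_disabled > 0)        /* inner bracket closed: stay off */
                return;
            puts("set LPC_CLKRUN_EN");        /* last caller restores the register */
        }
    }

    int main(void)
    {
        clkrun_ctl(true);                     /* outer TPM command */
        clkrun_ctl(true);                     /* nested register access */
        clkrun_ctl(false);
        clkrun_ctl(false);                    /* hardware touched only here */
        return 0;
    }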
1824 diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
1825 index 6bbac319ff3b..d5c6a2e952b3 100644
1826 --- a/drivers/char/tpm/tpm_tis_core.h
1827 +++ b/drivers/char/tpm/tpm_tis_core.h
1828 @@ -79,6 +79,11 @@ enum tis_defaults {
1829 #define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
1830 #define TPM_RID(l) (0x0F04 | ((l) << 12))
1831
1832 +#define LPC_CNTRL_OFFSET 0x84
1833 +#define LPC_CLKRUN_EN (1 << 2)
1834 +#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
1835 +#define ILB_REMAP_SIZE 0x100
1836 +
1837 enum tpm_tis_flags {
1838 TPM_TIS_ITPM_WORKAROUND = BIT(0),
1839 };
1840 @@ -89,6 +94,8 @@ struct tpm_tis_data {
1841 int irq;
1842 bool irq_tested;
1843 unsigned int flags;
1844 + void __iomem *ilb_base_addr;
1845 + u16 clkrun_enabled;
1846 wait_queue_head_t int_queue;
1847 wait_queue_head_t read_queue;
1848 const struct tpm_tis_phy_ops *phy_ops;
1849 @@ -144,6 +151,15 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
1850 return data->phy_ops->write32(data, addr, value);
1851 }
1852
1853 +static inline bool is_bsw(void)
1854 +{
1855 +#ifdef CONFIG_X86
1856 + return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
1857 +#else
1858 + return false;
1859 +#endif
1860 +}
1861 +
1862 void tpm_tis_remove(struct tpm_chip *chip);
1863 int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
1864 const struct tpm_tis_phy_ops *phy_ops,
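The new LPC_CLKRUN_EN bit is applied with a read-modify-write of the control register at LPC_CNTRL_OFFSET. A user-space sketch of that pattern with a made-up register value (the printf() calls stand in for ioread32()/iowrite32()):

    #include <stdint.h>
    #include <stdio.h>

    #define LPC_CLKRUN_EN (1u << 2)

    int main(void)
    {
        uint32_t reg = 0x15;                  /* pretend ioread32() result */

        reg &= ~LPC_CLKRUN_EN;                /* disable CLKRUN#, keep other bits */
        printf("cleared: %#x\n", reg);        /* 0x11 */

        reg |= LPC_CLKRUN_EN;                 /* re-enable on the way out */
        printf("set:     %#x\n", reg);        /* 0x15 */
        return 0;
    }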
1865 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
1866 index 57afad79f55d..8fa850a070e0 100644
1867 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
1868 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
1869 @@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
1870 size_t size;
1871 u32 retry = 3;
1872
1873 + if (amdgpu_acpi_pcie_notify_device_ready(adev))
1874 + return -EINVAL;
1875 +
1876 /* Get the device handle */
1877 handle = ACPI_HANDLE(&adev->pdev->dev);
1878 if (!handle)
1879 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1880 index df9cbc78e168..21e7ae159dff 100644
1881 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1882 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1883 @@ -737,9 +737,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
1884 enum drm_connector_status ret = connector_status_disconnected;
1885 int r;
1886
1887 - r = pm_runtime_get_sync(connector->dev->dev);
1888 - if (r < 0)
1889 - return connector_status_disconnected;
1890 + if (!drm_kms_helper_is_poll_worker()) {
1891 + r = pm_runtime_get_sync(connector->dev->dev);
1892 + if (r < 0)
1893 + return connector_status_disconnected;
1894 + }
1895
1896 if (encoder) {
1897 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1898 @@ -758,8 +760,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
1899 /* check acpi lid status ??? */
1900
1901 amdgpu_connector_update_scratch_regs(connector, ret);
1902 - pm_runtime_mark_last_busy(connector->dev->dev);
1903 - pm_runtime_put_autosuspend(connector->dev->dev);
1904 +
1905 + if (!drm_kms_helper_is_poll_worker()) {
1906 + pm_runtime_mark_last_busy(connector->dev->dev);
1907 + pm_runtime_put_autosuspend(connector->dev->dev);
1908 + }
1909 +
1910 return ret;
1911 }
1912
1913 @@ -869,9 +875,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
1914 enum drm_connector_status ret = connector_status_disconnected;
1915 int r;
1916
1917 - r = pm_runtime_get_sync(connector->dev->dev);
1918 - if (r < 0)
1919 - return connector_status_disconnected;
1920 + if (!drm_kms_helper_is_poll_worker()) {
1921 + r = pm_runtime_get_sync(connector->dev->dev);
1922 + if (r < 0)
1923 + return connector_status_disconnected;
1924 + }
1925
1926 encoder = amdgpu_connector_best_single_encoder(connector);
1927 if (!encoder)
1928 @@ -925,8 +933,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
1929 amdgpu_connector_update_scratch_regs(connector, ret);
1930
1931 out:
1932 - pm_runtime_mark_last_busy(connector->dev->dev);
1933 - pm_runtime_put_autosuspend(connector->dev->dev);
1934 + if (!drm_kms_helper_is_poll_worker()) {
1935 + pm_runtime_mark_last_busy(connector->dev->dev);
1936 + pm_runtime_put_autosuspend(connector->dev->dev);
1937 + }
1938
1939 return ret;
1940 }
1941 @@ -989,9 +999,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
1942 enum drm_connector_status ret = connector_status_disconnected;
1943 bool dret = false, broken_edid = false;
1944
1945 - r = pm_runtime_get_sync(connector->dev->dev);
1946 - if (r < 0)
1947 - return connector_status_disconnected;
1948 + if (!drm_kms_helper_is_poll_worker()) {
1949 + r = pm_runtime_get_sync(connector->dev->dev);
1950 + if (r < 0)
1951 + return connector_status_disconnected;
1952 + }
1953
1954 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
1955 ret = connector->status;
1956 @@ -1116,8 +1128,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
1957 amdgpu_connector_update_scratch_regs(connector, ret);
1958
1959 exit:
1960 - pm_runtime_mark_last_busy(connector->dev->dev);
1961 - pm_runtime_put_autosuspend(connector->dev->dev);
1962 + if (!drm_kms_helper_is_poll_worker()) {
1963 + pm_runtime_mark_last_busy(connector->dev->dev);
1964 + pm_runtime_put_autosuspend(connector->dev->dev);
1965 + }
1966
1967 return ret;
1968 }
1969 @@ -1360,9 +1374,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
1970 struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
1971 int r;
1972
1973 - r = pm_runtime_get_sync(connector->dev->dev);
1974 - if (r < 0)
1975 - return connector_status_disconnected;
1976 + if (!drm_kms_helper_is_poll_worker()) {
1977 + r = pm_runtime_get_sync(connector->dev->dev);
1978 + if (r < 0)
1979 + return connector_status_disconnected;
1980 + }
1981
1982 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
1983 ret = connector->status;
1984 @@ -1430,8 +1446,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
1985
1986 amdgpu_connector_update_scratch_regs(connector, ret);
1987 out:
1988 - pm_runtime_mark_last_busy(connector->dev->dev);
1989 - pm_runtime_put_autosuspend(connector->dev->dev);
1990 + if (!drm_kms_helper_is_poll_worker()) {
1991 + pm_runtime_mark_last_busy(connector->dev->dev);
1992 + pm_runtime_put_autosuspend(connector->dev->dev);
1993 + }
1994
1995 return ret;
1996 }
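The same transformation is applied to every connector ->detect hook this patch touches (amdgpu above, nouveau and radeon further down): skip the runtime-PM reference when running as the output poll worker, which is only scheduled while the device is already runtime-active. Taking the reference there can deadlock, because runtime suspend waits for polling to stop while the worker would wait inside pm_runtime_get_sync() for the suspend to finish. A condensed kernel-style sketch of the shape (probe_hardware() is a hypothetical stand-in for the connector-specific probing; drm_kms_helper_is_poll_worker() is added in the drm_probe_helper.c hunk below):

    static enum drm_connector_status
    example_detect(struct drm_connector *connector, bool force)
    {
        enum drm_connector_status status;
        int r;

        if (!drm_kms_helper_is_poll_worker()) {
            r = pm_runtime_get_sync(connector->dev->dev);
            if (r < 0)
                return connector_status_disconnected;
        }

        status = probe_hardware(connector);   /* hypothetical probing step */

        if (!drm_kms_helper_is_poll_worker()) {
            pm_runtime_mark_last_busy(connector->dev->dev);
            pm_runtime_put_autosuspend(connector->dev->dev);
        }
        return status;
    }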
1997 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1998 index e8bd50cf9785..9df2a8c7d35d 100644
1999 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
2000 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
2001 @@ -297,12 +297,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
2002 if (adev->uvd.vcpu_bo == NULL)
2003 return 0;
2004
2005 - for (i = 0; i < adev->uvd.max_handles; ++i)
2006 - if (atomic_read(&adev->uvd.handles[i]))
2007 - break;
2008 + /* only valid for physical mode */
2009 + if (adev->asic_type < CHIP_POLARIS10) {
2010 + for (i = 0; i < adev->uvd.max_handles; ++i)
2011 + if (atomic_read(&adev->uvd.handles[i]))
2012 + break;
2013
2014 - if (i == AMDGPU_MAX_UVD_HANDLES)
2015 - return 0;
2016 + if (i == adev->uvd.max_handles)
2017 + return 0;
2018 + }
2019
2020 cancel_delayed_work_sync(&adev->uvd.idle_work);
2021
2022 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
2023 index 419ba0ce7ee5..356ca560c80e 100644
2024 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
2025 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
2026 @@ -4403,34 +4403,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
2027 case CHIP_KAVERI:
2028 adev->gfx.config.max_shader_engines = 1;
2029 adev->gfx.config.max_tile_pipes = 4;
2030 - if ((adev->pdev->device == 0x1304) ||
2031 - (adev->pdev->device == 0x1305) ||
2032 - (adev->pdev->device == 0x130C) ||
2033 - (adev->pdev->device == 0x130F) ||
2034 - (adev->pdev->device == 0x1310) ||
2035 - (adev->pdev->device == 0x1311) ||
2036 - (adev->pdev->device == 0x131C)) {
2037 - adev->gfx.config.max_cu_per_sh = 8;
2038 - adev->gfx.config.max_backends_per_se = 2;
2039 - } else if ((adev->pdev->device == 0x1309) ||
2040 - (adev->pdev->device == 0x130A) ||
2041 - (adev->pdev->device == 0x130D) ||
2042 - (adev->pdev->device == 0x1313) ||
2043 - (adev->pdev->device == 0x131D)) {
2044 - adev->gfx.config.max_cu_per_sh = 6;
2045 - adev->gfx.config.max_backends_per_se = 2;
2046 - } else if ((adev->pdev->device == 0x1306) ||
2047 - (adev->pdev->device == 0x1307) ||
2048 - (adev->pdev->device == 0x130B) ||
2049 - (adev->pdev->device == 0x130E) ||
2050 - (adev->pdev->device == 0x1315) ||
2051 - (adev->pdev->device == 0x131B)) {
2052 - adev->gfx.config.max_cu_per_sh = 4;
2053 - adev->gfx.config.max_backends_per_se = 1;
2054 - } else {
2055 - adev->gfx.config.max_cu_per_sh = 3;
2056 - adev->gfx.config.max_backends_per_se = 1;
2057 - }
2058 + adev->gfx.config.max_cu_per_sh = 8;
2059 + adev->gfx.config.max_backends_per_se = 2;
2060 adev->gfx.config.max_sh_per_se = 1;
2061 adev->gfx.config.max_texture_channel_caches = 4;
2062 adev->gfx.config.max_gprs = 256;
2063 diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
2064 index 8284d5dbfc30..4c178feeb4bd 100644
2065 --- a/drivers/gpu/drm/amd/amdgpu/si.c
2066 +++ b/drivers/gpu/drm/amd/amdgpu/si.c
2067 @@ -31,6 +31,7 @@
2068 #include "amdgpu_uvd.h"
2069 #include "amdgpu_vce.h"
2070 #include "atom.h"
2071 +#include "amd_pcie.h"
2072 #include "amdgpu_powerplay.h"
2073 #include "sid.h"
2074 #include "si_ih.h"
2075 @@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
2076 {
2077 struct pci_dev *root = adev->pdev->bus->self;
2078 int bridge_pos, gpu_pos;
2079 - u32 speed_cntl, mask, current_data_rate;
2080 - int ret, i;
2081 + u32 speed_cntl, current_data_rate;
2082 + int i;
2083 u16 tmp16;
2084
2085 if (pci_is_root_bus(adev->pdev->bus))
2086 @@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
2087 if (adev->flags & AMD_IS_APU)
2088 return;
2089
2090 - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2091 - if (ret != 0)
2092 - return;
2093 -
2094 - if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
2095 + if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2096 + CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
2097 return;
2098
2099 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
2100 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
2101 LC_CURRENT_DATA_RATE_SHIFT;
2102 - if (mask & DRM_PCIE_SPEED_80) {
2103 + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
2104 if (current_data_rate == 2) {
2105 DRM_INFO("PCIE gen 3 link speeds already enabled\n");
2106 return;
2107 }
2108 DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
2109 - } else if (mask & DRM_PCIE_SPEED_50) {
2110 + } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
2111 if (current_data_rate == 1) {
2112 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
2113 return;
2114 @@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
2115 if (!gpu_pos)
2116 return;
2117
2118 - if (mask & DRM_PCIE_SPEED_80) {
2119 + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
2120 if (current_data_rate != 2) {
2121 u16 bridge_cfg, gpu_cfg;
2122 u16 bridge_cfg2, gpu_cfg2;
2123 @@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
2124
2125 pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
2126 tmp16 &= ~0xf;
2127 - if (mask & DRM_PCIE_SPEED_80)
2128 + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2129 tmp16 |= 3;
2130 - else if (mask & DRM_PCIE_SPEED_50)
2131 + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
2132 tmp16 |= 2;
2133 else
2134 tmp16 |= 1;
2135 diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
2136 index 3af322adae76..ea80b7ca5c37 100644
2137 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
2138 +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
2139 @@ -26,6 +26,7 @@
2140 #include "amdgpu_pm.h"
2141 #include "amdgpu_dpm.h"
2142 #include "amdgpu_atombios.h"
2143 +#include "amd_pcie.h"
2144 #include "sid.h"
2145 #include "r600_dpm.h"
2146 #include "si_dpm.h"
2147 @@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
2148 }
2149 }
2150
2151 -static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
2152 - u32 sys_mask,
2153 - enum amdgpu_pcie_gen asic_gen,
2154 - enum amdgpu_pcie_gen default_gen)
2155 -{
2156 - switch (asic_gen) {
2157 - case AMDGPU_PCIE_GEN1:
2158 - return AMDGPU_PCIE_GEN1;
2159 - case AMDGPU_PCIE_GEN2:
2160 - return AMDGPU_PCIE_GEN2;
2161 - case AMDGPU_PCIE_GEN3:
2162 - return AMDGPU_PCIE_GEN3;
2163 - default:
2164 - if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
2165 - return AMDGPU_PCIE_GEN3;
2166 - else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
2167 - return AMDGPU_PCIE_GEN2;
2168 - else
2169 - return AMDGPU_PCIE_GEN1;
2170 - }
2171 - return AMDGPU_PCIE_GEN1;
2172 -}
2173 -
2174 static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
2175 u32 *p, u32 *u)
2176 {
2177 @@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
2178 table->ACPIState.levels[0].vddc.index,
2179 &table->ACPIState.levels[0].std_vddc);
2180 }
2181 - table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
2182 - si_pi->sys_pcie_mask,
2183 - si_pi->boot_pcie_gen,
2184 - AMDGPU_PCIE_GEN1);
2185 + table->ACPIState.levels[0].gen2PCIE =
2186 + (u8)amdgpu_get_pcie_gen_support(adev,
2187 + si_pi->sys_pcie_mask,
2188 + si_pi->boot_pcie_gen,
2189 + AMDGPU_PCIE_GEN1);
2190
2191 if (si_pi->vddc_phase_shed_control)
2192 si_populate_phase_shedding_value(adev,
2193 @@ -7172,10 +7151,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
2194 pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
2195 pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
2196 pl->flags = le32_to_cpu(clock_info->si.ulFlags);
2197 - pl->pcie_gen = r600_get_pcie_gen_support(adev,
2198 - si_pi->sys_pcie_mask,
2199 - si_pi->boot_pcie_gen,
2200 - clock_info->si.ucPCIEGen);
2201 + pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
2202 + si_pi->sys_pcie_mask,
2203 + si_pi->boot_pcie_gen,
2204 + clock_info->si.ucPCIEGen);
2205
2206 /* patch up vddc if necessary */
2207 ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
2208 @@ -7330,7 +7309,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
2209 struct si_power_info *si_pi;
2210 struct atom_clock_dividers dividers;
2211 int ret;
2212 - u32 mask;
2213
2214 si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
2215 if (si_pi == NULL)
2216 @@ -7340,11 +7318,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
2217 eg_pi = &ni_pi->eg;
2218 pi = &eg_pi->rv7xx;
2219
2220 - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2221 - if (ret)
2222 - si_pi->sys_pcie_mask = 0;
2223 - else
2224 - si_pi->sys_pcie_mask = mask;
2225 + si_pi->sys_pcie_mask =
2226 + (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
2227 + CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
2228 si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
2229 si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
2230
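si_pi->sys_pcie_mask is now derived by masking and shifting the link-speed field out of adev->pm.pcie_gen_mask rather than querying the DRM helper. A user-space sketch of that extraction with hypothetical field constants (the real CAIL_* values live in amd_pcie.h):

    #include <stdint.h>
    #include <stdio.h>

    #define SUPPORT_MASK  0x00070000u         /* hypothetical field layout */
    #define SUPPORT_SHIFT 16

    int main(void)
    {
        uint32_t pcie_gen_mask = 0x00030005u; /* pretend CAIL capability word */
        uint32_t speeds = (pcie_gen_mask & SUPPORT_MASK) >> SUPPORT_SHIFT;

        printf("supported link speeds: %#x\n", speeds);   /* 0x3 */
        return 0;
    }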
2231 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
2232 index e230cc44a0a7..bd6cab5a9f43 100644
2233 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
2234 +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
2235 @@ -200,7 +200,8 @@ bool dc_stream_set_cursor_attributes(
2236 for (i = 0; i < MAX_PIPES; i++) {
2237 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
2238
2239 - if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
2240 + if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm &&
2241 + !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)
2242 continue;
2243 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
2244 continue;
2245 @@ -276,7 +277,8 @@ bool dc_stream_set_cursor_position(
2246 if (pipe_ctx->stream != stream ||
2247 (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
2248 !pipe_ctx->plane_state ||
2249 - (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
2250 + (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
2251 + !pipe_ctx->plane_res.ipp)
2252 continue;
2253
2254 if (pipe_ctx->plane_state->address.type
2255 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
2256 index fe88852b4774..00c728260616 100644
2257 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
2258 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
2259 @@ -683,6 +683,7 @@ void dce110_link_encoder_construct(
2260 {
2261 struct bp_encoder_cap_info bp_cap_info = {0};
2262 const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
2263 + enum bp_result result = BP_RESULT_OK;
2264
2265 enc110->base.funcs = &dce110_lnk_enc_funcs;
2266 enc110->base.ctx = init_data->ctx;
2267 @@ -757,15 +758,24 @@ void dce110_link_encoder_construct(
2268 enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
2269 }
2270
2271 + /* default to one to mirror Windows behavior */
2272 + enc110->base.features.flags.bits.HDMI_6GB_EN = 1;
2273 +
2274 + result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
2275 + enc110->base.id, &bp_cap_info);
2276 +
2277 /* Override features with DCE-specific values */
2278 - if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
2279 - enc110->base.ctx->dc_bios, enc110->base.id,
2280 - &bp_cap_info)) {
2281 + if (BP_RESULT_OK == result) {
2282 enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
2283 bp_cap_info.DP_HBR2_EN;
2284 enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
2285 bp_cap_info.DP_HBR3_EN;
2286 enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
2287 + } else {
2288 + dm_logger_write(enc110->base.ctx->logger, LOG_WARNING,
2289 + "%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
2290 + __func__,
2291 + result);
2292 }
2293 }
2294
2295 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
2296 index e33ec7fc5d09..6688cdb216e9 100644
2297 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
2298 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
2299 @@ -2791,10 +2791,13 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2300 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2301
2302
2303 - disable_mclk_switching = ((1 < info.display_count) ||
2304 - disable_mclk_switching_for_frame_lock ||
2305 - smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
2306 - (mode_info.refresh_rate > 120));
2307 + if (info.display_count == 0)
2308 + disable_mclk_switching = false;
2309 + else
2310 + disable_mclk_switching = ((1 < info.display_count) ||
2311 + disable_mclk_switching_for_frame_lock ||
2312 + smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
2313 + (mode_info.refresh_rate > 120));
2314
2315 sclk = smu7_ps->performance_levels[0].engine_clock;
2316 mclk = smu7_ps->performance_levels[0].memory_clock;
2317 @@ -4569,13 +4572,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
2318 int tmp_result, result = 0;
2319 uint32_t sclk_mask = 0, mclk_mask = 0;
2320
2321 - if (hwmgr->chip_id == CHIP_FIJI) {
2322 - if (request->type == AMD_PP_GFX_PROFILE)
2323 - smu7_enable_power_containment(hwmgr);
2324 - else if (request->type == AMD_PP_COMPUTE_PROFILE)
2325 - smu7_disable_power_containment(hwmgr);
2326 - }
2327 -
2328 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
2329 return -EINVAL;
2330
2331 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
2332 index f8d838c2c8ee..9acbefb33bd6 100644
2333 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
2334 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
2335 @@ -3208,10 +3208,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2336 disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
2337 force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
2338
2339 - disable_mclk_switching = (info.display_count > 1) ||
2340 - disable_mclk_switching_for_frame_lock ||
2341 - disable_mclk_switching_for_vr ||
2342 - force_mclk_high;
2343 + if (info.display_count == 0)
2344 + disable_mclk_switching = false;
2345 + else
2346 + disable_mclk_switching = (info.display_count > 1) ||
2347 + disable_mclk_switching_for_frame_lock ||
2348 + disable_mclk_switching_for_vr ||
2349 + force_mclk_high;
2350
2351 sclk = vega10_ps->performance_levels[0].gfx_clock;
2352 mclk = vega10_ps->performance_levels[0].mem_clock;
2353 diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
2354 index 279c1035c12d..5e1f1e2deb52 100644
2355 --- a/drivers/gpu/drm/drm_framebuffer.c
2356 +++ b/drivers/gpu/drm/drm_framebuffer.c
2357 @@ -118,6 +118,10 @@ int drm_mode_addfb(struct drm_device *dev,
2358 r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
2359 r.handles[0] = or->handle;
2360
2361 + if (r.pixel_format == DRM_FORMAT_XRGB2101010 &&
2362 + dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP)
2363 + r.pixel_format = DRM_FORMAT_XBGR2101010;
2364 +
2365 ret = drm_mode_addfb2(dev, &r, file_priv);
2366 if (ret)
2367 return ret;
2368 diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
2369 index 6dc2dde5b672..7a6b2dc08913 100644
2370 --- a/drivers/gpu/drm/drm_probe_helper.c
2371 +++ b/drivers/gpu/drm/drm_probe_helper.c
2372 @@ -654,6 +654,26 @@ static void output_poll_execute(struct work_struct *work)
2373 schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
2374 }
2375
2376 +/**
2377 + * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
2378 + *
2379 + * Determine if %current task is an output poll worker. This can be used
2380 + * to select distinct code paths for output polling versus other contexts.
2381 + *
2382 + * One use case is to avoid a deadlock between the output poll worker and
2383 + * the autosuspend worker wherein the latter waits for polling to finish
2384 + * upon calling drm_kms_helper_poll_disable(), while the former waits for
2385 + * runtime suspend to finish upon calling pm_runtime_get_sync() in a
2386 + * connector ->detect hook.
2387 + */
2388 +bool drm_kms_helper_is_poll_worker(void)
2389 +{
2390 + struct work_struct *work = current_work();
2391 +
2392 + return work && work->func == output_poll_execute;
2393 +}
2394 +EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
2395 +
2396 /**
2397 * drm_kms_helper_poll_disable - disable output polling
2398 * @dev: drm_device
2399 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
2400 index 2cf10d17acfb..62004ea403c6 100644
2401 --- a/drivers/gpu/drm/i915/i915_drv.c
2402 +++ b/drivers/gpu/drm/i915/i915_drv.c
2403 @@ -1827,6 +1827,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
2404 if (IS_GEN9_LP(dev_priv) ||
2405 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
2406 intel_power_domains_init_hw(dev_priv, true);
2407 + else
2408 + intel_display_set_init_power(dev_priv, true);
2409
2410 i915_gem_sanitize(dev_priv);
2411
2412 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
2413 index 435ed95df144..3d0ae387691f 100644
2414 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
2415 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
2416 @@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
2417 list_add_tail(&vma->exec_link, &eb->unbound);
2418 if (drm_mm_node_allocated(&vma->node))
2419 err = i915_vma_unbind(vma);
2420 + if (unlikely(err))
2421 + vma->exec_flags = NULL;
2422 }
2423 return err;
2424 }
2425 @@ -2419,7 +2421,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
2426 if (out_fence) {
2427 if (err == 0) {
2428 fd_install(out_fence_fd, out_fence->file);
2429 - args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
2430 + args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
2431 args->rsvd2 |= (u64)out_fence_fd << 32;
2432 out_fence_fd = -1;
2433 } else {
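The execbuffer hunk above fixes swapped GENMASK_ULL() arguments: the macro takes the high bit first, so GENMASK_ULL(0, 31) evaluates to an empty mask and would wipe the in-fence id rather than keep the low 32 bits. A user-space sketch with a simplified stand-in for the kernel macro (equivalent for these arguments):

    #include <stdio.h>

    /* simplified stand-in for the kernel macro: high bit first, then low bit */
    #define GENMASK_ULL(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        printf("GENMASK_ULL(31, 0) = %#llx\n", GENMASK_ULL(31, 0)); /* 0xffffffff */
        printf("GENMASK_ULL(0, 31) = %#llx\n", GENMASK_ULL(0, 31)); /* 0 */
        return 0;
    }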
2434 diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
2435 index 59ee808f8fd9..cc2a10f22c3d 100644
2436 --- a/drivers/gpu/drm/i915/i915_perf.c
2437 +++ b/drivers/gpu/drm/i915/i915_perf.c
2438 @@ -1301,9 +1301,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
2439 */
2440 mutex_lock(&dev_priv->drm.struct_mutex);
2441 dev_priv->perf.oa.exclusive_stream = NULL;
2442 - mutex_unlock(&dev_priv->drm.struct_mutex);
2443 -
2444 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
2445 + mutex_unlock(&dev_priv->drm.struct_mutex);
2446
2447 free_oa_buffer(dev_priv);
2448
2449 @@ -1755,22 +1754,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
2450 * Note: it's only the RCS/Render context that has any OA state.
2451 */
2452 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
2453 - const struct i915_oa_config *oa_config,
2454 - bool interruptible)
2455 + const struct i915_oa_config *oa_config)
2456 {
2457 struct i915_gem_context *ctx;
2458 int ret;
2459 unsigned int wait_flags = I915_WAIT_LOCKED;
2460
2461 - if (interruptible) {
2462 - ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2463 - if (ret)
2464 - return ret;
2465 -
2466 - wait_flags |= I915_WAIT_INTERRUPTIBLE;
2467 - } else {
2468 - mutex_lock(&dev_priv->drm.struct_mutex);
2469 - }
2470 + lockdep_assert_held(&dev_priv->drm.struct_mutex);
2471
2472 /* Switch away from any user context. */
2473 ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
2474 @@ -1818,8 +1808,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
2475 }
2476
2477 out:
2478 - mutex_unlock(&dev_priv->drm.struct_mutex);
2479 -
2480 return ret;
2481 }
2482
2483 @@ -1862,7 +1850,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
2484 * to make sure all slices/subslices are ON before writing to NOA
2485 * registers.
2486 */
2487 - ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
2488 + ret = gen8_configure_all_contexts(dev_priv, oa_config);
2489 if (ret)
2490 return ret;
2491
2492 @@ -1877,7 +1865,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
2493 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
2494 {
2495 /* Reset all contexts' slices/subslices configurations. */
2496 - gen8_configure_all_contexts(dev_priv, NULL, false);
2497 + gen8_configure_all_contexts(dev_priv, NULL);
2498
2499 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
2500 ~GT_NOA_ENABLE));
2501 @@ -2127,6 +2115,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
2502 if (ret)
2503 goto err_oa_buf_alloc;
2504
2505 + ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2506 + if (ret)
2507 + goto err_lock;
2508 +
2509 ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
2510 stream->oa_config);
2511 if (ret)
2512 @@ -2134,23 +2126,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
2513
2514 stream->ops = &i915_oa_stream_ops;
2515
2516 - /* Lock device for exclusive_stream access late because
2517 - * enable_metric_set() might lock as well on gen8+.
2518 - */
2519 - ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2520 - if (ret)
2521 - goto err_lock;
2522 -
2523 dev_priv->perf.oa.exclusive_stream = stream;
2524
2525 mutex_unlock(&dev_priv->drm.struct_mutex);
2526
2527 return 0;
2528
2529 -err_lock:
2530 +err_enable:
2531 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
2532 + mutex_unlock(&dev_priv->drm.struct_mutex);
2533
2534 -err_enable:
2535 +err_lock:
2536 free_oa_buffer(dev_priv);
2537
2538 err_oa_buf_alloc:
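The i915_perf hunks move lock acquisition out of gen8_configure_all_contexts() and into its callers, with lockdep_assert_held() documenting the contract, so the stream-init error paths unwind under one consistent locking scheme. A condensed kernel-style sketch of the resulting shape (helper() and caller() are hypothetical names):

    static int helper(struct drm_i915_private *dev_priv)
    {
        lockdep_assert_held(&dev_priv->drm.struct_mutex);  /* caller holds it */
        /* ... reconfigure contexts ... */
        return 0;
    }

    static int caller(struct drm_i915_private *dev_priv)
    {
        int ret = i915_mutex_lock_interruptible(&dev_priv->drm);

        if (ret)
            return ret;
        ret = helper(dev_priv);               /* runs entirely under the lock */
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
    }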
2539 diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
2540 index 0ddba16fde1b..538a762f7318 100644
2541 --- a/drivers/gpu/drm/i915/intel_audio.c
2542 +++ b/drivers/gpu/drm/i915/intel_audio.c
2543 @@ -754,11 +754,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
2544 {
2545 struct intel_encoder *encoder;
2546
2547 - if (WARN_ON(pipe >= INTEL_INFO(dev_priv)->num_pipes))
2548 - return NULL;
2549 -
2550 /* MST */
2551 if (pipe >= 0) {
2552 + if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
2553 + return NULL;
2554 +
2555 encoder = dev_priv->av_enc_map[pipe];
2556 /*
2557 * when bootup, audio driver may not know it is
2558 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2559 index 50f8443641b8..a83e18c72f7b 100644
2560 --- a/drivers/gpu/drm/i915/intel_display.c
2561 +++ b/drivers/gpu/drm/i915/intel_display.c
2562 @@ -14463,6 +14463,8 @@ static void sanitize_watermarks(struct drm_device *dev)
2563
2564 cs->wm.need_postvbl_update = true;
2565 dev_priv->display.optimize_watermarks(intel_state, cs);
2566 +
2567 + to_intel_crtc_state(crtc->state)->wm = cs->wm;
2568 }
2569
2570 put_state:
2571 diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
2572 index 4dea833f9d1b..847cda4c017c 100644
2573 --- a/drivers/gpu/drm/i915/intel_hdmi.c
2574 +++ b/drivers/gpu/drm/i915/intel_hdmi.c
2575 @@ -1573,12 +1573,20 @@ intel_hdmi_set_edid(struct drm_connector *connector)
2576 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
2577 struct edid *edid;
2578 bool connected = false;
2579 + struct i2c_adapter *i2c;
2580
2581 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
2582
2583 - edid = drm_get_edid(connector,
2584 - intel_gmbus_get_adapter(dev_priv,
2585 - intel_hdmi->ddc_bus));
2586 + i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
2587 +
2588 + edid = drm_get_edid(connector, i2c);
2589 +
2590 + if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
2591 + DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
2592 + intel_gmbus_force_bit(i2c, true);
2593 + edid = drm_get_edid(connector, i2c);
2594 + intel_gmbus_force_bit(i2c, false);
2595 + }
2596
2597 intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
2598
2599 diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
2600 index 7e115f3927f6..d169bfb98368 100644
2601 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
2602 +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
2603 @@ -1844,6 +1844,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
2604 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2605 BIT_ULL(POWER_DOMAIN_MODESET) | \
2606 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2607 + BIT_ULL(POWER_DOMAIN_GMBUS) | \
2608 BIT_ULL(POWER_DOMAIN_INIT))
2609
2610 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2611 diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
2612 index 69d6e61a01ec..6ed9cb053dfa 100644
2613 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c
2614 +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
2615 @@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
2616 nv_connector->edid = NULL;
2617 }
2618
2619 - ret = pm_runtime_get_sync(connector->dev->dev);
2620 - if (ret < 0 && ret != -EACCES)
2621 - return conn_status;
2622 + /* Outputs are only polled while runtime active, so acquiring a
2623 + * runtime PM ref here is unnecessary (and would deadlock upon
2624 + * runtime suspend because it waits for polling to finish).
2625 + */
2626 + if (!drm_kms_helper_is_poll_worker()) {
2627 + ret = pm_runtime_get_sync(connector->dev->dev);
2628 + if (ret < 0 && ret != -EACCES)
2629 + return conn_status;
2630 + }
2631
2632 nv_encoder = nouveau_connector_ddc_detect(connector);
2633 if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
2634 @@ -647,8 +653,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
2635
2636 out:
2637
2638 - pm_runtime_mark_last_busy(connector->dev->dev);
2639 - pm_runtime_put_autosuspend(connector->dev->dev);
2640 + if (!drm_kms_helper_is_poll_worker()) {
2641 + pm_runtime_mark_last_busy(connector->dev->dev);
2642 + pm_runtime_put_autosuspend(connector->dev->dev);
2643 + }
2644
2645 return conn_status;
2646 }
2647 diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
2648 index 584466ef688f..325bff420f5a 100644
2649 --- a/drivers/gpu/drm/nouveau/nv50_display.c
2650 +++ b/drivers/gpu/drm/nouveau/nv50_display.c
2651 @@ -4426,6 +4426,7 @@ nv50_display_create(struct drm_device *dev)
2652 nouveau_display(dev)->fini = nv50_display_fini;
2653 disp->disp = &nouveau_display(dev)->disp;
2654 dev->mode_config.funcs = &nv50_disp_func;
2655 + dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
2656 if (nouveau_atomic)
2657 dev->driver->driver_features |= DRIVER_ATOMIC;
2658
2659 diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
2660 index a6511918f632..8ce36cf42055 100644
2661 --- a/drivers/gpu/drm/radeon/cik.c
2662 +++ b/drivers/gpu/drm/radeon/cik.c
2663 @@ -3228,35 +3228,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
2664 case CHIP_KAVERI:
2665 rdev->config.cik.max_shader_engines = 1;
2666 rdev->config.cik.max_tile_pipes = 4;
2667 - if ((rdev->pdev->device == 0x1304) ||
2668 - (rdev->pdev->device == 0x1305) ||
2669 - (rdev->pdev->device == 0x130C) ||
2670 - (rdev->pdev->device == 0x130F) ||
2671 - (rdev->pdev->device == 0x1310) ||
2672 - (rdev->pdev->device == 0x1311) ||
2673 - (rdev->pdev->device == 0x131C)) {
2674 - rdev->config.cik.max_cu_per_sh = 8;
2675 - rdev->config.cik.max_backends_per_se = 2;
2676 - } else if ((rdev->pdev->device == 0x1309) ||
2677 - (rdev->pdev->device == 0x130A) ||
2678 - (rdev->pdev->device == 0x130D) ||
2679 - (rdev->pdev->device == 0x1313) ||
2680 - (rdev->pdev->device == 0x131D)) {
2681 - rdev->config.cik.max_cu_per_sh = 6;
2682 - rdev->config.cik.max_backends_per_se = 2;
2683 - } else if ((rdev->pdev->device == 0x1306) ||
2684 - (rdev->pdev->device == 0x1307) ||
2685 - (rdev->pdev->device == 0x130B) ||
2686 - (rdev->pdev->device == 0x130E) ||
2687 - (rdev->pdev->device == 0x1315) ||
2688 - (rdev->pdev->device == 0x1318) ||
2689 - (rdev->pdev->device == 0x131B)) {
2690 - rdev->config.cik.max_cu_per_sh = 4;
2691 - rdev->config.cik.max_backends_per_se = 1;
2692 - } else {
2693 - rdev->config.cik.max_cu_per_sh = 3;
2694 - rdev->config.cik.max_backends_per_se = 1;
2695 - }
2696 + rdev->config.cik.max_cu_per_sh = 8;
2697 + rdev->config.cik.max_backends_per_se = 2;
2698 rdev->config.cik.max_sh_per_se = 1;
2699 rdev->config.cik.max_texture_channel_caches = 4;
2700 rdev->config.cik.max_gprs = 256;
2701 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2702 index 59dcefb2df3b..30e129684c7c 100644
2703 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
2704 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2705 @@ -900,9 +900,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
2706 enum drm_connector_status ret = connector_status_disconnected;
2707 int r;
2708
2709 - r = pm_runtime_get_sync(connector->dev->dev);
2710 - if (r < 0)
2711 - return connector_status_disconnected;
2712 + if (!drm_kms_helper_is_poll_worker()) {
2713 + r = pm_runtime_get_sync(connector->dev->dev);
2714 + if (r < 0)
2715 + return connector_status_disconnected;
2716 + }
2717
2718 if (encoder) {
2719 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2720 @@ -925,8 +927,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
2721 /* check acpi lid status ??? */
2722
2723 radeon_connector_update_scratch_regs(connector, ret);
2724 - pm_runtime_mark_last_busy(connector->dev->dev);
2725 - pm_runtime_put_autosuspend(connector->dev->dev);
2726 +
2727 + if (!drm_kms_helper_is_poll_worker()) {
2728 + pm_runtime_mark_last_busy(connector->dev->dev);
2729 + pm_runtime_put_autosuspend(connector->dev->dev);
2730 + }
2731 +
2732 return ret;
2733 }
2734
2735 @@ -1040,9 +1046,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
2736 enum drm_connector_status ret = connector_status_disconnected;
2737 int r;
2738
2739 - r = pm_runtime_get_sync(connector->dev->dev);
2740 - if (r < 0)
2741 - return connector_status_disconnected;
2742 + if (!drm_kms_helper_is_poll_worker()) {
2743 + r = pm_runtime_get_sync(connector->dev->dev);
2744 + if (r < 0)
2745 + return connector_status_disconnected;
2746 + }
2747
2748 encoder = radeon_best_single_encoder(connector);
2749 if (!encoder)
2750 @@ -1109,8 +1117,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
2751 radeon_connector_update_scratch_regs(connector, ret);
2752
2753 out:
2754 - pm_runtime_mark_last_busy(connector->dev->dev);
2755 - pm_runtime_put_autosuspend(connector->dev->dev);
2756 + if (!drm_kms_helper_is_poll_worker()) {
2757 + pm_runtime_mark_last_busy(connector->dev->dev);
2758 + pm_runtime_put_autosuspend(connector->dev->dev);
2759 + }
2760
2761 return ret;
2762 }
2763 @@ -1174,9 +1184,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
2764 if (!radeon_connector->dac_load_detect)
2765 return ret;
2766
2767 - r = pm_runtime_get_sync(connector->dev->dev);
2768 - if (r < 0)
2769 - return connector_status_disconnected;
2770 + if (!drm_kms_helper_is_poll_worker()) {
2771 + r = pm_runtime_get_sync(connector->dev->dev);
2772 + if (r < 0)
2773 + return connector_status_disconnected;
2774 + }
2775
2776 encoder = radeon_best_single_encoder(connector);
2777 if (!encoder)
2778 @@ -1188,8 +1200,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
2779 if (ret == connector_status_connected)
2780 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
2781 radeon_connector_update_scratch_regs(connector, ret);
2782 - pm_runtime_mark_last_busy(connector->dev->dev);
2783 - pm_runtime_put_autosuspend(connector->dev->dev);
2784 +
2785 + if (!drm_kms_helper_is_poll_worker()) {
2786 + pm_runtime_mark_last_busy(connector->dev->dev);
2787 + pm_runtime_put_autosuspend(connector->dev->dev);
2788 + }
2789 +
2790 return ret;
2791 }
2792
2793 @@ -1252,9 +1268,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
2794 enum drm_connector_status ret = connector_status_disconnected;
2795 bool dret = false, broken_edid = false;
2796
2797 - r = pm_runtime_get_sync(connector->dev->dev);
2798 - if (r < 0)
2799 - return connector_status_disconnected;
2800 + if (!drm_kms_helper_is_poll_worker()) {
2801 + r = pm_runtime_get_sync(connector->dev->dev);
2802 + if (r < 0)
2803 + return connector_status_disconnected;
2804 + }
2805
2806 if (radeon_connector->detected_hpd_without_ddc) {
2807 force = true;
2808 @@ -1437,8 +1455,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
2809 }
2810
2811 exit:
2812 - pm_runtime_mark_last_busy(connector->dev->dev);
2813 - pm_runtime_put_autosuspend(connector->dev->dev);
2814 + if (!drm_kms_helper_is_poll_worker()) {
2815 + pm_runtime_mark_last_busy(connector->dev->dev);
2816 + pm_runtime_put_autosuspend(connector->dev->dev);
2817 + }
2818
2819 return ret;
2820 }
2821 @@ -1689,9 +1709,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
2822 if (radeon_dig_connector->is_mst)
2823 return connector_status_disconnected;
2824
2825 - r = pm_runtime_get_sync(connector->dev->dev);
2826 - if (r < 0)
2827 - return connector_status_disconnected;
2828 + if (!drm_kms_helper_is_poll_worker()) {
2829 + r = pm_runtime_get_sync(connector->dev->dev);
2830 + if (r < 0)
2831 + return connector_status_disconnected;
2832 + }
2833
2834 if (!force && radeon_check_hpd_status_unchanged(connector)) {
2835 ret = connector->status;
2836 @@ -1778,8 +1800,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
2837 }
2838
2839 out:
2840 - pm_runtime_mark_last_busy(connector->dev->dev);
2841 - pm_runtime_put_autosuspend(connector->dev->dev);
2842 + if (!drm_kms_helper_is_poll_worker()) {
2843 + pm_runtime_mark_last_busy(connector->dev->dev);
2844 + pm_runtime_put_autosuspend(connector->dev->dev);
2845 + }
2846
2847 return ret;
2848 }
2849 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2850 index ffc10cadcf34..32b577c776b9 100644
2851 --- a/drivers/gpu/drm/radeon/radeon_device.c
2852 +++ b/drivers/gpu/drm/radeon/radeon_device.c
2853 @@ -1397,6 +1397,10 @@ int radeon_device_init(struct radeon_device *rdev,
2854 if ((rdev->flags & RADEON_IS_PCI) &&
2855 (rdev->family <= CHIP_RS740))
2856 rdev->need_dma32 = true;
2857 +#ifdef CONFIG_PPC64
2858 + if (rdev->family == CHIP_CEDAR)
2859 + rdev->need_dma32 = true;
2860 +#endif
2861
2862 dma_bits = rdev->need_dma32 ? 32 : 40;
2863 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
2864 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
2865 index 326ad068c15a..4b6542538ff9 100644
2866 --- a/drivers/gpu/drm/radeon/radeon_pm.c
2867 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
2868 @@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
2869 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
2870 static void radeon_pm_update_profile(struct radeon_device *rdev);
2871 static void radeon_pm_set_clocks(struct radeon_device *rdev);
2872 -static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
2873
2874 int radeon_pm_get_type_index(struct radeon_device *rdev,
2875 enum radeon_pm_state_type ps_type,
2876 @@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
2877 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
2878 }
2879 mutex_unlock(&rdev->pm.mutex);
2880 - /* allow new DPM state to be picked */
2881 - radeon_pm_compute_clocks_dpm(rdev);
2882 } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
2883 if (rdev->pm.profile == PM_PROFILE_AUTO) {
2884 mutex_lock(&rdev->pm.mutex);
2885 @@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
2886 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
2887 /* balanced states don't exist at the moment */
2888 if (dpm_state == POWER_STATE_TYPE_BALANCED)
2889 - dpm_state = rdev->pm.dpm.ac_power ?
2890 - POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
2891 + dpm_state = POWER_STATE_TYPE_PERFORMANCE;
2892
2893 restart_search:
2894 /* Pick the best power state based on current conditions */
2895 diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
2896 index d7d042a20ab4..4dff06ab771e 100644
2897 --- a/drivers/infiniband/core/device.c
2898 +++ b/drivers/infiniband/core/device.c
2899 @@ -534,14 +534,14 @@ int ib_register_device(struct ib_device *device,
2900 ret = device->query_device(device, &device->attrs, &uhw);
2901 if (ret) {
2902 pr_warn("Couldn't query the device attributes\n");
2903 - goto cache_cleanup;
2904 + goto cg_cleanup;
2905 }
2906
2907 ret = ib_device_register_sysfs(device, port_callback);
2908 if (ret) {
2909 pr_warn("Couldn't register device %s with driver model\n",
2910 device->name);
2911 - goto cache_cleanup;
2912 + goto cg_cleanup;
2913 }
2914
2915 device->reg_state = IB_DEV_REGISTERED;
2916 @@ -557,6 +557,8 @@ int ib_register_device(struct ib_device *device,
2917 mutex_unlock(&device_mutex);
2918 return 0;
2919
2920 +cg_cleanup:
2921 + ib_device_unregister_rdmacg(device);
2922 cache_cleanup:
2923 ib_cache_cleanup_one(device);
2924 ib_cache_release_one(device);
2925 diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
2926 index 4e1f76730855..9cb801d1fe54 100644
2927 --- a/drivers/infiniband/core/rdma_core.c
2928 +++ b/drivers/infiniband/core/rdma_core.c
2929 @@ -407,13 +407,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
2930 return ret;
2931 }
2932
2933 -static void lockdep_check(struct ib_uobject *uobj, bool exclusive)
2934 +static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
2935 {
2936 #ifdef CONFIG_LOCKDEP
2937 if (exclusive)
2938 - WARN_ON(atomic_read(&uobj->usecnt) > 0);
2939 + WARN_ON(atomic_read(&uobj->usecnt) != -1);
2940 else
2941 - WARN_ON(atomic_read(&uobj->usecnt) == -1);
2942 + WARN_ON(atomic_read(&uobj->usecnt) <= 0);
2943 #endif
2944 }
2945
2946 @@ -452,7 +452,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
2947 WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
2948 return 0;
2949 }
2950 - lockdep_check(uobj, true);
2951 + assert_uverbs_usecnt(uobj, true);
2952 ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
2953
2954 up_read(&ucontext->cleanup_rwsem);
2955 @@ -482,7 +482,7 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
2956 WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
2957 return 0;
2958 }
2959 - lockdep_check(uobject, true);
2960 + assert_uverbs_usecnt(uobject, true);
2961 ret = uobject->type->type_class->remove_commit(uobject,
2962 RDMA_REMOVE_DESTROY);
2963 if (ret)
2964 @@ -569,7 +569,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
2965
2966 void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
2967 {
2968 - lockdep_check(uobj, exclusive);
2969 + assert_uverbs_usecnt(uobj, exclusive);
2970 uobj->type->type_class->lookup_put(uobj, exclusive);
2971 /*
2972 * In order to unlock an object, either decrease its usecnt for
2973 diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
2974 index eb85b546e223..c8b3a45e9edc 100644
2975 --- a/drivers/infiniband/core/ucma.c
2976 +++ b/drivers/infiniband/core/ucma.c
2977 @@ -1148,6 +1148,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
2978 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
2979 return -EFAULT;
2980
2981 + if (cmd.qp_state > IB_QPS_ERR)
2982 + return -EINVAL;
2983 +
2984 ctx = ucma_get_ctx(file, cmd.id);
2985 if (IS_ERR(ctx))
2986 return PTR_ERR(ctx);
2987 @@ -1293,6 +1296,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
2988 if (IS_ERR(ctx))
2989 return PTR_ERR(ctx);
2990
2991 + if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
2992 + return -EINVAL;
2993 +
2994 optval = memdup_user((void __user *) (unsigned long) cmd.optval,
2995 cmd.optlen);
2996 if (IS_ERR(optval)) {
2997 diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
2998 index 18705cbcdc8c..8b179238f405 100644
2999 --- a/drivers/infiniband/hw/mlx5/cq.c
3000 +++ b/drivers/infiniband/hw/mlx5/cq.c
3001 @@ -1177,7 +1177,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
3002 if (ucmd.reserved0 || ucmd.reserved1)
3003 return -EINVAL;
3004
3005 - umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
3006 + /* check multiplication overflow */
3007 + if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
3008 + return -EINVAL;
3009 +
3010 + umem = ib_umem_get(context, ucmd.buf_addr,
3011 + (size_t)ucmd.cqe_size * entries,
3012 IB_ACCESS_LOCAL_WRITE, 1);
3013 if (IS_ERR(umem)) {
3014 err = PTR_ERR(umem);
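The added check guards the byte count passed to ib_umem_get(): for unsigned values, a * b overflows exactly when a != 0 and b > SIZE_MAX / a, which the hunk phrases as SIZE_MAX / a <= b - 1 (the sketch below assumes entries >= 1). A user-space sketch of the same guard:

    #include <stdint.h>
    #include <stdio.h>

    /* returns 0 and stores a * b, or -1 if the product would overflow;
     * assumes b >= 1, mirroring entries in the driver */
    static int checked_mul(size_t a, size_t b, size_t *out)
    {
        if (a && SIZE_MAX / a <= b - 1)       /* same test as the cq.c hunk */
            return -1;
        *out = a * b;
        return 0;
    }

    int main(void)
    {
        size_t n;

        if (checked_mul(64, 4096, &n) == 0)
            printf("ok: %zu bytes\n", n);     /* 262144 */
        if (checked_mul(SIZE_MAX / 2, 3, &n) != 0)
            puts("rejected: would overflow");
        return 0;
    }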
3015 diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
3016 index d109fe8290a7..3832edd867ed 100644
3017 --- a/drivers/infiniband/hw/mlx5/mr.c
3018 +++ b/drivers/infiniband/hw/mlx5/mr.c
3019 @@ -1813,7 +1813,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
3020
3021 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
3022 mr->ibmr.length = 0;
3023 - mr->ndescs = sg_nents;
3024
3025 for_each_sg(sgl, sg, sg_nents, i) {
3026 if (unlikely(i >= mr->max_descs))
3027 @@ -1825,6 +1824,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
3028
3029 sg_offset = 0;
3030 }
3031 + mr->ndescs = i;
3032
3033 if (sg_offset_p)
3034 *sg_offset_p = sg_offset;
3035 diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
3036 index 1f316d66e6f7..41614c185918 100644
3037 --- a/drivers/input/keyboard/matrix_keypad.c
3038 +++ b/drivers/input/keyboard/matrix_keypad.c
3039 @@ -218,8 +218,10 @@ static void matrix_keypad_stop(struct input_dev *dev)
3040 {
3041 struct matrix_keypad *keypad = input_get_drvdata(dev);
3042
3043 + spin_lock_irq(&keypad->lock);
3044 keypad->stopped = true;
3045 - mb();
3046 + spin_unlock_irq(&keypad->lock);
3047 +
3048 flush_work(&keypad->work.work);
3049 /*
3050 * matrix_keypad_scan() will leave IRQs enabled;
3051 diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
3052 index cd9f61cb3fc6..ee5466a374bf 100644
3053 --- a/drivers/input/mouse/synaptics.c
3054 +++ b/drivers/input/mouse/synaptics.c
3055 @@ -173,7 +173,6 @@ static const char * const smbus_pnp_ids[] = {
3056 "LEN0046", /* X250 */
3057 "LEN004a", /* W541 */
3058 "LEN200f", /* T450s */
3059 - "LEN2018", /* T460p */
3060 NULL
3061 };
3062
3063 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
3064 index b4d28928dec5..14bdaf1cef2c 100644
3065 --- a/drivers/md/bcache/super.c
3066 +++ b/drivers/md/bcache/super.c
3067 @@ -951,6 +951,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
3068 uint32_t rtime = cpu_to_le32(get_seconds());
3069 struct uuid_entry *u;
3070 char buf[BDEVNAME_SIZE];
3071 + struct cached_dev *exist_dc, *t;
3072
3073 bdevname(dc->bdev, buf);
3074
3075 @@ -974,6 +975,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
3076 return -EINVAL;
3077 }
3078
3079 + /* Check whether already attached */
3080 + list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
3081 + if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
3082 + pr_err("Tried to attach %s but duplicate UUID already attached",
3083 + buf);
3084 +
3085 + return -EINVAL;
3086 + }
3087 + }
3088 +
3089 u = uuid_find(c, dc->sb.uuid);
3090
3091 if (u &&
3092 @@ -1191,7 +1202,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
3093
3094 return;
3095 err:
3096 - pr_notice("error opening %s: %s", bdevname(bdev, name), err);
3097 + pr_notice("error %s: %s", bdevname(bdev, name), err);
3098 bcache_device_stop(&dc->disk);
3099 }
3100
3101 @@ -1859,6 +1870,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
3102 const char *err = NULL; /* must be set for any error case */
3103 int ret = 0;
3104
3105 + bdevname(bdev, name);
3106 +
3107 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
3108 ca->bdev = bdev;
3109 ca->bdev->bd_holder = ca;
3110 @@ -1867,11 +1880,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
3111 ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
3112 get_page(sb_page);
3113
3114 - if (blk_queue_discard(bdev_get_queue(ca->bdev)))
3115 + if (blk_queue_discard(bdev_get_queue(bdev)))
3116 ca->discard = CACHE_DISCARD(&ca->sb);
3117
3118 ret = cache_alloc(ca);
3119 if (ret != 0) {
3120 + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
3121 if (ret == -ENOMEM)
3122 err = "cache_alloc(): -ENOMEM";
3123 else
3124 @@ -1894,14 +1908,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
3125 goto out;
3126 }
3127
3128 - pr_info("registered cache device %s", bdevname(bdev, name));
3129 + pr_info("registered cache device %s", name);
3130
3131 out:
3132 kobject_put(&ca->kobj);
3133
3134 err:
3135 if (err)
3136 - pr_notice("error opening %s: %s", bdevname(bdev, name), err);
3137 + pr_notice("error %s: %s", name, err);
3138
3139 return ret;
3140 }
3141 @@ -1990,6 +2004,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
3142 if (err)
3143 goto err_close;
3144
3145 + err = "failed to register device";
3146 if (SB_IS_BDEV(sb)) {
3147 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
3148 if (!dc)
3149 @@ -2004,7 +2019,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
3150 goto err_close;
3151
3152 if (register_cache(sb, sb_page, bdev, ca) != 0)
3153 - goto err_close;
3154 + goto err;
3155 }
3156 out:
3157 if (sb_page)
3158 @@ -2017,7 +2032,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
3159 err_close:
3160 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
3161 err:
3162 - pr_info("error opening %s: %s", path, err);
3163 + pr_info("error %s: %s", path, err);
3164 ret = -EINVAL;
3165 goto out;
3166 }
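
bch_cached_dev_attach() now scans c->cached_devs and rejects a backing
device whose 16-byte superblock UUID is already attached. A small
self-contained sketch of that scan over a singly linked list
(simplified types; bcache uses list_for_each_entry_safe() under its own
locking):

    #include <stdio.h>
    #include <string.h>

    /* Walk the list of already-attached devices and refuse a newcomer
     * whose 16-byte UUID matches an existing one. */
    struct cached_dev {
        unsigned char uuid[16];
        struct cached_dev *next;
    };

    static int check_duplicate(const struct cached_dev *list,
                               const unsigned char *uuid)
    {
        for (; list; list = list->next)
            if (!memcmp(list->uuid, uuid, 16))
                return -1;            /* like returning -EINVAL */
        return 0;
    }

    int main(void)
    {
        struct cached_dev a = { "0123456789abcdef", NULL };
        struct cached_dev head = { "fedcba9876543210", &a };

        printf("%d\n", check_duplicate(&head,
               (unsigned char *)"0123456789abcdef"));   /* -1: duplicate */
        printf("%d\n", check_duplicate(&head,
               (unsigned char *)"AAAAAAAAAAAAAAAA"));   /*  0: new UUID  */
        return 0;
    }
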
3167 diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
3168 index c546b567f3b5..b3454e8c0956 100644
3169 --- a/drivers/md/dm-bufio.c
3170 +++ b/drivers/md/dm-bufio.c
3171 @@ -386,9 +386,6 @@ static void __cache_size_refresh(void)
3172 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
3173 enum data_mode *data_mode)
3174 {
3175 - unsigned noio_flag;
3176 - void *ptr;
3177 -
3178 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
3179 *data_mode = DATA_MODE_SLAB;
3180 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
3181 @@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
3182 * all allocations done by this process (including pagetables) are done
3183 * as if GFP_NOIO was specified.
3184 */
3185 + if (gfp_mask & __GFP_NORETRY) {
3186 + unsigned noio_flag = memalloc_noio_save();
3187 + void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
3188
3189 - if (gfp_mask & __GFP_NORETRY)
3190 - noio_flag = memalloc_noio_save();
3191 -
3192 - ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
3193 -
3194 - if (gfp_mask & __GFP_NORETRY)
3195 memalloc_noio_restore(noio_flag);
3196 + return ptr;
3197 + }
3198
3199 - return ptr;
3200 + return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
3201 }
3202
3203 /*
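
The dm-bufio rework scopes noio_flag and ptr inside the __GFP_NORETRY
branch so that memalloc_noio_save() and memalloc_noio_restore() always
pair within one lexical block, instead of being guarded by two separate
conditionals. A userspace analogue of the save/override/restore pattern
(the thread-local flag is an assumed stand-in for the task's
PF_MEMALLOC_NOIO state):

    #include <stdio.h>

    static __thread unsigned alloc_mode; /* stands in for PF_MEMALLOC_NOIO */

    static unsigned mode_save(unsigned new_mode)
    {
        unsigned old = alloc_mode;
        alloc_mode = new_mode;
        return old;
    }

    static void mode_restore(unsigned old)
    {
        alloc_mode = old;
    }

    static void do_alloc(int no_retry)
    {
        if (no_retry) {                  /* like gfp_mask & __GFP_NORETRY */
            unsigned flag = mode_save(1);

            printf("allocating with mode %u\n", alloc_mode);
            mode_restore(flag);          /* save/restore pair in one scope */
            return;
        }
        printf("allocating with mode %u\n", alloc_mode);
    }

    int main(void)
    {
        do_alloc(1);
        do_alloc(0);
        return 0;
    }
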
3204 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
3205 index f6d4a50f1bdb..829ac22b72fc 100644
3206 --- a/drivers/net/wireless/mac80211_hwsim.c
3207 +++ b/drivers/net/wireless/mac80211_hwsim.c
3208 @@ -3455,7 +3455,7 @@ static int __init init_mac80211_hwsim(void)
3209
3210 spin_lock_init(&hwsim_radio_lock);
3211
3212 - hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0);
3213 + hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
3214 if (!hwsim_wq)
3215 return -ENOMEM;
3216
3217 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
3218 index 839650e0926a..3551fbd6fe41 100644
3219 --- a/drivers/nvme/host/core.c
3220 +++ b/drivers/nvme/host/core.c
3221 @@ -2950,7 +2950,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3222
3223 if (new)
3224 nvme_mpath_add_disk(ns->head);
3225 - nvme_mpath_add_disk_links(ns);
3226 return;
3227 out_unlink_ns:
3228 mutex_lock(&ctrl->subsys->lock);
3229 @@ -2970,7 +2969,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
3230 return;
3231
3232 if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
3233 - nvme_mpath_remove_disk_links(ns);
3234 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
3235 &nvme_ns_id_attr_group);
3236 if (ns->ndev)
3237 diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
3238 index 1218a9fca846..cf16905d25e2 100644
3239 --- a/drivers/nvme/host/multipath.c
3240 +++ b/drivers/nvme/host/multipath.c
3241 @@ -245,25 +245,6 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)
3242 head->disk->disk_name);
3243 }
3244
3245 -void nvme_mpath_add_disk_links(struct nvme_ns *ns)
3246 -{
3247 - struct kobject *slave_disk_kobj, *holder_disk_kobj;
3248 -
3249 - if (!ns->head->disk)
3250 - return;
3251 -
3252 - slave_disk_kobj = &disk_to_dev(ns->disk)->kobj;
3253 - if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj,
3254 - kobject_name(slave_disk_kobj)))
3255 - return;
3256 -
3257 - holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj;
3258 - if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj,
3259 - kobject_name(holder_disk_kobj)))
3260 - sysfs_remove_link(ns->head->disk->slave_dir,
3261 - kobject_name(slave_disk_kobj));
3262 -}
3263 -
3264 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
3265 {
3266 if (!head->disk)
3267 @@ -278,14 +259,3 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
3268 blk_cleanup_queue(head->disk->queue);
3269 put_disk(head->disk);
3270 }
3271 -
3272 -void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
3273 -{
3274 - if (!ns->head->disk)
3275 - return;
3276 -
3277 - sysfs_remove_link(ns->disk->part0.holder_dir,
3278 - kobject_name(&disk_to_dev(ns->head->disk)->kobj));
3279 - sysfs_remove_link(ns->head->disk->slave_dir,
3280 - kobject_name(&disk_to_dev(ns->disk)->kobj));
3281 -}
3282 diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
3283 index a00eabd06427..55c49a1aa231 100644
3284 --- a/drivers/nvme/host/nvme.h
3285 +++ b/drivers/nvme/host/nvme.h
3286 @@ -405,9 +405,7 @@ bool nvme_req_needs_failover(struct request *req);
3287 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
3288 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
3289 void nvme_mpath_add_disk(struct nvme_ns_head *head);
3290 -void nvme_mpath_add_disk_links(struct nvme_ns *ns);
3291 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
3292 -void nvme_mpath_remove_disk_links(struct nvme_ns *ns);
3293
3294 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
3295 {
3296 @@ -448,12 +446,6 @@ static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
3297 static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
3298 {
3299 }
3300 -static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
3301 -{
3302 -}
3303 -static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
3304 -{
3305 -}
3306 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
3307 {
3308 }
3309 diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
3310 index 81e2157a7cfb..bc3e2d8d0cce 100644
3311 --- a/drivers/pci/dwc/pcie-designware-host.c
3312 +++ b/drivers/pci/dwc/pcie-designware-host.c
3313 @@ -607,7 +607,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
3314 /* setup bus numbers */
3315 val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
3316 val &= 0xff000000;
3317 - val |= 0x00010100;
3318 + val |= 0x00ff0100;
3319 dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
3320
3321 /* setup command register */
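
The one-liner in dw_pcie_setup_rc() is easier to read once the
PCI_PRIMARY_BUS dword is decoded: byte 0 holds the primary bus number,
byte 1 the secondary and byte 2 the subordinate, so moving from
0x00010100 to 0x00ff0100 keeps primary 0 and secondary 1 but raises the
subordinate bus to 0xff, letting config cycles reach devices behind
nested bridges. A runnable decode of the two values:

    #include <stdio.h>
    #include <stdint.h>

    /* Decode the PCI_PRIMARY_BUS dword as the hunk programs it. */
    static void decode(uint32_t val)
    {
        printf("primary %u secondary %u subordinate 0x%02x\n",
               val & 0xff, (val >> 8) & 0xff, (val >> 16) & 0xff);
    }

    int main(void)
    {
        decode(0x00010100);   /* old value: subordinate capped at bus 1 */
        decode(0x00ff0100);   /* new value: subordinate opened to 0xff  */
        return 0;
    }
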
3322 diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
3323 index 72c8b3e1022b..e0a9c445ed67 100644
3324 --- a/drivers/regulator/stm32-vrefbuf.c
3325 +++ b/drivers/regulator/stm32-vrefbuf.c
3326 @@ -51,7 +51,7 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev)
3327 * arbitrary timeout.
3328 */
3329 ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val,
3330 - !(val & STM32_VRR), 650, 10000);
3331 + val & STM32_VRR, 650, 10000);
3332 if (ret) {
3333 dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n");
3334 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
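
The stm32-vrefbuf fix is a polarity inversion: STM32_VRR is the
voltage-ready flag, so enabling the buffer must poll until the bit
becomes set, not cleared. A generic poll-until-ready loop with a
timeout, sketching what readl_poll_timeout() does (the VRR bit value
below is an assumption for the demo):

    #include <stdio.h>
    #include <time.h>

    static volatile unsigned csr;        /* stands in for the VREFBUF CSR */

    #define VRR 0x8                      /* assumed 'voltage ready' bit */

    static int poll_ready(long timeout_ms)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (csr & VRR)               /* was '!(csr & VRR)': wrong polarity */
                return 0;
            clock_gettime(CLOCK_MONOTONIC, &now);
            if ((now.tv_sec - start.tv_sec) * 1000 +
                (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
                return -1;               /* like -ETIMEDOUT */
        }
    }

    int main(void)
    {
        csr = VRR;                       /* pretend the hardware is ready */
        printf("poll_ready: %d\n", poll_ready(10));
        return 0;
    }
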
3335 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
3336 index 57bf43e34863..dd9464920456 100644
3337 --- a/drivers/scsi/hosts.c
3338 +++ b/drivers/scsi/hosts.c
3339 @@ -328,8 +328,6 @@ static void scsi_host_dev_release(struct device *dev)
3340 if (shost->work_q)
3341 destroy_workqueue(shost->work_q);
3342
3343 - destroy_rcu_head(&shost->rcu);
3344 -
3345 if (shost->shost_state == SHOST_CREATED) {
3346 /*
3347 * Free the shost_dev device name here if scsi_host_alloc()
3348 @@ -404,7 +402,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
3349 INIT_LIST_HEAD(&shost->starved_list);
3350 init_waitqueue_head(&shost->host_wait);
3351 mutex_init(&shost->scan_mutex);
3352 - init_rcu_head(&shost->rcu);
3353
3354 index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
3355 if (index < 0)
3356 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
3357 index 01a9b8971e88..93ff92e2363f 100644
3358 --- a/drivers/scsi/qla2xxx/qla_def.h
3359 +++ b/drivers/scsi/qla2xxx/qla_def.h
3360 @@ -315,6 +315,29 @@ struct srb_cmd {
3361 /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
3362 #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
3363
3364 +/*
3365 + * 24 bit port ID type definition.
3366 + */
3367 +typedef union {
3368 + uint32_t b24 : 24;
3369 +
3370 + struct {
3371 +#ifdef __BIG_ENDIAN
3372 + uint8_t domain;
3373 + uint8_t area;
3374 + uint8_t al_pa;
3375 +#elif defined(__LITTLE_ENDIAN)
3376 + uint8_t al_pa;
3377 + uint8_t area;
3378 + uint8_t domain;
3379 +#else
3380 +#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
3381 +#endif
3382 + uint8_t rsvd_1;
3383 + } b;
3384 +} port_id_t;
3385 +#define INVALID_PORT_ID 0xFFFFFF
3386 +
3387 struct els_logo_payload {
3388 uint8_t opcode;
3389 uint8_t rsvd[3];
3390 @@ -338,6 +361,7 @@ struct ct_arg {
3391 u32 rsp_size;
3392 void *req;
3393 void *rsp;
3394 + port_id_t id;
3395 };
3396
3397 /*
3398 @@ -499,6 +523,7 @@ typedef struct srb {
3399 const char *name;
3400 int iocbs;
3401 struct qla_qpair *qpair;
3402 + struct list_head elem;
3403 u32 gen1; /* scratch */
3404 u32 gen2; /* scratch */
3405 union {
3406 @@ -2164,28 +2189,6 @@ struct imm_ntfy_from_isp {
3407 #define REQUEST_ENTRY_SIZE (sizeof(request_t))
3408
3409
3410 -/*
3411 - * 24 bit port ID type definition.
3412 - */
3413 -typedef union {
3414 - uint32_t b24 : 24;
3415 -
3416 - struct {
3417 -#ifdef __BIG_ENDIAN
3418 - uint8_t domain;
3419 - uint8_t area;
3420 - uint8_t al_pa;
3421 -#elif defined(__LITTLE_ENDIAN)
3422 - uint8_t al_pa;
3423 - uint8_t area;
3424 - uint8_t domain;
3425 -#else
3426 -#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
3427 -#endif
3428 - uint8_t rsvd_1;
3429 - } b;
3430 -} port_id_t;
3431 -#define INVALID_PORT_ID 0xFFFFFF
3432
3433 /*
3434 * Switch info gathering structure.
3435 @@ -4107,6 +4110,7 @@ typedef struct scsi_qla_host {
3436 #define LOOP_READY 5
3437 #define LOOP_DEAD 6
3438
3439 + unsigned long relogin_jif;
3440 unsigned long dpc_flags;
3441 #define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
3442 #define RESET_ACTIVE 1
3443 @@ -4252,6 +4256,7 @@ typedef struct scsi_qla_host {
3444 uint8_t n2n_node_name[WWN_SIZE];
3445 uint8_t n2n_port_name[WWN_SIZE];
3446 uint16_t n2n_id;
3447 + struct list_head gpnid_list;
3448 } scsi_qla_host_t;
3449
3450 struct qla27xx_image_status {
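
port_id_t, moved above so struct ct_arg can use it, overlays a 24-bit
bitfield with per-byte fields whose order tracks host endianness: b24
compares a whole Fibre Channel address while b.domain/b.area/b.al_pa
still name its components. A compact, runnable demo of the same layout
(it assumes a little-endian host, hence only that arm of the #ifdef):

    #include <stdio.h>
    #include <stdint.h>

    typedef union {
        uint32_t b24 : 24;       /* whole 24-bit FC address */
        struct {
            uint8_t al_pa;       /* little-endian: lowest byte first */
            uint8_t area;
            uint8_t domain;
            uint8_t rsvd_1;
        } b;
    } port_id_t;

    int main(void)
    {
        port_id_t id = { .b24 = 0x112233 };

        /* domain.area.al_pa is how FC addresses are usually printed */
        printf("b24=%06x domain=%02x area=%02x al_pa=%02x\n",
               id.b24, id.b.domain, id.b.area, id.b.al_pa);
        return 0;
    }
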
3451 diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
3452 index bc3db6abc9a0..7d715e58901f 100644
3453 --- a/drivers/scsi/qla2xxx/qla_gs.c
3454 +++ b/drivers/scsi/qla2xxx/qla_gs.c
3455 @@ -175,6 +175,9 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
3456 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3457 }
3458 break;
3459 + case CS_TIMEOUT:
3460 + rval = QLA_FUNCTION_TIMEOUT;
3461 + /* fall through */
3462 default:
3463 ql_dbg(ql_dbg_disc, vha, 0x2033,
3464 "%s failed, completion status (%x) on port_id: "
3465 @@ -2833,7 +2836,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
3466 }
3467 } else { /* fcport->d_id.b24 != ea->id.b24 */
3468 fcport->d_id.b24 = ea->id.b24;
3469 - if (fcport->deleted == QLA_SESS_DELETED) {
3470 + if (fcport->deleted != QLA_SESS_DELETED) {
3471 ql_dbg(ql_dbg_disc, vha, 0x2021,
3472 "%s %d %8phC post del sess\n",
3473 __func__, __LINE__, fcport->port_name);
3474 @@ -2889,9 +2892,22 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
3475 ea.rc = res;
3476 ea.event = FCME_GIDPN_DONE;
3477
3478 - ql_dbg(ql_dbg_disc, vha, 0x204f,
3479 - "Async done-%s res %x, WWPN %8phC ID %3phC \n",
3480 - sp->name, res, fcport->port_name, id);
3481 + if (res == QLA_FUNCTION_TIMEOUT) {
3482 + ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3483 + "Async done-%s WWPN %8phC timed out.\n",
3484 + sp->name, fcport->port_name);
3485 + qla24xx_post_gidpn_work(sp->vha, fcport);
3486 + sp->free(sp);
3487 + return;
3488 + } else if (res) {
3489 + ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3490 + "Async done-%s fail res %x, WWPN %8phC\n",
3491 + sp->name, res, fcport->port_name);
3492 + } else {
3493 + ql_dbg(ql_dbg_disc, vha, 0x204f,
3494 + "Async done-%s good WWPN %8phC ID %3phC\n",
3495 + sp->name, fcport->port_name, id);
3496 + }
3497
3498 qla2x00_fcport_event_handler(vha, &ea);
3499
3500 @@ -3155,43 +3171,136 @@ void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
3501
3502 void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3503 {
3504 - fc_port_t *fcport;
3505 - unsigned long flags;
3506 + fc_port_t *fcport, *conflict, *t;
3507
3508 - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3509 - fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
3510 - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3511 + ql_dbg(ql_dbg_disc, vha, 0xffff,
3512 + "%s %d port_id: %06x\n",
3513 + __func__, __LINE__, ea->id.b24);
3514
3515 - if (fcport) {
3516 - /* cable moved. just plugged in */
3517 - fcport->rscn_gen++;
3518 - fcport->d_id = ea->id;
3519 - fcport->scan_state = QLA_FCPORT_FOUND;
3520 - fcport->flags |= FCF_FABRIC_DEVICE;
3521 -
3522 - switch (fcport->disc_state) {
3523 - case DSC_DELETED:
3524 - ql_dbg(ql_dbg_disc, vha, 0x210d,
3525 - "%s %d %8phC login\n", __func__, __LINE__,
3526 - fcport->port_name);
3527 - qla24xx_fcport_handle_login(vha, fcport);
3528 - break;
3529 - case DSC_DELETE_PEND:
3530 - break;
3531 - default:
3532 - ql_dbg(ql_dbg_disc, vha, 0x2064,
3533 - "%s %d %8phC post del sess\n",
3534 - __func__, __LINE__, fcport->port_name);
3535 - qlt_schedule_sess_for_deletion_lock(fcport);
3536 - break;
3537 + if (ea->rc) {
3538 + /* cable is disconnected */
3539 + list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3540 + if (fcport->d_id.b24 == ea->id.b24) {
3541 + ql_dbg(ql_dbg_disc, vha, 0xffff,
3542 + "%s %d %8phC DS %d\n",
3543 + __func__, __LINE__,
3544 + fcport->port_name,
3545 + fcport->disc_state);
3546 + fcport->scan_state = QLA_FCPORT_SCAN;
3547 + switch (fcport->disc_state) {
3548 + case DSC_DELETED:
3549 + case DSC_DELETE_PEND:
3550 + break;
3551 + default:
3552 + ql_dbg(ql_dbg_disc, vha, 0xffff,
3553 + "%s %d %8phC post del sess\n",
3554 + __func__, __LINE__,
3555 + fcport->port_name);
3556 + qlt_schedule_sess_for_deletion_lock
3557 + (fcport);
3558 + break;
3559 + }
3560 + }
3561 }
3562 } else {
3563 - /* create new fcport */
3564 - ql_dbg(ql_dbg_disc, vha, 0x2065,
3565 - "%s %d %8phC post new sess\n",
3566 - __func__, __LINE__, ea->port_name);
3567 + /* cable is connected */
3568 + fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
3569 + if (fcport) {
3570 + list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3571 + list) {
3572 + if ((conflict->d_id.b24 == ea->id.b24) &&
3573 + (fcport != conflict)) {
3574 + /* Two fcports have conflicting Nport IDs, or
3575 + * an existing fcport's Nport ID conflicts
3576 + * with the new fcport.
3577 + */
3578 +
3579 + ql_dbg(ql_dbg_disc, vha, 0xffff,
3580 + "%s %d %8phC DS %d\n",
3581 + __func__, __LINE__,
3582 + conflict->port_name,
3583 + conflict->disc_state);
3584 + conflict->scan_state = QLA_FCPORT_SCAN;
3585 + switch (conflict->disc_state) {
3586 + case DSC_DELETED:
3587 + case DSC_DELETE_PEND:
3588 + break;
3589 + default:
3590 + ql_dbg(ql_dbg_disc, vha, 0xffff,
3591 + "%s %d %8phC post del sess\n",
3592 + __func__, __LINE__,
3593 + conflict->port_name);
3594 + qlt_schedule_sess_for_deletion_lock
3595 + (conflict);
3596 + break;
3597 + }
3598 + }
3599 + }
3600
3601 - qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL);
3602 + fcport->rscn_gen++;
3603 + fcport->scan_state = QLA_FCPORT_FOUND;
3604 + fcport->flags |= FCF_FABRIC_DEVICE;
3605 + switch (fcport->disc_state) {
3606 + case DSC_LOGIN_COMPLETE:
3607 + /* recheck session is still intact. */
3608 + ql_dbg(ql_dbg_disc, vha, 0x210d,
3609 + "%s %d %8phC revalidate session with ADISC\n",
3610 + __func__, __LINE__, fcport->port_name);
3611 + qla24xx_post_gpdb_work(vha, fcport,
3612 + PDO_FORCE_ADISC);
3613 + break;
3614 + case DSC_DELETED:
3615 + ql_dbg(ql_dbg_disc, vha, 0x210d,
3616 + "%s %d %8phC login\n", __func__, __LINE__,
3617 + fcport->port_name);
3618 + fcport->d_id = ea->id;
3619 + qla24xx_fcport_handle_login(vha, fcport);
3620 + break;
3621 + case DSC_DELETE_PEND:
3622 + fcport->d_id = ea->id;
3623 + break;
3624 + default:
3625 + fcport->d_id = ea->id;
3626 + break;
3627 + }
3628 + } else {
3629 + list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3630 + list) {
3631 + if (conflict->d_id.b24 == ea->id.b24) {
3632 + /* 2 fcports with conflict Nport ID or
3633 + /* Two fcports have conflicting Nport IDs, or
3634 + * an existing fcport's Nport ID conflicts
3635 + * with the new fcport.
3636 + ql_dbg(ql_dbg_disc, vha, 0xffff,
3637 + "%s %d %8phC DS %d\n",
3638 + __func__, __LINE__,
3639 + conflict->port_name,
3640 + conflict->disc_state);
3641 +
3642 + conflict->scan_state = QLA_FCPORT_SCAN;
3643 + switch (conflict->disc_state) {
3644 + case DSC_DELETED:
3645 + case DSC_DELETE_PEND:
3646 + break;
3647 + default:
3648 + ql_dbg(ql_dbg_disc, vha, 0xffff,
3649 + "%s %d %8phC post del sess\n",
3650 + __func__, __LINE__,
3651 + conflict->port_name);
3652 + qlt_schedule_sess_for_deletion_lock
3653 + (conflict);
3654 + break;
3655 + }
3656 + }
3657 + }
3658 +
3659 + /* create new fcport */
3660 + ql_dbg(ql_dbg_disc, vha, 0x2065,
3661 + "%s %d %8phC post new sess\n",
3662 + __func__, __LINE__, ea->port_name);
3663 + qla24xx_post_newsess_work(vha, &ea->id,
3664 + ea->port_name, NULL);
3665 + }
3666 }
3667 }
3668
3669 @@ -3205,11 +3314,18 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
3670 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3671 struct event_arg ea;
3672 struct qla_work_evt *e;
3673 + unsigned long flags;
3674
3675 - ql_dbg(ql_dbg_disc, vha, 0x2066,
3676 - "Async done-%s res %x ID %3phC. %8phC\n",
3677 - sp->name, res, ct_req->req.port_id.port_id,
3678 - ct_rsp->rsp.gpn_id.port_name);
3679 + if (res)
3680 + ql_dbg(ql_dbg_disc, vha, 0x2066,
3681 + "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3682 + sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
3683 + ct_rsp->rsp.gpn_id.port_name);
3684 + else
3685 + ql_dbg(ql_dbg_disc, vha, 0x2066,
3686 + "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3687 + sp->name, sp->gen1, ct_req->req.port_id.port_id,
3688 + ct_rsp->rsp.gpn_id.port_name);
3689
3690 memset(&ea, 0, sizeof(ea));
3691 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3692 @@ -3220,6 +3336,23 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
3693 ea.rc = res;
3694 ea.event = FCME_GPNID_DONE;
3695
3696 + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3697 + list_del(&sp->elem);
3698 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3699 +
3700 + if (res) {
3701 + if (res == QLA_FUNCTION_TIMEOUT) {
3702 + qla24xx_post_gpnid_work(sp->vha, &ea.id);
3703 + sp->free(sp);
3704 + return;
3705 + }
3706 + } else if (sp->gen1) {
3707 + /* There was another RSCN for this Nport ID */
3708 + qla24xx_post_gpnid_work(sp->vha, &ea.id);
3709 + sp->free(sp);
3710 + return;
3711 + }
3712 +
3713 qla2x00_fcport_event_handler(vha, &ea);
3714
3715 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE);
3716 @@ -3253,8 +3386,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3717 {
3718 int rval = QLA_FUNCTION_FAILED;
3719 struct ct_sns_req *ct_req;
3720 - srb_t *sp;
3721 + srb_t *sp, *tsp;
3722 struct ct_sns_pkt *ct_sns;
3723 + unsigned long flags;
3724
3725 if (!vha->flags.online)
3726 goto done;
3727 @@ -3265,8 +3399,22 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3728
3729 sp->type = SRB_CT_PTHRU_CMD;
3730 sp->name = "gpnid";
3731 + sp->u.iocb_cmd.u.ctarg.id = *id;
3732 + sp->gen1 = 0;
3733 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3734
3735 + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3736 + list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3737 + if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3738 + tsp->gen1++;
3739 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3740 + sp->free(sp);
3741 + goto done;
3742 + }
3743 + }
3744 + list_add_tail(&sp->elem, &vha->gpnid_list);
3745 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3746 +
3747 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3748 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3749 GFP_KERNEL);
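
qla24xx_async_gpnid() now keeps every in-flight GPN_ID command on
vha->gpnid_list under the session lock: a second query for the same
24-bit port ID merely bumps gen1 on the pending one, and the completion
handler re-queues the query when gen1 shows another RSCN arrived in the
meantime. A single-threaded sketch of that de-duplication-with-
generation scheme (the driver does this under a spinlock):

    #include <stdio.h>
    #include <stdlib.h>

    struct query {
        unsigned id;          /* 24-bit port ID */
        int gen;              /* RSCNs seen while this query was in flight */
        struct query *next;
    };

    static struct query *inflight;

    static int submit(unsigned id)
    {
        struct query *q;

        for (q = inflight; q; q = q->next)
            if (q->id == id) {
                q->gen++;     /* coalesce into the pending query */
                return 0;
            }
        q = malloc(sizeof(*q));
        q->id = id;
        q->gen = 0;
        q->next = inflight;
        inflight = q;
        printf("issued query for %06x\n", id);
        return 1;
    }

    static void complete(struct query *q)
    {
        if (q->gen)           /* another RSCN raced in: ask again */
            printf("requery %06x (gen %d)\n", q->id, q->gen);
    }

    int main(void)
    {
        submit(0x112233);
        submit(0x112233);     /* coalesced, no second issue */
        complete(inflight);
        return 0;
    }
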
3750 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
3751 index 1bafa043f9f1..6082389f25c3 100644
3752 --- a/drivers/scsi/qla2xxx/qla_init.c
3753 +++ b/drivers/scsi/qla2xxx/qla_init.c
3754 @@ -863,6 +863,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
3755 int rval = ea->rc;
3756 fc_port_t *fcport = ea->fcport;
3757 unsigned long flags;
3758 + u16 opt = ea->sp->u.iocb_cmd.u.mbx.out_mb[10];
3759
3760 fcport->flags &= ~FCF_ASYNC_SENT;
3761
3762 @@ -893,7 +894,8 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
3763 }
3764
3765 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3766 - ea->fcport->login_gen++;
3767 + if (opt != PDO_FORCE_ADISC)
3768 + ea->fcport->login_gen++;
3769 ea->fcport->deleted = 0;
3770 ea->fcport->logout_on_delete = 1;
3771
3772 @@ -917,6 +919,16 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
3773
3774 qla24xx_post_gpsc_work(vha, fcport);
3775 }
3776 + } else if (ea->fcport->login_succ) {
3777 + /*
3778 + * We have an existing session. A late RSCN delivery
3779 + * must have triggered the session to be re-validated.
3780 + * The session is still valid.
3781 + */
3782 + ql_dbg(ql_dbg_disc, vha, 0x20d6,
3783 + "%s %d %8phC session revalidate success\n",
3784 + __func__, __LINE__, fcport->port_name);
3785 + fcport->disc_state = DSC_LOGIN_COMPLETE;
3786 }
3787 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3788 } /* gpdb event */
3789 @@ -963,7 +975,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
3790 ql_dbg(ql_dbg_disc, vha, 0x20bd,
3791 "%s %d %8phC post gnl\n",
3792 __func__, __LINE__, fcport->port_name);
3793 - qla24xx_async_gnl(vha, fcport);
3794 + qla24xx_post_gnl_work(vha, fcport);
3795 } else {
3796 ql_dbg(ql_dbg_disc, vha, 0x20bf,
3797 "%s %d %8phC post login\n",
3798 @@ -1040,9 +1052,8 @@ void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
3799 switch (fcport->disc_state) {
3800 case DSC_DELETED:
3801 case DSC_LOGIN_COMPLETE:
3802 - qla24xx_post_gidpn_work(fcport->vha, fcport);
3803 + qla24xx_post_gpnid_work(fcport->vha, &ea->id);
3804 break;
3805 -
3806 default:
3807 break;
3808 }
3809 @@ -1132,7 +1143,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
3810 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
3811 __func__, __LINE__, fcport->port_name);
3812
3813 - qla24xx_async_gidpn(vha, fcport);
3814 + qla24xx_post_gidpn_work(vha, fcport);
3815 return;
3816 }
3817
3818 @@ -1347,6 +1358,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
3819 srb_t *sp = ptr;
3820 struct srb_iocb *abt = &sp->u.iocb_cmd;
3821
3822 + del_timer(&sp->u.iocb_cmd.timer);
3823 complete(&abt->u.abt.comp);
3824 }
3825
3826 @@ -1452,6 +1464,8 @@ static void
3827 qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
3828 {
3829 port_id_t cid; /* conflict Nport id */
3830 + u16 lid;
3831 + struct fc_port *conflict_fcport;
3832
3833 switch (ea->data[0]) {
3834 case MBS_COMMAND_COMPLETE:
3835 @@ -1467,8 +1481,12 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
3836 qla24xx_post_prli_work(vha, ea->fcport);
3837 } else {
3838 ql_dbg(ql_dbg_disc, vha, 0x20ea,
3839 - "%s %d %8phC post gpdb\n",
3840 - __func__, __LINE__, ea->fcport->port_name);
3841 + "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
3842 + __func__, __LINE__, ea->fcport->port_name,
3843 + ea->fcport->loop_id, ea->fcport->d_id.b24);
3844 +
3845 + set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
3846 + ea->fcport->loop_id = FC_NO_LOOP_ID;
3847 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
3848 ea->fcport->logout_on_delete = 1;
3849 ea->fcport->send_els_logo = 0;
3850 @@ -1513,8 +1531,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
3851 ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
3852 ea->fcport->d_id.b.al_pa);
3853
3854 - qla2x00_clear_loop_id(ea->fcport);
3855 - qla24xx_post_gidpn_work(vha, ea->fcport);
3856 + lid = ea->iop[1] & 0xffff;
3857 + qlt_find_sess_invalidate_other(vha,
3858 + wwn_to_u64(ea->fcport->port_name),
3859 + ea->fcport->d_id, lid, &conflict_fcport);
3860 +
3861 + if (conflict_fcport) {
3862 + /*
3863 + * Another fcport shares the same loop_id/nport id.
3864 + * The conflicting fcport needs to finish cleanup before this
3865 + * fcport can proceed to login.
3866 + */
3867 + conflict_fcport->conflict = ea->fcport;
3868 + ea->fcport->login_pause = 1;
3869 +
3870 + ql_dbg(ql_dbg_disc, vha, 0x20ed,
3871 + "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
3872 + __func__, __LINE__, ea->fcport->port_name,
3873 + ea->fcport->d_id.b24, lid);
3874 + qla2x00_clear_loop_id(ea->fcport);
3875 + qla24xx_post_gidpn_work(vha, ea->fcport);
3876 + } else {
3877 + ql_dbg(ql_dbg_disc, vha, 0x20ed,
3878 + "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
3879 + __func__, __LINE__, ea->fcport->port_name,
3880 + ea->fcport->d_id.b24, lid);
3881 +
3882 + qla2x00_clear_loop_id(ea->fcport);
3883 + set_bit(lid, vha->hw->loop_id_map);
3884 + ea->fcport->loop_id = lid;
3885 + ea->fcport->keep_nport_handle = 0;
3886 + qlt_schedule_sess_for_deletion(ea->fcport, false);
3887 + }
3888 break;
3889 }
3890 return;
3891 @@ -8173,9 +8221,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
3892 int ret = QLA_FUNCTION_FAILED;
3893 struct qla_hw_data *ha = qpair->hw;
3894
3895 - if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
3896 - goto fail;
3897 -
3898 qpair->delete_in_progress = 1;
3899 while (atomic_read(&qpair->ref_count))
3900 msleep(500);
3901 @@ -8183,6 +8228,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
3902 ret = qla25xx_delete_req_que(vha, qpair->req);
3903 if (ret != QLA_SUCCESS)
3904 goto fail;
3905 +
3906 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
3907 if (ret != QLA_SUCCESS)
3908 goto fail;
3909 diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
3910 index d810a447cb4a..8ea59586f4f1 100644
3911 --- a/drivers/scsi/qla2xxx/qla_iocb.c
3912 +++ b/drivers/scsi/qla2xxx/qla_iocb.c
3913 @@ -2392,26 +2392,13 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
3914 srb_t *sp = data;
3915 fc_port_t *fcport = sp->fcport;
3916 struct scsi_qla_host *vha = sp->vha;
3917 - struct qla_hw_data *ha = vha->hw;
3918 struct srb_iocb *lio = &sp->u.iocb_cmd;
3919 - unsigned long flags = 0;
3920
3921 ql_dbg(ql_dbg_io, vha, 0x3069,
3922 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
3923 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
3924 fcport->d_id.b.al_pa);
3925
3926 - /* Abort the exchange */
3927 - spin_lock_irqsave(&ha->hardware_lock, flags);
3928 - if (ha->isp_ops->abort_command(sp)) {
3929 - ql_dbg(ql_dbg_io, vha, 0x3070,
3930 - "mbx abort_command failed.\n");
3931 - } else {
3932 - ql_dbg(ql_dbg_io, vha, 0x3071,
3933 - "mbx abort_command success.\n");
3934 - }
3935 - spin_unlock_irqrestore(&ha->hardware_lock, flags);
3936 -
3937 complete(&lio->u.els_logo.comp);
3938 }
3939
3940 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
3941 index 2fd79129bb2a..85382387a52b 100644
3942 --- a/drivers/scsi/qla2xxx/qla_isr.c
3943 +++ b/drivers/scsi/qla2xxx/qla_isr.c
3944 @@ -1574,7 +1574,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
3945 /* borrowing sts_entry_24xx.comp_status.
3946 same location as ct_entry_24xx.comp_status
3947 */
3948 - res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
3949 + res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
3950 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
3951 sp->name);
3952 sp->done(sp, res);
3953 @@ -2369,7 +2369,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3954 int res = 0;
3955 uint16_t state_flags = 0;
3956 uint16_t retry_delay = 0;
3957 - uint8_t no_logout = 0;
3958
3959 sts = (sts_entry_t *) pkt;
3960 sts24 = (struct sts_entry_24xx *) pkt;
3961 @@ -2640,7 +2639,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3962 break;
3963
3964 case CS_PORT_LOGGED_OUT:
3965 - no_logout = 1;
3966 case CS_PORT_CONFIG_CHG:
3967 case CS_PORT_BUSY:
3968 case CS_INCOMPLETE:
3969 @@ -2671,9 +2669,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3970 port_state_str[atomic_read(&fcport->state)],
3971 comp_status);
3972
3973 - if (no_logout)
3974 - fcport->logout_on_delete = 0;
3975 -
3976 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
3977 qlt_schedule_sess_for_deletion_lock(fcport);
3978 }
3979 diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
3980 index cb717d47339f..e2b5fa47bb57 100644
3981 --- a/drivers/scsi/qla2xxx/qla_mbx.c
3982 +++ b/drivers/scsi/qla2xxx/qla_mbx.c
3983 @@ -6160,8 +6160,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
3984 }
3985
3986 /* Check for logged in state. */
3987 - if (current_login_state != PDS_PRLI_COMPLETE &&
3988 - last_login_state != PDS_PRLI_COMPLETE) {
3989 + if (current_login_state != PDS_PRLI_COMPLETE) {
3990 ql_dbg(ql_dbg_mbx, vha, 0x119a,
3991 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
3992 current_login_state, last_login_state, fcport->loop_id);
3993 diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
3994 index bd9f14bf7ac2..e538e6308885 100644
3995 --- a/drivers/scsi/qla2xxx/qla_mid.c
3996 +++ b/drivers/scsi/qla2xxx/qla_mid.c
3997 @@ -343,15 +343,21 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
3998 "FCPort update end.\n");
3999 }
4000
4001 - if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
4002 - !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
4003 - atomic_read(&vha->loop_state) != LOOP_DOWN) {
4004 -
4005 - ql_dbg(ql_dbg_dpc, vha, 0x4018,
4006 - "Relogin needed scheduled.\n");
4007 - qla2x00_relogin(vha);
4008 - ql_dbg(ql_dbg_dpc, vha, 0x4019,
4009 - "Relogin needed end.\n");
4010 + if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
4011 + !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
4012 + atomic_read(&vha->loop_state) != LOOP_DOWN) {
4013 +
4014 + if (!vha->relogin_jif ||
4015 + time_after_eq(jiffies, vha->relogin_jif)) {
4016 + vha->relogin_jif = jiffies + HZ;
4017 + clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
4018 +
4019 + ql_dbg(ql_dbg_dpc, vha, 0x4018,
4020 + "Relogin needed scheduled.\n");
4021 + qla2x00_relogin(vha);
4022 + ql_dbg(ql_dbg_dpc, vha, 0x4019,
4023 + "Relogin needed end.\n");
4024 + }
4025 }
4026
4027 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
4028 @@ -569,14 +575,15 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4029 int
4030 qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
4031 {
4032 - int ret = -1;
4033 + int ret = QLA_SUCCESS;
4034
4035 - if (req) {
4036 + if (req && vha->flags.qpairs_req_created) {
4037 req->options |= BIT_0;
4038 ret = qla25xx_init_req_que(vha, req);
4039 + if (ret != QLA_SUCCESS)
4040 + return QLA_FUNCTION_FAILED;
4041 }
4042 - if (ret == QLA_SUCCESS)
4043 - qla25xx_free_req_que(vha, req);
4044 + qla25xx_free_req_que(vha, req);
4045
4046 return ret;
4047 }
4048 @@ -584,14 +591,15 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
4049 int
4050 qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4051 {
4052 - int ret = -1;
4053 + int ret = QLA_SUCCESS;
4054
4055 - if (rsp) {
4056 + if (rsp && vha->flags.qpairs_rsp_created) {
4057 rsp->options |= BIT_0;
4058 ret = qla25xx_init_rsp_que(vha, rsp);
4059 + if (ret != QLA_SUCCESS)
4060 + return QLA_FUNCTION_FAILED;
4061 }
4062 - if (ret == QLA_SUCCESS)
4063 - qla25xx_free_rsp_que(vha, rsp);
4064 + qla25xx_free_rsp_que(vha, rsp);
4065
4066 return ret;
4067 }
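
Both this DPC path and the one in qla_os.c below stop clearing
RELOGIN_NEEDED eagerly; the flag is cleared, and the relogin scan run,
only once the relogin_jif deadline has passed, throttling the scan to
once per HZ jiffies however often login events fire. A userspace sketch
of the throttle using a monotonic clock (one second stands in for HZ):

    #include <stdio.h>
    #include <time.h>

    static double now_sec(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec + ts.tv_nsec / 1e9;
    }

    static double deadline;       /* 0 means "never ran", like relogin_jif */
    static int relogin_needed = 1;

    static void dpc_pass(void)
    {
        if (!relogin_needed)
            return;
        if (deadline && now_sec() < deadline)
            return;               /* too soon: keep the flag set, retry later */
        deadline = now_sec() + 1.0;
        relogin_needed = 0;       /* cleared only when the scan really runs */
        printf("relogin scan at %.3f\n", now_sec());
    }

    int main(void)
    {
        relogin_needed = 1; dpc_pass();   /* runs */
        relogin_needed = 1; dpc_pass();   /* throttled */
        return 0;
    }
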
4068 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
4069 index 46f2d0cf7c0d..1f69e89b950f 100644
4070 --- a/drivers/scsi/qla2xxx/qla_os.c
4071 +++ b/drivers/scsi/qla2xxx/qla_os.c
4072 @@ -3011,9 +3011,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4073 base_vha = qla2x00_create_host(sht, ha);
4074 if (!base_vha) {
4075 ret = -ENOMEM;
4076 - qla2x00_mem_free(ha);
4077 - qla2x00_free_req_que(ha, req);
4078 - qla2x00_free_rsp_que(ha, rsp);
4079 goto probe_hw_failed;
4080 }
4081
4082 @@ -3074,7 +3071,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4083 /* Set up the irqs */
4084 ret = qla2x00_request_irqs(ha, rsp);
4085 if (ret)
4086 - goto probe_init_failed;
4087 + goto probe_hw_failed;
4088
4089 /* Alloc arrays of request and response ring ptrs */
4090 if (!qla2x00_alloc_queues(ha, req, rsp)) {
4091 @@ -3193,10 +3190,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4092 host->can_queue, base_vha->req,
4093 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
4094
4095 + ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
4096 +
4097 if (ha->mqenable) {
4098 bool mq = false;
4099 bool startit = false;
4100 - ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
4101
4102 if (QLA_TGT_MODE_ENABLED()) {
4103 mq = true;
4104 @@ -3390,6 +3388,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4105 scsi_host_put(base_vha->host);
4106
4107 probe_hw_failed:
4108 + qla2x00_mem_free(ha);
4109 + qla2x00_free_req_que(ha, req);
4110 + qla2x00_free_rsp_que(ha, rsp);
4111 qla2x00_clear_drv_active(ha);
4112
4113 iospace_config_failed:
4114 @@ -4514,6 +4515,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4115 INIT_LIST_HEAD(&vha->qp_list);
4116 INIT_LIST_HEAD(&vha->gnl.fcports);
4117 INIT_LIST_HEAD(&vha->nvme_rport_list);
4118 + INIT_LIST_HEAD(&vha->gpnid_list);
4119
4120 spin_lock_init(&vha->work_lock);
4121 spin_lock_init(&vha->cmd_list_lock);
4122 @@ -4748,20 +4750,49 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4123 } else {
4124 list_add_tail(&fcport->list, &vha->vp_fcports);
4125
4126 - if (pla) {
4127 - qlt_plogi_ack_link(vha, pla, fcport,
4128 - QLT_PLOGI_LINK_SAME_WWN);
4129 - pla->ref_count--;
4130 - }
4131 + }
4132 + if (pla) {
4133 + qlt_plogi_ack_link(vha, pla, fcport,
4134 + QLT_PLOGI_LINK_SAME_WWN);
4135 + pla->ref_count--;
4136 }
4137 }
4138 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4139
4140 if (fcport) {
4141 - if (pla)
4142 + if (pla) {
4143 qlt_plogi_ack_unref(vha, pla);
4144 - else
4145 - qla24xx_async_gffid(vha, fcport);
4146 + } else {
4147 + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4148 + tfcp = qla2x00_find_fcport_by_nportid(vha,
4149 + &e->u.new_sess.id, 1);
4150 + if (tfcp && (tfcp != fcport)) {
4151 + /*
4152 + * We have a conflicting fcport with the same Nport ID.
4153 + */
4154 + ql_dbg(ql_dbg_disc, vha, 0xffff,
4155 + "%s %8phC found conflict b4 add. DS %d LS %d\n",
4156 + __func__, tfcp->port_name, tfcp->disc_state,
4157 + tfcp->fw_login_state);
4158 +
4159 + switch (tfcp->disc_state) {
4160 + case DSC_DELETED:
4161 + break;
4162 + case DSC_DELETE_PEND:
4163 + fcport->login_pause = 1;
4164 + tfcp->conflict = fcport;
4165 + break;
4166 + default:
4167 + fcport->login_pause = 1;
4168 + tfcp->conflict = fcport;
4169 + qlt_schedule_sess_for_deletion_lock
4170 + (tfcp);
4171 + break;
4172 + }
4173 + }
4174 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4175 + qla24xx_async_gnl(vha, fcport);
4176 + }
4177 }
4178
4179 if (free_fcport) {
4180 @@ -4874,7 +4905,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
4181 */
4182 if (atomic_read(&fcport->state) != FCS_ONLINE &&
4183 fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
4184 - fcport->login_retry--;
4185 +
4186 if (fcport->flags & FCF_FABRIC_DEVICE) {
4187 ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
4188 "%s %8phC DS %d LS %d\n", __func__,
4189 @@ -4885,6 +4916,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
4190 ea.fcport = fcport;
4191 qla2x00_fcport_event_handler(vha, &ea);
4192 } else {
4193 + fcport->login_retry--;
4194 status = qla2x00_local_device_login(vha,
4195 fcport);
4196 if (status == QLA_SUCCESS) {
4197 @@ -5867,16 +5899,21 @@ qla2x00_do_dpc(void *data)
4198 }
4199
4200 /* Retry each device up to login retry count */
4201 - if ((test_and_clear_bit(RELOGIN_NEEDED,
4202 - &base_vha->dpc_flags)) &&
4203 + if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
4204 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
4205 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
4206
4207 - ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
4208 - "Relogin scheduled.\n");
4209 - qla2x00_relogin(base_vha);
4210 - ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
4211 - "Relogin end.\n");
4212 + if (!base_vha->relogin_jif ||
4213 + time_after_eq(jiffies, base_vha->relogin_jif)) {
4214 + base_vha->relogin_jif = jiffies + HZ;
4215 + clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
4216 +
4217 + ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
4218 + "Relogin scheduled.\n");
4219 + qla2x00_relogin(base_vha);
4220 + ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
4221 + "Relogin end.\n");
4222 + }
4223 }
4224 loop_resync_check:
4225 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
4226 @@ -6608,9 +6645,14 @@ qla83xx_disable_laser(scsi_qla_host_t *vha)
4227
4228 static int qla2xxx_map_queues(struct Scsi_Host *shost)
4229 {
4230 + int rc;
4231 scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
4232
4233 - return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
4234 + if (USER_CTRL_IRQ(vha->hw))
4235 + rc = blk_mq_map_queues(&shost->tag_set);
4236 + else
4237 + rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
4238 + return rc;
4239 }
4240
4241 static const struct pci_error_handlers qla2xxx_err_handler = {
4242 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
4243 index 18069edd4773..cb35bb1ae305 100644
4244 --- a/drivers/scsi/qla2xxx/qla_target.c
4245 +++ b/drivers/scsi/qla2xxx/qla_target.c
4246 @@ -665,7 +665,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
4247 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
4248
4249 sp->u.iocb_cmd.u.nack.ntfy = ntfy;
4250 -
4251 + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4252 sp->done = qla2x00_async_nack_sp_done;
4253
4254 rval = qla2x00_start_sp(sp);
4255 @@ -890,6 +890,17 @@ qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
4256 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
4257 pla->ref_count, pla, link);
4258
4259 + if (link == QLT_PLOGI_LINK_CONFLICT) {
4260 + switch (sess->disc_state) {
4261 + case DSC_DELETED:
4262 + case DSC_DELETE_PEND:
4263 + pla->ref_count--;
4264 + return;
4265 + default:
4266 + break;
4267 + }
4268 + }
4269 +
4270 if (sess->plogi_link[link])
4271 qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
4272
4273 @@ -974,7 +985,7 @@ static void qlt_free_session_done(struct work_struct *work)
4274 qlt_send_first_logo(vha, &logo);
4275 }
4276
4277 - if (sess->logout_on_delete) {
4278 + if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
4279 int rc;
4280
4281 rc = qla2x00_post_async_logout_work(vha, sess, NULL);
4282 @@ -1033,8 +1044,7 @@ static void qlt_free_session_done(struct work_struct *work)
4283 sess->login_succ = 0;
4284 }
4285
4286 - if (sess->chip_reset != ha->base_qpair->chip_reset)
4287 - qla2x00_clear_loop_id(sess);
4288 + qla2x00_clear_loop_id(sess);
4289
4290 if (sess->conflict) {
4291 sess->conflict->login_pause = 0;
4292 @@ -1205,7 +1215,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess,
4293 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
4294 "Scheduling sess %p for deletion\n", sess);
4295
4296 - schedule_work(&sess->del_work);
4297 + INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
4298 + queue_work(sess->vha->hw->wq, &sess->del_work);
4299 }
4300
4301 void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
4302 @@ -1560,8 +1571,11 @@ static void qlt_release(struct qla_tgt *tgt)
4303
4304 btree_destroy64(&tgt->lun_qpair_map);
4305
4306 - if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target)
4307 - ha->tgt.tgt_ops->remove_target(vha);
4308 + if (vha->vp_idx)
4309 + if (ha->tgt.tgt_ops &&
4310 + ha->tgt.tgt_ops->remove_target &&
4311 + vha->vha_tgt.target_lport_ptr)
4312 + ha->tgt.tgt_ops->remove_target(vha);
4313
4314 vha->vha_tgt.qla_tgt = NULL;
4315
4316 @@ -3708,7 +3722,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
4317 term = 1;
4318
4319 if (term)
4320 - qlt_term_ctio_exchange(qpair, ctio, cmd, status);
4321 + qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
4322
4323 return term;
4324 }
4325 @@ -4584,9 +4598,9 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4326 "Invalidating sess %p loop_id %d wwn %llx.\n",
4327 other_sess, other_sess->loop_id, other_wwn);
4328
4329 -
4330 other_sess->keep_nport_handle = 1;
4331 - *conflict_sess = other_sess;
4332 + if (other_sess->disc_state != DSC_DELETED)
4333 + *conflict_sess = other_sess;
4334 qlt_schedule_sess_for_deletion(other_sess,
4335 true);
4336 }
4337 @@ -4733,6 +4747,10 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4338 sess->d_id = port_id;
4339 sess->login_gen++;
4340
4341 + ql_dbg(ql_dbg_disc, vha, 0x20f9,
4342 + "%s %d %8phC DS %d\n",
4343 + __func__, __LINE__, sess->port_name, sess->disc_state);
4344 +
4345 switch (sess->disc_state) {
4346 case DSC_DELETED:
4347 qlt_plogi_ack_unref(vha, pla);
4348 @@ -4782,12 +4800,20 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4349 }
4350
4351 if (conflict_sess) {
4352 - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4353 - "PRLI with conflicting sess %p port %8phC\n",
4354 - conflict_sess, conflict_sess->port_name);
4355 - qlt_send_term_imm_notif(vha, iocb, 1);
4356 - res = 0;
4357 - break;
4358 + switch (conflict_sess->disc_state) {
4359 + case DSC_DELETED:
4360 + case DSC_DELETE_PEND:
4361 + break;
4362 + default:
4363 + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4364 + "PRLI with conflicting sess %p port %8phC\n",
4365 + conflict_sess, conflict_sess->port_name);
4366 + conflict_sess->fw_login_state =
4367 + DSC_LS_PORT_UNAVAIL;
4368 + qlt_send_term_imm_notif(vha, iocb, 1);
4369 + res = 0;
4370 + break;
4371 + }
4372 }
4373
4374 if (sess != NULL) {
4375 @@ -5755,7 +5781,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
4376 unsigned long flags;
4377 u8 newfcport = 0;
4378
4379 - fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
4380 + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
4381 if (!fcport) {
4382 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
4383 "qla_target(%d): Allocation of tmp FC port failed",
4384 @@ -5784,6 +5810,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
4385 tfcp->port_type = fcport->port_type;
4386 tfcp->supported_classes = fcport->supported_classes;
4387 tfcp->flags |= fcport->flags;
4388 + tfcp->scan_state = QLA_FCPORT_FOUND;
4389
4390 del = fcport;
4391 fcport = tfcp;
4392 diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
4393 index 3737c6d3b064..61628581c6a2 100644
4394 --- a/drivers/scsi/scsi_error.c
4395 +++ b/drivers/scsi/scsi_error.c
4396 @@ -222,7 +222,8 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
4397
4398 static void scsi_eh_inc_host_failed(struct rcu_head *head)
4399 {
4400 - struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
4401 + struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
4402 + struct Scsi_Host *shost = scmd->device->host;
4403 unsigned long flags;
4404
4405 spin_lock_irqsave(shost->host_lock, flags);
4406 @@ -258,7 +259,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
4407 * Ensure that all tasks observe the host state change before the
4408 * host_failed change.
4409 */
4410 - call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
4411 + call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);
4412 }
4413
4414 /**
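
The scsi_error.c fix moves the rcu_head from the shared Scsi_Host into
each scsi_cmnd (initialized per command in scsi_lib.c below), so
concurrent error handling can never reuse one head; the callback then
recovers its command with container_of() and reaches the host through
cmd->device->host. A runnable demo of the container_of() step with
simplified stand-in types:

    #include <stdio.h>
    #include <stddef.h>

    /* Given a pointer to a member embedded in a larger struct, recover
     * the struct itself. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head { void (*func)(struct rcu_head *); };

    struct scsi_cmnd_like {
        int tag;
        struct rcu_head rcu;      /* one head per command, per the fix */
    };

    static void callback(struct rcu_head *head)
    {
        struct scsi_cmnd_like *cmd =
            container_of(head, struct scsi_cmnd_like, rcu);

        printf("callback for command tag %d\n", cmd->tag);
    }

    int main(void)
    {
        struct scsi_cmnd_like cmd = { .tag = 42 };

        cmd.rcu.func = callback;
        cmd.rcu.func(&cmd.rcu);   /* what call_rcu() would invoke later */
        return 0;
    }
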
4415 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
4416 index 83856ee14851..8f9a2e50d742 100644
4417 --- a/drivers/scsi/scsi_lib.c
4418 +++ b/drivers/scsi/scsi_lib.c
4419 @@ -670,6 +670,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
4420 if (!blk_rq_is_scsi(req)) {
4421 WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
4422 cmd->flags &= ~SCMD_INITIALIZED;
4423 + destroy_rcu_head(&cmd->rcu);
4424 }
4425
4426 if (req->mq_ctx) {
4427 @@ -1150,6 +1151,7 @@ void scsi_initialize_rq(struct request *rq)
4428 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
4429
4430 scsi_req_init(&cmd->req);
4431 + init_rcu_head(&cmd->rcu);
4432 cmd->jiffies_at_alloc = jiffies;
4433 cmd->retries = 0;
4434 }
4435 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
4436 index eb30f3e09a47..71458f493cf8 100644
4437 --- a/drivers/virtio/virtio_ring.c
4438 +++ b/drivers/virtio/virtio_ring.c
4439 @@ -428,8 +428,6 @@ static inline int virtqueue_add(struct virtqueue *_vq,
4440 i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
4441 }
4442
4443 - vq->vq.num_free += total_sg;
4444 -
4445 if (indirect)
4446 kfree(desc);
4447
4448 diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
4449 index 67fbe35ce7cf..b0a158073abd 100644
4450 --- a/drivers/watchdog/hpwdt.c
4451 +++ b/drivers/watchdog/hpwdt.c
4452 @@ -28,16 +28,7 @@
4453 #include <linux/types.h>
4454 #include <linux/uaccess.h>
4455 #include <linux/watchdog.h>
4456 -#ifdef CONFIG_HPWDT_NMI_DECODING
4457 -#include <linux/dmi.h>
4458 -#include <linux/spinlock.h>
4459 -#include <linux/nmi.h>
4460 -#include <linux/kdebug.h>
4461 -#include <linux/notifier.h>
4462 -#include <asm/set_memory.h>
4463 -#endif /* CONFIG_HPWDT_NMI_DECODING */
4464 #include <asm/nmi.h>
4465 -#include <asm/frame.h>
4466
4467 #define HPWDT_VERSION "1.4.0"
4468 #define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
4469 @@ -48,10 +39,14 @@
4470 static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
4471 static unsigned int reload; /* the computed soft_margin */
4472 static bool nowayout = WATCHDOG_NOWAYOUT;
4473 +#ifdef CONFIG_HPWDT_NMI_DECODING
4474 +static unsigned int allow_kdump = 1;
4475 +#endif
4476 static char expect_release;
4477 static unsigned long hpwdt_is_open;
4478
4479 static void __iomem *pci_mem_addr; /* the PCI-memory address */
4480 +static unsigned long __iomem *hpwdt_nmistat;
4481 static unsigned long __iomem *hpwdt_timer_reg;
4482 static unsigned long __iomem *hpwdt_timer_con;
4483
4484 @@ -62,373 +57,6 @@ static const struct pci_device_id hpwdt_devices[] = {
4485 };
4486 MODULE_DEVICE_TABLE(pci, hpwdt_devices);
4487
4488 -#ifdef CONFIG_HPWDT_NMI_DECODING
4489 -#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */
4490 -#define CRU_BIOS_SIGNATURE_VALUE 0x55524324
4491 -#define PCI_BIOS32_PARAGRAPH_LEN 16
4492 -#define PCI_ROM_BASE1 0x000F0000
4493 -#define ROM_SIZE 0x10000
4494 -
4495 -struct bios32_service_dir {
4496 - u32 signature;
4497 - u32 entry_point;
4498 - u8 revision;
4499 - u8 length;
4500 - u8 checksum;
4501 - u8 reserved[5];
4502 -};
4503 -
4504 -/* type 212 */
4505 -struct smbios_cru64_info {
4506 - u8 type;
4507 - u8 byte_length;
4508 - u16 handle;
4509 - u32 signature;
4510 - u64 physical_address;
4511 - u32 double_length;
4512 - u32 double_offset;
4513 -};
4514 -#define SMBIOS_CRU64_INFORMATION 212
4515 -
4516 -/* type 219 */
4517 -struct smbios_proliant_info {
4518 - u8 type;
4519 - u8 byte_length;
4520 - u16 handle;
4521 - u32 power_features;
4522 - u32 omega_features;
4523 - u32 reserved;
4524 - u32 misc_features;
4525 -};
4526 -#define SMBIOS_ICRU_INFORMATION 219
4527 -
4528 -
4529 -struct cmn_registers {
4530 - union {
4531 - struct {
4532 - u8 ral;
4533 - u8 rah;
4534 - u16 rea2;
4535 - };
4536 - u32 reax;
4537 - } u1;
4538 - union {
4539 - struct {
4540 - u8 rbl;
4541 - u8 rbh;
4542 - u8 reb2l;
4543 - u8 reb2h;
4544 - };
4545 - u32 rebx;
4546 - } u2;
4547 - union {
4548 - struct {
4549 - u8 rcl;
4550 - u8 rch;
4551 - u16 rec2;
4552 - };
4553 - u32 recx;
4554 - } u3;
4555 - union {
4556 - struct {
4557 - u8 rdl;
4558 - u8 rdh;
4559 - u16 red2;
4560 - };
4561 - u32 redx;
4562 - } u4;
4563 -
4564 - u32 resi;
4565 - u32 redi;
4566 - u16 rds;
4567 - u16 res;
4568 - u32 reflags;
4569 -} __attribute__((packed));
4570 -
4571 -static unsigned int hpwdt_nmi_decoding;
4572 -static unsigned int allow_kdump = 1;
4573 -static unsigned int is_icru;
4574 -static unsigned int is_uefi;
4575 -static DEFINE_SPINLOCK(rom_lock);
4576 -static void *cru_rom_addr;
4577 -static struct cmn_registers cmn_regs;
4578 -
4579 -extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
4580 - unsigned long *pRomEntry);
4581 -
4582 -#ifdef CONFIG_X86_32
4583 -/* --32 Bit Bios------------------------------------------------------------ */
4584 -
4585 -#define HPWDT_ARCH 32
4586 -
4587 -asm(".text \n\t"
4588 - ".align 4 \n\t"
4589 - ".globl asminline_call \n"
4590 - "asminline_call: \n\t"
4591 - "pushl %ebp \n\t"
4592 - "movl %esp, %ebp \n\t"
4593 - "pusha \n\t"
4594 - "pushf \n\t"
4595 - "push %es \n\t"
4596 - "push %ds \n\t"
4597 - "pop %es \n\t"
4598 - "movl 8(%ebp),%eax \n\t"
4599 - "movl 4(%eax),%ebx \n\t"
4600 - "movl 8(%eax),%ecx \n\t"
4601 - "movl 12(%eax),%edx \n\t"
4602 - "movl 16(%eax),%esi \n\t"
4603 - "movl 20(%eax),%edi \n\t"
4604 - "movl (%eax),%eax \n\t"
4605 - "push %cs \n\t"
4606 - "call *12(%ebp) \n\t"
4607 - "pushf \n\t"
4608 - "pushl %eax \n\t"
4609 - "movl 8(%ebp),%eax \n\t"
4610 - "movl %ebx,4(%eax) \n\t"
4611 - "movl %ecx,8(%eax) \n\t"
4612 - "movl %edx,12(%eax) \n\t"
4613 - "movl %esi,16(%eax) \n\t"
4614 - "movl %edi,20(%eax) \n\t"
4615 - "movw %ds,24(%eax) \n\t"
4616 - "movw %es,26(%eax) \n\t"
4617 - "popl %ebx \n\t"
4618 - "movl %ebx,(%eax) \n\t"
4619 - "popl %ebx \n\t"
4620 - "movl %ebx,28(%eax) \n\t"
4621 - "pop %es \n\t"
4622 - "popf \n\t"
4623 - "popa \n\t"
4624 - "leave \n\t"
4625 - "ret \n\t"
4626 - ".previous");
4627 -
4628 -
4629 -/*
4630 - * cru_detect
4631 - *
4632 - * Routine Description:
4633 - * This function uses the 32-bit BIOS Service Directory record to
4634 - * search for a $CRU record.
4635 - *
4636 - * Return Value:
4637 - * 0 : SUCCESS
4638 - * <0 : FAILURE
4639 - */
4640 -static int cru_detect(unsigned long map_entry,
4641 - unsigned long map_offset)
4642 -{
4643 - void *bios32_map;
4644 - unsigned long *bios32_entrypoint;
4645 - unsigned long cru_physical_address;
4646 - unsigned long cru_length;
4647 - unsigned long physical_bios_base = 0;
4648 - unsigned long physical_bios_offset = 0;
4649 - int retval = -ENODEV;
4650 -
4651 - bios32_map = ioremap(map_entry, (2 * PAGE_SIZE));
4652 -
4653 - if (bios32_map == NULL)
4654 - return -ENODEV;
4655 -
4656 - bios32_entrypoint = bios32_map + map_offset;
4657 -
4658 - cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
4659 -
4660 - set_memory_x((unsigned long)bios32_map, 2);
4661 - asminline_call(&cmn_regs, bios32_entrypoint);
4662 -
4663 - if (cmn_regs.u1.ral != 0) {
4664 - pr_warn("Call succeeded but with an error: 0x%x\n",
4665 - cmn_regs.u1.ral);
4666 - } else {
4667 - physical_bios_base = cmn_regs.u2.rebx;
4668 - physical_bios_offset = cmn_regs.u4.redx;
4669 - cru_length = cmn_regs.u3.recx;
4670 - cru_physical_address =
4671 - physical_bios_base + physical_bios_offset;
4672 -
4673 - /* If the values look OK, then map it in. */
4674 - if ((physical_bios_base + physical_bios_offset)) {
4675 - cru_rom_addr =
4676 - ioremap(cru_physical_address, cru_length);
4677 - if (cru_rom_addr) {
4678 - set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
4679 - (cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT);
4680 - retval = 0;
4681 - }
4682 - }
4683 -
4684 - pr_debug("CRU Base Address: 0x%lx\n", physical_bios_base);
4685 - pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset);
4686 - pr_debug("CRU Length: 0x%lx\n", cru_length);
4687 - pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr);
4688 - }
4689 - iounmap(bios32_map);
4690 - return retval;
4691 -}
4692 -
4693 -/*
4694 - * bios_checksum
4695 - */
4696 -static int bios_checksum(const char __iomem *ptr, int len)
4697 -{
4698 - char sum = 0;
4699 - int i;
4700 -
4701 - /*
4702 - * calculate checksum of size bytes. This should add up
4703 - * to zero if we have a valid header.
4704 - */
4705 - for (i = 0; i < len; i++)
4706 - sum += ptr[i];
4707 -
4708 - return ((sum == 0) && (len > 0));
4709 -}
4710 -
4711 -/*
4712 - * bios32_present
4713 - *
4714 - * Routine Description:
4715 - * This function finds the 32-bit BIOS Service Directory
4716 - *
4717 - * Return Value:
4718 - * 0 : SUCCESS
4719 - * <0 : FAILURE
4720 - */
4721 -static int bios32_present(const char __iomem *p)
4722 -{
4723 - struct bios32_service_dir *bios_32_ptr;
4724 - int length;
4725 - unsigned long map_entry, map_offset;
4726 -
4727 - bios_32_ptr = (struct bios32_service_dir *) p;
4728 -
4729 - /*
4730 - * Search for signature by checking equal to the swizzled value
4731 - * instead of calling another routine to perform a strcmp.
4732 - */
4733 - if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) {
4734 - length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN;
4735 - if (bios_checksum(p, length)) {
4736 - /*
4737 - * According to the spec, we're looking for the
4738 - * first 4KB-aligned address below the entrypoint
4739 - * listed in the header. The Service Directory code
4740 - * is guaranteed to occupy no more than 2 4KB pages.
4741 - */
4742 - map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1);
4743 - map_offset = bios_32_ptr->entry_point - map_entry;
4744 -
4745 - return cru_detect(map_entry, map_offset);
4746 - }
4747 - }
4748 - return -ENODEV;
4749 -}
4750 -
4751 -static int detect_cru_service(void)
4752 -{
4753 - char __iomem *p, *q;
4754 - int rc = -1;
4755 -
4756 - /*
4757 - * Search from 0x0f0000 through 0x0fffff, inclusive.
4758 - */
4759 - p = ioremap(PCI_ROM_BASE1, ROM_SIZE);
4760 - if (p == NULL)
4761 - return -ENOMEM;
4762 -
4763 - for (q = p; q < p + ROM_SIZE; q += 16) {
4764 - rc = bios32_present(q);
4765 - if (!rc)
4766 - break;
4767 - }
4768 - iounmap(p);
4769 - return rc;
4770 -}
4771 -/* ------------------------------------------------------------------------- */
4772 -#endif /* CONFIG_X86_32 */
4773 -#ifdef CONFIG_X86_64
4774 -/* --64 Bit Bios------------------------------------------------------------ */
4775 -
4776 -#define HPWDT_ARCH 64
4777 -
4778 -asm(".text \n\t"
4779 - ".align 4 \n\t"
4780 - ".globl asminline_call \n\t"
4781 - ".type asminline_call, @function \n\t"
4782 - "asminline_call: \n\t"
4783 - FRAME_BEGIN
4784 - "pushq %rax \n\t"
4785 - "pushq %rbx \n\t"
4786 - "pushq %rdx \n\t"
4787 - "pushq %r12 \n\t"
4788 - "pushq %r9 \n\t"
4789 - "movq %rsi, %r12 \n\t"
4790 - "movq %rdi, %r9 \n\t"
4791 - "movl 4(%r9),%ebx \n\t"
4792 - "movl 8(%r9),%ecx \n\t"
4793 - "movl 12(%r9),%edx \n\t"
4794 - "movl 16(%r9),%esi \n\t"
4795 - "movl 20(%r9),%edi \n\t"
4796 - "movl (%r9),%eax \n\t"
4797 - "call *%r12 \n\t"
4798 - "pushfq \n\t"
4799 - "popq %r12 \n\t"
4800 - "movl %eax, (%r9) \n\t"
4801 - "movl %ebx, 4(%r9) \n\t"
4802 - "movl %ecx, 8(%r9) \n\t"
4803 - "movl %edx, 12(%r9) \n\t"
4804 - "movl %esi, 16(%r9) \n\t"
4805 - "movl %edi, 20(%r9) \n\t"
4806 - "movq %r12, %rax \n\t"
4807 - "movl %eax, 28(%r9) \n\t"
4808 - "popq %r9 \n\t"
4809 - "popq %r12 \n\t"
4810 - "popq %rdx \n\t"
4811 - "popq %rbx \n\t"
4812 - "popq %rax \n\t"
4813 - FRAME_END
4814 - "ret \n\t"
4815 - ".previous");
4816 -
4817 -/*
4818 - * dmi_find_cru
4819 - *
4820 - * Routine Description:
4821 - * This function checks whether or not a SMBIOS/DMI record is
4822 - * the 64bit CRU info or not
4823 - */
4824 -static void dmi_find_cru(const struct dmi_header *dm, void *dummy)
4825 -{
4826 - struct smbios_cru64_info *smbios_cru64_ptr;
4827 - unsigned long cru_physical_address;
4828 -
4829 - if (dm->type == SMBIOS_CRU64_INFORMATION) {
4830 - smbios_cru64_ptr = (struct smbios_cru64_info *) dm;
4831 - if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) {
4832 - cru_physical_address =
4833 - smbios_cru64_ptr->physical_address +
4834 - smbios_cru64_ptr->double_offset;
4835 - cru_rom_addr = ioremap(cru_physical_address,
4836 - smbios_cru64_ptr->double_length);
4837 - set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
4838 - smbios_cru64_ptr->double_length >> PAGE_SHIFT);
4839 - }
4840 - }
4841 -}
4842 -
4843 -static int detect_cru_service(void)
4844 -{
4845 - cru_rom_addr = NULL;
4846 -
4847 - dmi_walk(dmi_find_cru, NULL);
4848 -
4849 - /* if cru_rom_addr has been set then we found a CRU service */
4850 - return ((cru_rom_addr != NULL) ? 0 : -ENODEV);
4851 -}
4852 -/* ------------------------------------------------------------------------- */
4853 -#endif /* CONFIG_X86_64 */
4854 -#endif /* CONFIG_HPWDT_NMI_DECODING */
4855
4856 /*
4857 * Watchdog operations
4858 @@ -475,32 +103,22 @@ static int hpwdt_time_left(void)
4859 }
4860
4861 #ifdef CONFIG_HPWDT_NMI_DECODING
4862 +static int hpwdt_my_nmi(void)
4863 +{
4864 + return ioread8(hpwdt_nmistat) & 0x6;
4865 +}
4866 +
4867 /*
4868 * NMI Handler
4869 */
4870 static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
4871 {
4872 - unsigned long rom_pl;
4873 - static int die_nmi_called;
4874 -
4875 - if (!hpwdt_nmi_decoding)
4876 + if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
4877 return NMI_DONE;
4878
4879 - spin_lock_irqsave(&rom_lock, rom_pl);
4880 - if (!die_nmi_called && !is_icru && !is_uefi)
4881 - asminline_call(&cmn_regs, cru_rom_addr);
4882 - die_nmi_called = 1;
4883 - spin_unlock_irqrestore(&rom_lock, rom_pl);
4884 -
4885 if (allow_kdump)
4886 hpwdt_stop();
4887
4888 - if (!is_icru && !is_uefi) {
4889 - if (cmn_regs.u1.ral == 0) {
4890 - nmi_panic(regs, "An NMI occurred, but unable to determine source.\n");
4891 - return NMI_HANDLED;
4892 - }
4893 - }
4894 nmi_panic(regs, "An NMI occurred. Depending on your system the reason "
4895 "for the NMI is logged in any one of the following "
4896 "resources:\n"
4897 @@ -666,84 +284,11 @@ static struct miscdevice hpwdt_miscdev = {
4898 * Init & Exit
4899 */
4900
4901 -#ifdef CONFIG_HPWDT_NMI_DECODING
4902 -#ifdef CONFIG_X86_LOCAL_APIC
4903 -static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
4904 -{
4905 - /*
4906 - * If nmi_watchdog is turned off then we can turn on
4907 - * our nmi decoding capability.
4908 - */
4909 - hpwdt_nmi_decoding = 1;
4910 -}
4911 -#else
4912 -static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
4913 -{
4914 - dev_warn(&dev->dev, "NMI decoding is disabled. "
4915 - "Your kernel does not support a NMI Watchdog.\n");
4916 -}
4917 -#endif /* CONFIG_X86_LOCAL_APIC */
4918 -
4919 -/*
4920 - * dmi_find_icru
4921 - *
4922 - * Routine Description:
4923 - * This function checks whether or not we are on an iCRU-based server.
4924 - * This check is independent of architecture and needs to be made for
4925 - * any ProLiant system.
4926 - */
4927 -static void dmi_find_icru(const struct dmi_header *dm, void *dummy)
4928 -{
4929 - struct smbios_proliant_info *smbios_proliant_ptr;
4930 -
4931 - if (dm->type == SMBIOS_ICRU_INFORMATION) {
4932 - smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
4933 - if (smbios_proliant_ptr->misc_features & 0x01)
4934 - is_icru = 1;
4935 - if (smbios_proliant_ptr->misc_features & 0x408)
4936 - is_uefi = 1;
4937 - }
4938 -}
4939
4940 static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
4941 {
4942 +#ifdef CONFIG_HPWDT_NMI_DECODING
4943 int retval;
4944 -
4945 - /*
4946 - * On typical CRU-based systems we need to map that service in
4947 - * the BIOS. For 32 bit Operating Systems we need to go through
4948 - * the 32 Bit BIOS Service Directory. For 64 bit Operating
4949 - * Systems we get that service through SMBIOS.
4950 - *
4951 - * On systems that support the new iCRU service all we need to
4952 - * do is call dmi_walk to get the supported flag value and skip
4953 - * the old cru detect code.
4954 - */
4955 - dmi_walk(dmi_find_icru, NULL);
4956 - if (!is_icru && !is_uefi) {
4957 -
4958 - /*
4959 - * We need to map the ROM to get the CRU service.
4960 - * For 32 bit Operating Systems we need to go through the 32 Bit
4961 - * BIOS Service Directory
4962 - * For 64 bit Operating Systems we get that service through SMBIOS.
4963 - */
4964 - retval = detect_cru_service();
4965 - if (retval < 0) {
4966 - dev_warn(&dev->dev,
4967 - "Unable to detect the %d Bit CRU Service.\n",
4968 - HPWDT_ARCH);
4969 - return retval;
4970 - }
4971 -
4972 - /*
4973 - * We know this is the only CRU call we need to make so lets keep as
4974 - * few instructions as possible once the NMI comes in.
4975 - */
4976 - cmn_regs.u1.rah = 0x0D;
4977 - cmn_regs.u1.ral = 0x02;
4978 - }
4979 -
4980 /*
4981 * Only one function can register for NMI_UNKNOWN
4982 */
4983 @@ -771,44 +316,25 @@ static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
4984 dev_warn(&dev->dev,
4985 "Unable to register a die notifier (err=%d).\n",
4986 retval);
4987 - if (cru_rom_addr)
4988 - iounmap(cru_rom_addr);
4989 return retval;
4990 +#endif /* CONFIG_HPWDT_NMI_DECODING */
4991 + return 0;
4992 }
4993
4994 static void hpwdt_exit_nmi_decoding(void)
4995 {
4996 +#ifdef CONFIG_HPWDT_NMI_DECODING
4997 unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
4998 unregister_nmi_handler(NMI_SERR, "hpwdt");
4999 unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
5000 - if (cru_rom_addr)
5001 - iounmap(cru_rom_addr);
5002 -}
5003 -#else /* !CONFIG_HPWDT_NMI_DECODING */
5004 -static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
5005 -{
5006 -}
5007 -
5008 -static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
5009 -{
5010 - return 0;
5011 +#endif
5012 }
5013
5014 -static void hpwdt_exit_nmi_decoding(void)
5015 -{
5016 -}
5017 -#endif /* CONFIG_HPWDT_NMI_DECODING */
5018 -
5019 static int hpwdt_init_one(struct pci_dev *dev,
5020 const struct pci_device_id *ent)
5021 {
5022 int retval;
5023
5024 - /*
5025 - * Check if we can do NMI decoding or not
5026 - */
5027 - hpwdt_check_nmi_decoding(dev);
5028 -
5029 /*
5030 * First let's find out if we are on an iLO2+ server. We will
5031 * not run on a legacy ASM box.
5032 @@ -842,6 +368,7 @@ static int hpwdt_init_one(struct pci_dev *dev,
5033 retval = -ENOMEM;
5034 goto error_pci_iomap;
5035 }
5036 + hpwdt_nmistat = pci_mem_addr + 0x6e;
5037 hpwdt_timer_reg = pci_mem_addr + 0x70;
5038 hpwdt_timer_con = pci_mem_addr + 0x72;
5039
5040 @@ -912,6 +439,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
5041 #ifdef CONFIG_HPWDT_NMI_DECODING
5042 module_param(allow_kdump, int, 0);
5043 MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
5044 -#endif /* !CONFIG_HPWDT_NMI_DECODING */
5045 +#endif /* CONFIG_HPWDT_NMI_DECODING */
5046
5047 module_pci_driver(hpwdt_driver);
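
The hpwdt rework above replaces the whole CRU/BIOS32 call path with one claim check: an NMI_UNKNOWN is consumed only if the iLO status register says this device raised it. A minimal sketch of that pattern, assuming the 0x6e offset and 0x6 mask from the patch; the my_* names and everything around them are invented:

    #include <linux/io.h>
    #include <linux/nmi.h>

    static void __iomem *my_nmistat;    /* mapped at probe, e.g. pci_mem_addr + 0x6e */

    static int my_nmi_pending(void)
    {
            return ioread8(my_nmistat) & 0x6;   /* device-specific NMI-source bits */
    }

    static int my_pretimeout(unsigned int reason, struct pt_regs *regs)
    {
            /* for unknown NMIs, claim only what our status register accounts for */
            if (reason == NMI_UNKNOWN && !my_nmi_pending())
                    return NMI_DONE;    /* not ours; let other handlers run */

            /* ... optionally stop the watchdog, then panic so the NMI is acted on ... */
            return NMI_HANDLED;
    }

Such a handler would be registered with register_nmi_handler() for NMI_UNKNOWN, NMI_SERR and NMI_IO_CHECK, exactly as the surviving driver code already does.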
5048 diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
5049 index 8c10b0562e75..621c517b325c 100644
5050 --- a/fs/nfs/direct.c
5051 +++ b/fs/nfs/direct.c
5052 @@ -86,10 +86,10 @@ struct nfs_direct_req {
5053 struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
5054 int mirror_count;
5055
5056 + loff_t io_start; /* Start offset for I/O */
5057 ssize_t count, /* bytes actually processed */
5058 max_count, /* max expected count */
5059 bytes_left, /* bytes left to be sent */
5060 - io_start, /* start of IO */
5061 error; /* any reported error */
5062 struct completion completion; /* wait for i/o completion */
5063
5064 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
5065 index eb098ccfefd5..b99200828d08 100644
5066 --- a/fs/nfs/pnfs.c
5067 +++ b/fs/nfs/pnfs.c
5068 @@ -292,8 +292,11 @@ pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
5069 void
5070 pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
5071 {
5072 - struct inode *inode = lo->plh_inode;
5073 + struct inode *inode;
5074
5075 + if (!lo)
5076 + return;
5077 + inode = lo->plh_inode;
5078 pnfs_layoutreturn_before_put_layout_hdr(lo);
5079
5080 if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
5081 @@ -1241,10 +1244,12 @@ bool pnfs_roc(struct inode *ino,
5082 spin_lock(&ino->i_lock);
5083 lo = nfsi->layout;
5084 if (!lo || !pnfs_layout_is_valid(lo) ||
5085 - test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
5086 + test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
5087 + lo = NULL;
5088 goto out_noroc;
5089 + }
5090 + pnfs_get_layout_hdr(lo);
5091 if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
5092 - pnfs_get_layout_hdr(lo);
5093 spin_unlock(&ino->i_lock);
5094 wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
5095 TASK_UNINTERRUPTIBLE);
5096 @@ -1312,10 +1317,12 @@ bool pnfs_roc(struct inode *ino,
5097 struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
5098 if (ld->prepare_layoutreturn)
5099 ld->prepare_layoutreturn(args);
5100 + pnfs_put_layout_hdr(lo);
5101 return true;
5102 }
5103 if (layoutreturn)
5104 pnfs_send_layoutreturn(lo, &stateid, iomode, true);
5105 + pnfs_put_layout_hdr(lo);
5106 return false;
5107 }
5108
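
The pnfs_roc() fix above is an instance of a general refcount rule: take your own reference while still under the lock that keeps the object alive, then issue exactly one put on every exit path, with the put itself tolerating NULL. A schematic sketch, not NFS code; struct obj and do_work() are invented:

    #include <linux/refcount.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct obj {
            refcount_t ref;
    };

    static void obj_put(struct obj *o)
    {
            if (!o)         /* NULL-tolerant, as pnfs_put_layout_hdr() now is */
                    return;
            if (refcount_dec_and_test(&o->ref))
                    kfree(o);
    }

    static bool do_work(struct obj *o)  /* stand-in for the real work */
    {
            return o != NULL;
    }

    static bool use_obj(struct obj *o, spinlock_t *lock)
    {
            bool ok = false;

            spin_lock(lock);
            if (!o || !refcount_inc_not_zero(&o->ref)) {
                    spin_unlock(lock);
                    o = NULL;       /* make the common put below a no-op */
                    goto out;
            }
            spin_unlock(lock);
            ok = do_work(o);        /* safe to sleep: we hold our own reference */
    out:
            obj_put(o);             /* one put covers every path */
            return ok;
    }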
5109 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
5110 index cf61108f8f8d..8607ad8626f6 100644
5111 --- a/fs/nfs/write.c
5112 +++ b/fs/nfs/write.c
5113 @@ -1878,40 +1878,43 @@ int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
5114 return status;
5115 }
5116
5117 -int nfs_commit_inode(struct inode *inode, int how)
5118 +static int __nfs_commit_inode(struct inode *inode, int how,
5119 + struct writeback_control *wbc)
5120 {
5121 LIST_HEAD(head);
5122 struct nfs_commit_info cinfo;
5123 int may_wait = how & FLUSH_SYNC;
5124 - int error = 0;
5125 - int res;
5126 + int ret, nscan;
5127
5128 nfs_init_cinfo_from_inode(&cinfo, inode);
5129 nfs_commit_begin(cinfo.mds);
5130 - res = nfs_scan_commit(inode, &head, &cinfo);
5131 - if (res)
5132 - error = nfs_generic_commit_list(inode, &head, how, &cinfo);
5133 + for (;;) {
5134 + ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
5135 + if (ret <= 0)
5136 + break;
5137 + ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
5138 + if (ret < 0)
5139 + break;
5140 + ret = 0;
5141 + if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
5142 + if (nscan < wbc->nr_to_write)
5143 + wbc->nr_to_write -= nscan;
5144 + else
5145 + wbc->nr_to_write = 0;
5146 + }
5147 + if (nscan < INT_MAX)
5148 + break;
5149 + cond_resched();
5150 + }
5151 nfs_commit_end(cinfo.mds);
5152 - if (res == 0)
5153 - return res;
5154 - if (error < 0)
5155 - goto out_error;
5156 - if (!may_wait)
5157 - goto out_mark_dirty;
5158 - error = wait_on_commit(cinfo.mds);
5159 - if (error < 0)
5160 - return error;
5161 - return res;
5162 -out_error:
5163 - res = error;
5164 - /* Note: If we exit without ensuring that the commit is complete,
5165 - * we must mark the inode as dirty. Otherwise, future calls to
5166 - * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
5167 - * that the data is on the disk.
5168 - */
5169 -out_mark_dirty:
5170 - __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
5171 - return res;
5172 + if (ret || !may_wait)
5173 + return ret;
5174 + return wait_on_commit(cinfo.mds);
5175 +}
5176 +
5177 +int nfs_commit_inode(struct inode *inode, int how)
5178 +{
5179 + return __nfs_commit_inode(inode, how, NULL);
5180 }
5181 EXPORT_SYMBOL_GPL(nfs_commit_inode);
5182
5183 @@ -1921,11 +1924,11 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5184 int flags = FLUSH_SYNC;
5185 int ret = 0;
5186
5187 - /* no commits means nothing needs to be done */
5188 - if (!atomic_long_read(&nfsi->commit_info.ncommit))
5189 - return ret;
5190 -
5191 if (wbc->sync_mode == WB_SYNC_NONE) {
5192 + /* no commits means nothing needs to be done */
5193 + if (!atomic_long_read(&nfsi->commit_info.ncommit))
5194 + goto check_requests_outstanding;
5195 +
5196 /* Don't commit yet if this is a non-blocking flush and there
5197 * are a lot of outstanding writes for this mapping.
5198 */
5199 @@ -1936,16 +1939,16 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5200 flags = 0;
5201 }
5202
5203 - ret = nfs_commit_inode(inode, flags);
5204 - if (ret >= 0) {
5205 - if (wbc->sync_mode == WB_SYNC_NONE) {
5206 - if (ret < wbc->nr_to_write)
5207 - wbc->nr_to_write -= ret;
5208 - else
5209 - wbc->nr_to_write = 0;
5210 - }
5211 - return 0;
5212 - }
5213 + ret = __nfs_commit_inode(inode, flags, wbc);
5214 + if (!ret) {
5215 + if (flags & FLUSH_SYNC)
5216 + return 0;
5217 + } else if (atomic_long_read(&nfsi->commit_info.ncommit))
5218 + goto out_mark_dirty;
5219 +
5220 +check_requests_outstanding:
5221 + if (!atomic_read(&nfsi->commit_info.rpcs_out))
5222 + return ret;
5223 out_mark_dirty:
5224 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
5225 return ret;
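
The rewritten commit path above threads the writeback_control down so the scan loop can charge its progress against wbc->nr_to_write, clamped at zero, and it keeps looping until nfs_scan_commit() returns fewer than INT_MAX entries, i.e. until a scan has drained everything available. The accounting step in isolation (the helper name is invented):

    #include <linux/writeback.h>

    static void account_scanned(struct writeback_control *wbc, long nscan)
    {
            if (!wbc || wbc->sync_mode != WB_SYNC_NONE)
                    return;
            if (nscan < wbc->nr_to_write)
                    wbc->nr_to_write -= nscan;      /* partial progress */
            else
                    wbc->nr_to_write = 0;           /* budget spent; never negative */
    }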
5226 diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
5227 index beb945e1963c..ef3e7ea76296 100644
5228 --- a/fs/overlayfs/namei.c
5229 +++ b/fs/overlayfs/namei.c
5230 @@ -678,9 +678,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
5231 stack[ctr].layer = lower.layer;
5232 ctr++;
5233
5234 - if (d.stop)
5235 - break;
5236 -
5237 /*
5238 * Following redirects can have security consequences: it's like
5239 * a symlink into the lower layer without the permission checks.
5240 @@ -697,6 +694,9 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
5241 goto out_put;
5242 }
5243
5244 + if (d.stop)
5245 + break;
5246 +
5247 if (d.redirect && d.redirect[0] == '/' && poe != roe) {
5248 poe = roe;
5249
5250 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
5251 index 76e237bd989b..6914633037a5 100644
5252 --- a/include/drm/drm_crtc_helper.h
5253 +++ b/include/drm/drm_crtc_helper.h
5254 @@ -77,5 +77,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev);
5255
5256 void drm_kms_helper_poll_disable(struct drm_device *dev);
5257 void drm_kms_helper_poll_enable(struct drm_device *dev);
5258 +bool drm_kms_helper_is_poll_worker(void);
5259
5260 #endif
5261 diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
5262 index 412e83a4d3db..29c839ed656b 100644
5263 --- a/include/drm/drm_drv.h
5264 +++ b/include/drm/drm_drv.h
5265 @@ -55,6 +55,7 @@ struct drm_mode_create_dumb;
5266 #define DRIVER_ATOMIC 0x10000
5267 #define DRIVER_KMS_LEGACY_CONTEXT 0x20000
5268 #define DRIVER_SYNCOBJ 0x40000
5269 +#define DRIVER_PREFER_XBGR_30BPP 0x80000
5270
5271 /**
5272 * struct drm_driver - DRM driver structure
5273 diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
5274 index 3b609edffa8f..be3aef6839f6 100644
5275 --- a/include/linux/compiler-clang.h
5276 +++ b/include/linux/compiler-clang.h
5277 @@ -19,3 +19,8 @@
5278
5279 #define randomized_struct_fields_start struct {
5280 #define randomized_struct_fields_end };
5281 +
5282 +/* Clang doesn't have a way to turn it off per-function, yet. */
5283 +#ifdef __noretpoline
5284 +#undef __noretpoline
5285 +#endif
5286 diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
5287 index 73bc63e0a1c4..673fbf904fe5 100644
5288 --- a/include/linux/compiler-gcc.h
5289 +++ b/include/linux/compiler-gcc.h
5290 @@ -93,6 +93,10 @@
5291 #define __weak __attribute__((weak))
5292 #define __alias(symbol) __attribute__((alias(#symbol)))
5293
5294 +#ifdef RETPOLINE
5295 +#define __noretpoline __attribute__((indirect_branch("keep")))
5296 +#endif
5297 +
5298 /*
5299 * it doesn't make sense on ARM (currently the only user of __naked)
5300 * to trace naked functions because then mcount is called without
5301 diff --git a/include/linux/init.h b/include/linux/init.h
5302 index 506a98151131..bc27cf03c41e 100644
5303 --- a/include/linux/init.h
5304 +++ b/include/linux/init.h
5305 @@ -6,10 +6,10 @@
5306 #include <linux/types.h>
5307
5308 /* Built-in __init functions needn't be compiled with retpoline */
5309 -#if defined(RETPOLINE) && !defined(MODULE)
5310 -#define __noretpoline __attribute__((indirect_branch("keep")))
5311 +#if defined(__noretpoline) && !defined(MODULE)
5312 +#define __noinitretpoline __noretpoline
5313 #else
5314 -#define __noretpoline
5315 +#define __noinitretpoline
5316 #endif
5317
5318 /* These macros are used to mark some functions or
5319 @@ -47,7 +47,7 @@
5320
5321 /* These are for everybody (although not all archs will actually
5322 discard it in modules) */
5323 -#define __init __section(.init.text) __cold __latent_entropy __noretpoline
5324 +#define __init __section(.init.text) __cold __latent_entropy __noinitretpoline
5325 #define __initdata __section(.init.data)
5326 #define __initconst __section(.init.rodata)
5327 #define __exitdata __section(.exit.data)
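
Read together, the three retpoline-attribute hunks above turn a feature flag into a capability macro: compiler-gcc.h defines __noretpoline only when a retpoline build can honor it, compiler-clang.h takes it back away because clang has no per-function opt-out yet, and init.h keys off whether the macro is defined rather than off RETPOLINE itself. Condensed into one illustrative listing:

    /* compiler-gcc.h: provide the attribute only in retpoline builds */
    #ifdef RETPOLINE
    #define __noretpoline __attribute__((indirect_branch("keep")))
    #endif

    /* compiler-clang.h: no per-function opt-out, so remove the capability */
    #ifdef __noretpoline
    #undef __noretpoline
    #endif

    /* init.h: consumers test the capability, not the feature flag */
    #if defined(__noretpoline) && !defined(MODULE)
    #define __noinitretpoline __noretpoline
    #else
    #define __noinitretpoline
    #endif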
5328 diff --git a/include/linux/nospec.h b/include/linux/nospec.h
5329 index 132e3f5a2e0d..e791ebc65c9c 100644
5330 --- a/include/linux/nospec.h
5331 +++ b/include/linux/nospec.h
5332 @@ -5,6 +5,7 @@
5333
5334 #ifndef _LINUX_NOSPEC_H
5335 #define _LINUX_NOSPEC_H
5336 +#include <asm/barrier.h>
5337
5338 /**
5339 * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
5340 @@ -29,26 +30,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
5341 }
5342 #endif
5343
5344 -/*
5345 - * Warn developers about inappropriate array_index_nospec() usage.
5346 - *
5347 - * Even if the CPU speculates past the WARN_ONCE branch, the
5348 - * sign bit of @index is taken into account when generating the
5349 - * mask.
5350 - *
5351 - * This warning is compiled out when the compiler can infer that
5352 - * @index and @size are less than LONG_MAX.
5353 - */
5354 -#define array_index_mask_nospec_check(index, size) \
5355 -({ \
5356 - if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
5357 - "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
5358 - _mask = 0; \
5359 - else \
5360 - _mask = array_index_mask_nospec(index, size); \
5361 - _mask; \
5362 -})
5363 -
5364 /*
5365 * array_index_nospec - sanitize an array index after a bounds check
5366 *
5367 @@ -67,7 +48,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
5368 ({ \
5369 typeof(index) _i = (index); \
5370 typeof(size) _s = (size); \
5371 - unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
5372 + unsigned long _mask = array_index_mask_nospec(_i, _s); \
5373 \
5374 BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
5375 BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
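
With the WARN_ONCE wrapper gone, array_index_nospec() is again a pure clamp, and its canonical use is unchanged: bounds-check first, then sanitize the index before the dependent load. A self-contained example (table and lookup() are illustrative):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/nospec.h>

    static const int table[16] = { 0 };

    static int lookup(unsigned int idx)
    {
            if (idx >= ARRAY_SIZE(table))
                    return -EINVAL;
            /* clamp idx under speculation so the load cannot leak */
            idx = array_index_nospec(idx, ARRAY_SIZE(table));
            return table[idx];
    }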
5376 diff --git a/include/linux/tpm.h b/include/linux/tpm.h
5377 index 5a090f5ab335..881312d85574 100644
5378 --- a/include/linux/tpm.h
5379 +++ b/include/linux/tpm.h
5380 @@ -50,6 +50,7 @@ struct tpm_class_ops {
5381 unsigned long *timeout_cap);
5382 int (*request_locality)(struct tpm_chip *chip, int loc);
5383 void (*relinquish_locality)(struct tpm_chip *chip, int loc);
5384 + void (*clk_enable)(struct tpm_chip *chip, bool value);
5385 };
5386
5387 #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
5388 diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
5389 index 4a54ef96aff5..bc0cda180c8b 100644
5390 --- a/include/linux/workqueue.h
5391 +++ b/include/linux/workqueue.h
5392 @@ -465,6 +465,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
5393
5394 extern void workqueue_set_max_active(struct workqueue_struct *wq,
5395 int max_active);
5396 +extern struct work_struct *current_work(void);
5397 extern bool current_is_workqueue_rescuer(void);
5398 extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
5399 extern unsigned int work_busy(struct work_struct *work);
5400 diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
5401 index 7fb57e905526..7bc752fc98de 100644
5402 --- a/include/scsi/scsi_cmnd.h
5403 +++ b/include/scsi/scsi_cmnd.h
5404 @@ -69,6 +69,9 @@ struct scsi_cmnd {
5405 struct list_head list; /* scsi_cmnd participates in queue lists */
5406 struct list_head eh_entry; /* entry for the host eh_cmd_q */
5407 struct delayed_work abort_work;
5408 +
5409 + struct rcu_head rcu;
5410 +
5411 int eh_eflags; /* Used by error handler */
5412
5413 /*
5414 diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
5415 index 1a1df0d21ee3..a8b7bf879ced 100644
5416 --- a/include/scsi/scsi_host.h
5417 +++ b/include/scsi/scsi_host.h
5418 @@ -571,8 +571,6 @@ struct Scsi_Host {
5419 struct blk_mq_tag_set tag_set;
5420 };
5421
5422 - struct rcu_head rcu;
5423 -
5424 atomic_t host_busy; /* commands actually active on low-level */
5425 atomic_t host_blocked;
5426
5427 diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
5428 index ce5b669003b2..ea8212118404 100644
5429 --- a/kernel/bpf/cpumap.c
5430 +++ b/kernel/bpf/cpumap.c
5431 @@ -339,7 +339,7 @@ static int cpu_map_kthread_run(void *data)
5432
5433 struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, int map_id)
5434 {
5435 - gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
5436 + gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
5437 struct bpf_cpu_map_entry *rcpu;
5438 int numa, err;
5439
5440 diff --git a/kernel/panic.c b/kernel/panic.c
5441 index 2cfef408fec9..4b794f1d8561 100644
5442 --- a/kernel/panic.c
5443 +++ b/kernel/panic.c
5444 @@ -640,7 +640,7 @@ device_initcall(register_warn_debugfs);
5445 */
5446 __visible void __stack_chk_fail(void)
5447 {
5448 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
5449 + panic("stack-protector: Kernel stack is corrupted in: %pB\n",
5450 __builtin_return_address(0));
5451 }
5452 EXPORT_SYMBOL(__stack_chk_fail);
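
The %p to %pB switch above matters because plain %p values have been hashed since 4.15, while %pB resolves a return address to a backtrace-style symbol+offset. A hypothetical caller:

    #include <linux/compiler.h>
    #include <linux/printk.h>

    static noinline void report_caller(void)
    {
            /* prints e.g. "called from: foo+0x1c/0x90", not a hashed pointer */
            pr_crit("called from: %pB\n", __builtin_return_address(0));
    }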
5453 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
5454 index f699122dab32..34f1e1a2ec12 100644
5455 --- a/kernel/workqueue.c
5456 +++ b/kernel/workqueue.c
5457 @@ -4168,6 +4168,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
5458 }
5459 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
5460
5461 +/**
5462 + * current_work - retrieve %current task's work struct
5463 + *
5464 + * Determine if %current task is a workqueue worker and what it's working on.
5465 + * Useful to find out the context that the %current task is running in.
5466 + *
5467 + * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
5468 + */
5469 +struct work_struct *current_work(void)
5470 +{
5471 + struct worker *worker = current_wq_worker();
5472 +
5473 + return worker ? worker->current_work : NULL;
5474 +}
5475 +EXPORT_SYMBOL(current_work);
5476 +
5477 /**
5478 * current_is_workqueue_rescuer - is %current workqueue rescuer?
5479 *
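
current_work() lets code ask, from any context, whether it is running inside a particular work item; drm_kms_helper_is_poll_worker(), declared earlier in this patch, is the consumer this export was added for. A sketch with invented names:

    #include <linux/workqueue.h>

    static struct work_struct poll_work;    /* set up elsewhere with INIT_WORK() */

    static bool in_poll_work(void)
    {
            /* true only when %current is a worker executing exactly this item */
            return current_work() == &poll_work;
    }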
5480 diff --git a/lib/bug.c b/lib/bug.c
5481 index c1b0fad31b10..1077366f496b 100644
5482 --- a/lib/bug.c
5483 +++ b/lib/bug.c
5484 @@ -150,6 +150,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
5485 return BUG_TRAP_TYPE_NONE;
5486
5487 bug = find_bug(bugaddr);
5488 + if (!bug)
5489 + return BUG_TRAP_TYPE_NONE;
5490
5491 file = NULL;
5492 line = 0;
5493 @@ -191,7 +193,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
5494 if (file)
5495 pr_crit("kernel BUG at %s:%u!\n", file, line);
5496 else
5497 - pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
5498 + pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
5499 (void *)bugaddr);
5500
5501 return BUG_TRAP_TYPE_BUG;
5502 diff --git a/mm/memblock.c b/mm/memblock.c
5503 index 46aacdfa4f4d..d25b5a456cca 100644
5504 --- a/mm/memblock.c
5505 +++ b/mm/memblock.c
5506 @@ -1107,7 +1107,7 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
5507 struct memblock_type *type = &memblock.memory;
5508 unsigned int right = type->cnt;
5509 unsigned int mid, left = 0;
5510 - phys_addr_t addr = PFN_PHYS(pfn + 1);
5511 + phys_addr_t addr = PFN_PHYS(++pfn);
5512
5513 do {
5514 mid = (right + left) / 2;
5515 @@ -1118,15 +1118,15 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
5516 type->regions[mid].size))
5517 left = mid + 1;
5518 else {
5519 - /* addr is within the region, so pfn + 1 is valid */
5520 - return min(pfn + 1, max_pfn);
5521 + /* addr is within the region, so pfn is valid */
5522 + return pfn;
5523 }
5524 } while (left < right);
5525
5526 if (right == type->cnt)
5527 - return max_pfn;
5528 + return -1UL;
5529 else
5530 - return min(PHYS_PFN(type->regions[right].base), max_pfn);
5531 + return PHYS_PFN(type->regions[right].base);
5532 }
5533
5534 /**
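
After this change memblock_next_valid_pfn() returns the next valid pfn itself, or -1UL when none remains, and leaves any clamping to the caller. A hedged sketch of the caller shape this enables (the loop body and init_range() are invented); note how a -1UL return falls out of the loop naturally at the pfn < max_pfn test:

    #include <linux/memblock.h>
    #include <linux/mmzone.h>

    static void init_range(unsigned long start_pfn, unsigned long max_pfn)
    {
            unsigned long pfn;

            for (pfn = start_pfn; pfn < max_pfn; pfn++) {
                    if (!pfn_valid(pfn)) {
                            /* skip ahead; -1 compensates the loop increment */
                            pfn = memblock_next_valid_pfn(pfn, max_pfn) - 1;
                            continue;
                    }
                    /* ... initialize struct page for pfn ... */
            }
    }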
5535 diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
5536 index 279527f8b1fe..59baaecd3e54 100644
5537 --- a/net/bridge/netfilter/ebt_among.c
5538 +++ b/net/bridge/netfilter/ebt_among.c
5539 @@ -172,18 +172,35 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
5540 return true;
5541 }
5542
5543 +static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
5544 +{
5545 + return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
5546 +}
5547 +
5548 static int ebt_among_mt_check(const struct xt_mtchk_param *par)
5549 {
5550 const struct ebt_among_info *info = par->matchinfo;
5551 const struct ebt_entry_match *em =
5552 container_of(par->matchinfo, const struct ebt_entry_match, data);
5553 - int expected_length = sizeof(struct ebt_among_info);
5554 + unsigned int expected_length = sizeof(struct ebt_among_info);
5555 const struct ebt_mac_wormhash *wh_dst, *wh_src;
5556 int err;
5557
5558 + if (expected_length > em->match_size)
5559 + return -EINVAL;
5560 +
5561 wh_dst = ebt_among_wh_dst(info);
5562 - wh_src = ebt_among_wh_src(info);
5563 + if (poolsize_invalid(wh_dst))
5564 + return -EINVAL;
5565 +
5566 expected_length += ebt_mac_wormhash_size(wh_dst);
5567 + if (expected_length > em->match_size)
5568 + return -EINVAL;
5569 +
5570 + wh_src = ebt_among_wh_src(info);
5571 + if (poolsize_invalid(wh_src))
5572 + return -EINVAL;
5573 +
5574 expected_length += ebt_mac_wormhash_size(wh_src);
5575
5576 if (em->match_size != EBT_ALIGN(expected_length)) {
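
The ebt_among check above is the standard recipe for parsing nested variable-length data from userspace: prove that each length field lies inside the buffer before using it to locate the next field, and cap absurd sizes before any arithmetic. The same recipe in the abstract (struct hdr and MAX_PAYLOAD are invented):

    #include <linux/errno.h>

    #define MAX_PAYLOAD 4096

    struct hdr {
            unsigned int payload_len;
            unsigned char payload[];
    };

    static int parse_blob(const void *data, unsigned int data_len)
    {
            const struct hdr *h = data;
            unsigned int need = sizeof(*h);

            if (need > data_len)                    /* header itself in bounds? */
                    return -EINVAL;
            if (h->payload_len > MAX_PAYLOAD)       /* cap before any arithmetic */
                    return -EINVAL;
            need += h->payload_len;
            if (need > data_len)                    /* header + payload in bounds? */
                    return -EINVAL;
            return 0;
    }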
5577 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
5578 index 37817d25b63d..895ba1cd9750 100644
5579 --- a/net/bridge/netfilter/ebtables.c
5580 +++ b/net/bridge/netfilter/ebtables.c
5581 @@ -2053,7 +2053,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
5582 if (match_kern)
5583 match_kern->match_size = ret;
5584
5585 - WARN_ON(type == EBT_COMPAT_TARGET && size_left);
5586 + if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
5587 + return -EINVAL;
5588 +
5589 match32 = (struct compat_ebt_entry_mwt *) buf;
5590 }
5591
5592 @@ -2109,6 +2111,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
5593 *
5594 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
5595 */
5596 + for (i = 0; i < 4 ; ++i) {
5597 + if (offsets[i] >= *total)
5598 + return -EINVAL;
5599 + if (i == 0)
5600 + continue;
5601 + if (offsets[i-1] > offsets[i])
5602 + return -EINVAL;
5603 + }
5604 +
5605 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
5606 struct compat_ebt_entry_mwt *match32;
5607 unsigned int size;
5608 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
5609 index 0c3c944a7b72..8e5185ad6310 100644
5610 --- a/net/ipv4/netfilter/arp_tables.c
5611 +++ b/net/ipv4/netfilter/arp_tables.c
5612 @@ -257,6 +257,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
5613 }
5614 if (table_base + v
5615 != arpt_next_entry(e)) {
5616 + if (unlikely(stackidx >= private->stacksize)) {
5617 + verdict = NF_DROP;
5618 + break;
5619 + }
5620 jumpstack[stackidx++] = e;
5621 }
5622
5623 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
5624 index 2e0d339028bb..a74a81624983 100644
5625 --- a/net/ipv4/netfilter/ip_tables.c
5626 +++ b/net/ipv4/netfilter/ip_tables.c
5627 @@ -335,8 +335,13 @@ ipt_do_table(struct sk_buff *skb,
5628 continue;
5629 }
5630 if (table_base + v != ipt_next_entry(e) &&
5631 - !(e->ip.flags & IPT_F_GOTO))
5632 + !(e->ip.flags & IPT_F_GOTO)) {
5633 + if (unlikely(stackidx >= private->stacksize)) {
5634 + verdict = NF_DROP;
5635 + break;
5636 + }
5637 jumpstack[stackidx++] = e;
5638 + }
5639
5640 e = get_entry(table_base, v);
5641 continue;
5642 diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
5643 index 1e4a7209a3d2..77a01c484807 100644
5644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
5645 +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
5646 @@ -107,12 +107,6 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
5647
5648 local_bh_disable();
5649 if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
5650 - list_del_rcu(&c->list);
5651 - spin_unlock(&cn->lock);
5652 - local_bh_enable();
5653 -
5654 - unregister_netdevice_notifier(&c->notifier);
5655 -
5656 /* In case anyone still accesses the file, the open/close
5657 * functions are also incrementing the refcount on their own,
5658 * so it's safe to remove the entry even if it's in use. */
5659 @@ -120,6 +114,12 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
5660 if (cn->procdir)
5661 proc_remove(c->pde);
5662 #endif
5663 + list_del_rcu(&c->list);
5664 + spin_unlock(&cn->lock);
5665 + local_bh_enable();
5666 +
5667 + unregister_netdevice_notifier(&c->notifier);
5668 +
5669 return;
5670 }
5671 local_bh_enable();
5672 diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
5673 index 39970e212ad5..9bf260459f83 100644
5674 --- a/net/ipv6/netfilter.c
5675 +++ b/net/ipv6/netfilter.c
5676 @@ -21,18 +21,19 @@
5677 int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
5678 {
5679 const struct ipv6hdr *iph = ipv6_hdr(skb);
5680 + struct sock *sk = sk_to_full_sk(skb->sk);
5681 unsigned int hh_len;
5682 struct dst_entry *dst;
5683 struct flowi6 fl6 = {
5684 - .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
5685 + .flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
5686 .flowi6_mark = skb->mark,
5687 - .flowi6_uid = sock_net_uid(net, skb->sk),
5688 + .flowi6_uid = sock_net_uid(net, sk),
5689 .daddr = iph->daddr,
5690 .saddr = iph->saddr,
5691 };
5692 int err;
5693
5694 - dst = ip6_route_output(net, skb->sk, &fl6);
5695 + dst = ip6_route_output(net, sk, &fl6);
5696 err = dst->error;
5697 if (err) {
5698 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
5699 @@ -50,7 +51,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
5700 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
5701 xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
5702 skb_dst_set(skb, NULL);
5703 - dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
5704 + dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
5705 if (IS_ERR(dst))
5706 return PTR_ERR(dst);
5707 skb_dst_set(skb, dst);
5708 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
5709 index 1d7ae9366335..51f3bc632c7c 100644
5710 --- a/net/ipv6/netfilter/ip6_tables.c
5711 +++ b/net/ipv6/netfilter/ip6_tables.c
5712 @@ -357,6 +357,10 @@ ip6t_do_table(struct sk_buff *skb,
5713 }
5714 if (table_base + v != ip6t_next_entry(e) &&
5715 !(e->ipv6.flags & IP6T_F_GOTO)) {
5716 + if (unlikely(stackidx >= private->stacksize)) {
5717 + verdict = NF_DROP;
5718 + break;
5719 + }
5720 jumpstack[stackidx++] = e;
5721 }
5722
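
arp_tables, ip_tables and ip6_tables all gain the same guard above: a rule jump may only be recorded while there is room on the per-cpu jumpstack, otherwise the packet is dropped. Abstracted into a sketch (struct entry and push_jump() are invented):

    struct entry;   /* stand-in for the per-protocol rule entry type */

    static bool push_jump(struct entry **stack, unsigned int size,
                          unsigned int *top, struct entry *e)
    {
            if (*top >= size)
                    return false;   /* caller fails closed: verdict = NF_DROP */
            stack[(*top)++] = e;
            return true;
    }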
5723 diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
5724 index 1d2fb9267d6f..6a203fa82dbd 100644
5725 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
5726 +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
5727 @@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
5728 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
5729 target, maniptype))
5730 return false;
5731 +
5732 + /* must reload, offset might have changed */
5733 + ipv6h = (void *)skb->data + iphdroff;
5734 +
5735 manip_addr:
5736 if (maniptype == NF_NAT_MANIP_SRC)
5737 ipv6h->saddr = target->src.u3.in6;
5738 diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
5739 index fbce552a796e..7d7466dbf663 100644
5740 --- a/net/netfilter/nf_nat_proto_common.c
5741 +++ b/net/netfilter/nf_nat_proto_common.c
5742 @@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
5743 const struct nf_conn *ct,
5744 u16 *rover)
5745 {
5746 - unsigned int range_size, min, i;
5747 + unsigned int range_size, min, max, i;
5748 __be16 *portptr;
5749 u_int16_t off;
5750
5751 @@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
5752 }
5753 } else {
5754 min = ntohs(range->min_proto.all);
5755 - range_size = ntohs(range->max_proto.all) - min + 1;
5756 + max = ntohs(range->max_proto.all);
5757 + if (unlikely(max < min))
5758 + swap(max, min);
5759 + range_size = max - min + 1;
5760 }
5761
5762 if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
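
The NAT range fix normalizes a possibly reversed, user-supplied port range before computing its size; without the swap, min = 2000 and max = 1000 would make max - min + 1 underflow to roughly four billion "ports". A self-contained sketch (pick_port() is invented):

    #include <linux/kernel.h>       /* swap() */

    static u16 pick_port(u16 lo, u16 hi, u32 hint)
    {
            unsigned int range_size;

            if (hi < lo)                    /* tolerate a reversed range */
                    swap(hi, lo);
            range_size = hi - lo + 1;       /* now at most 65536 */
            return lo + (hint % range_size);
    }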
5763 diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
5764 index ee3421ad108d..18b7412ab99a 100644
5765 --- a/net/netfilter/xt_IDLETIMER.c
5766 +++ b/net/netfilter/xt_IDLETIMER.c
5767 @@ -146,11 +146,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
5768 timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
5769 info->timer->refcnt = 1;
5770
5771 + INIT_WORK(&info->timer->work, idletimer_tg_work);
5772 +
5773 mod_timer(&info->timer->timer,
5774 msecs_to_jiffies(info->timeout * 1000) + jiffies);
5775
5776 - INIT_WORK(&info->timer->work, idletimer_tg_work);
5777 -
5778 return 0;
5779
5780 out_free_attr:
5781 @@ -191,7 +191,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
5782 pr_debug("timeout value is zero\n");
5783 return -EINVAL;
5784 }
5785 -
5786 + if (info->timeout >= INT_MAX / 1000) {
5787 + pr_debug("timeout value is too big\n");
5788 + return -EINVAL;
5789 + }
5790 if (info->label[0] == '\0' ||
5791 strnlen(info->label,
5792 MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
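
The INT_MAX / 1000 bound above exists because the timeout, given in seconds, is multiplied by 1000 in 32-bit arithmetic before msecs_to_jiffies(): a timeout of 5000000 seconds would wrap to about 705000000 ms and arm a timer for roughly 8 days instead of 58. The guard in isolation (arm_seconds() is invented):

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/timer.h>

    static int arm_seconds(struct timer_list *t, unsigned int timeout)
    {
            if (timeout >= INT_MAX / 1000)  /* timeout * 1000 must not wrap */
                    return -EINVAL;
            mod_timer(t, jiffies + msecs_to_jiffies(timeout * 1000));
            return 0;
    }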
5793 diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
5794 index 0971634e5444..18d3af5e1098 100644
5795 --- a/net/netfilter/xt_LED.c
5796 +++ b/net/netfilter/xt_LED.c
5797 @@ -142,9 +142,10 @@ static int led_tg_check(const struct xt_tgchk_param *par)
5798 goto exit_alloc;
5799 }
5800
5801 - /* See if we need to set up a timer */
5802 - if (ledinfo->delay > 0)
5803 - timer_setup(&ledinternal->timer, led_timeout_callback, 0);
5804 + /* Since the letinternal timer can be shared between multiple targets,
5805 + * always set it up, even if the current target does not need it
5806 + */
5807 + timer_setup(&ledinternal->timer, led_timeout_callback, 0);
5808
5809 list_add_tail(&ledinternal->list, &xt_led_triggers);
5810
5811 @@ -181,8 +182,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
5812
5813 list_del(&ledinternal->list);
5814
5815 - if (ledinfo->delay > 0)
5816 - del_timer_sync(&ledinternal->timer);
5817 + del_timer_sync(&ledinternal->timer);
5818
5819 led_trigger_unregister(&ledinternal->netfilter_led_trigger);
5820
5821 diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
5822 index 5da8746f7b88..b8a3e740ffd4 100644
5823 --- a/net/netfilter/xt_hashlimit.c
5824 +++ b/net/netfilter/xt_hashlimit.c
5825 @@ -774,7 +774,7 @@ hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par,
5826 if (!dh->rateinfo.prev_window &&
5827 (dh->rateinfo.current_rate <= dh->rateinfo.burst)) {
5828 spin_unlock(&dh->lock);
5829 - rcu_read_unlock_bh();
5830 + local_bh_enable();
5831 return !(cfg->mode & XT_HASHLIMIT_INVERT);
5832 } else {
5833 goto overlimit;
5834 diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
5835 index 6451c5013e06..af465e681b9b 100644
5836 --- a/net/smc/af_smc.c
5837 +++ b/net/smc/af_smc.c
5838 @@ -1369,8 +1369,10 @@ static int smc_create(struct net *net, struct socket *sock, int protocol,
5839 smc->use_fallback = false; /* assume rdma capability first */
5840 rc = sock_create_kern(net, PF_INET, SOCK_STREAM,
5841 IPPROTO_TCP, &smc->clcsock);
5842 - if (rc)
5843 + if (rc) {
5844 sk_common_release(sk);
5845 + goto out;
5846 + }
5847 smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
5848 smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
5849
5850 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
5851 index 47cddf32aeba..4f2b25d43ec9 100644
5852 --- a/scripts/Makefile.build
5853 +++ b/scripts/Makefile.build
5854 @@ -256,6 +256,8 @@ __objtool_obj := $(objtree)/tools/objtool/objtool
5855
5856 objtool_args = $(if $(CONFIG_UNWINDER_ORC),orc generate,check)
5857
5858 +objtool_args += $(if $(part-of-module), --module,)
5859 +
5860 ifndef CONFIG_FRAME_POINTER
5861 objtool_args += --no-fp
5862 endif
5863 @@ -264,6 +266,12 @@ objtool_args += --no-unreachable
5864 else
5865 objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
5866 endif
5867 +ifdef CONFIG_RETPOLINE
5868 +ifneq ($(RETPOLINE_CFLAGS),)
5869 + objtool_args += --retpoline
5870 +endif
5871 +endif
5872 +
5873
5874 ifdef CONFIG_MODVERSIONS
5875 objtool_o = $(@D)/.tmp_$(@F)
5876 diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
5877 index 015aa9dbad86..06cf4c00fe88 100644
5878 --- a/scripts/Makefile.lib
5879 +++ b/scripts/Makefile.lib
5880 @@ -287,11 +287,11 @@ cmd_dt_S_dtb= \
5881 echo '\#include <asm-generic/vmlinux.lds.h>'; \
5882 echo '.section .dtb.init.rodata,"a"'; \
5883 echo '.balign STRUCT_ALIGNMENT'; \
5884 - echo '.global __dtb_$(*F)_begin'; \
5885 - echo '__dtb_$(*F)_begin:'; \
5886 + echo '.global __dtb_$(subst -,_,$(*F))_begin'; \
5887 + echo '__dtb_$(subst -,_,$(*F))_begin:'; \
5888 echo '.incbin "$<" '; \
5889 - echo '__dtb_$(*F)_end:'; \
5890 - echo '.global __dtb_$(*F)_end'; \
5891 + echo '__dtb_$(subst -,_,$(*F))_end:'; \
5892 + echo '.global __dtb_$(subst -,_,$(*F))_end'; \
5893 echo '.balign STRUCT_ALIGNMENT'; \
5894 ) > $@
5895
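
The Makefile.lib change matters because the wrapper symbols are consumed from C, where '-' is not a valid identifier character: a blob built from a hypothetical foo-bar.dts now yields linkable __dtb_foo_bar_begin/_end names. A sketch of such a consumer:

    extern char __dtb_foo_bar_begin[];      /* emitted for foo-bar.dtb */
    extern char __dtb_foo_bar_end[];

    static unsigned long foo_bar_dtb_size(void)
    {
            return __dtb_foo_bar_end - __dtb_foo_bar_begin;
    }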
5896 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
5897 index a42cbbf2c8d9..35ff97bfd492 100644
5898 --- a/sound/core/seq/seq_clientmgr.c
5899 +++ b/sound/core/seq/seq_clientmgr.c
5900 @@ -910,7 +910,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
5901 static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
5902 struct snd_seq_event *event,
5903 struct file *file, int blocking,
5904 - int atomic, int hop)
5905 + int atomic, int hop,
5906 + struct mutex *mutexp)
5907 {
5908 struct snd_seq_event_cell *cell;
5909 int err;
5910 @@ -948,7 +949,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
5911 return -ENXIO; /* queue is not allocated */
5912
5913 /* allocate an event cell */
5914 - err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
5915 + err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
5916 + file, mutexp);
5917 if (err < 0)
5918 return err;
5919
5920 @@ -1017,12 +1019,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
5921 return -ENXIO;
5922
5923 /* allocate the pool now if the pool is not allocated yet */
5924 + mutex_lock(&client->ioctl_mutex);
5925 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
5926 - mutex_lock(&client->ioctl_mutex);
5927 err = snd_seq_pool_init(client->pool);
5928 - mutex_unlock(&client->ioctl_mutex);
5929 if (err < 0)
5930 - return -ENOMEM;
5931 + goto out;
5932 }
5933
5934 /* only process whole events */
5935 @@ -1073,7 +1074,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
5936 /* ok, enqueue it */
5937 err = snd_seq_client_enqueue_event(client, &event, file,
5938 !(file->f_flags & O_NONBLOCK),
5939 - 0, 0);
5940 + 0, 0, &client->ioctl_mutex);
5941 if (err < 0)
5942 break;
5943
5944 @@ -1084,6 +1085,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
5945 written += len;
5946 }
5947
5948 + out:
5949 + mutex_unlock(&client->ioctl_mutex);
5950 return written ? written : err;
5951 }
5952
5953 @@ -1838,6 +1841,9 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
5954 (! snd_seq_write_pool_allocated(client) ||
5955 info->output_pool != client->pool->size)) {
5956 if (snd_seq_write_pool_allocated(client)) {
5957 + /* is the pool in use? */
5958 + if (atomic_read(&client->pool->counter))
5959 + return -EBUSY;
5960 /* remove all existing cells */
5961 snd_seq_pool_mark_closing(client->pool);
5962 snd_seq_queue_client_leave_cells(client->number);
5963 @@ -2260,7 +2266,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
5964 if (! cptr->accept_output)
5965 result = -EPERM;
5966 else /* send it */
5967 - result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
5968 + result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
5969 + atomic, hop, NULL);
5970
5971 snd_seq_client_unlock(cptr);
5972 return result;
5973 diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
5974 index a8c2822e0198..72c0302a55d2 100644
5975 --- a/sound/core/seq/seq_fifo.c
5976 +++ b/sound/core/seq/seq_fifo.c
5977 @@ -125,7 +125,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
5978 return -EINVAL;
5979
5980 snd_use_lock_use(&f->use_lock);
5981 - err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
5982 + err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
5983 if (err < 0) {
5984 if ((err == -ENOMEM) || (err == -EAGAIN))
5985 atomic_inc(&f->overflow);
5986 diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
5987 index f763682584a8..ab1112e90f88 100644
5988 --- a/sound/core/seq/seq_memory.c
5989 +++ b/sound/core/seq/seq_memory.c
5990 @@ -220,7 +220,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
5991 */
5992 static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
5993 struct snd_seq_event_cell **cellp,
5994 - int nonblock, struct file *file)
5995 + int nonblock, struct file *file,
5996 + struct mutex *mutexp)
5997 {
5998 struct snd_seq_event_cell *cell;
5999 unsigned long flags;
6000 @@ -244,7 +245,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
6001 set_current_state(TASK_INTERRUPTIBLE);
6002 add_wait_queue(&pool->output_sleep, &wait);
6003 spin_unlock_irq(&pool->lock);
6004 + if (mutexp)
6005 + mutex_unlock(mutexp);
6006 schedule();
6007 + if (mutexp)
6008 + mutex_lock(mutexp);
6009 spin_lock_irq(&pool->lock);
6010 remove_wait_queue(&pool->output_sleep, &wait);
6011 /* interrupted? */
6012 @@ -287,7 +292,7 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
6013 */
6014 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
6015 struct snd_seq_event_cell **cellp, int nonblock,
6016 - struct file *file)
6017 + struct file *file, struct mutex *mutexp)
6018 {
6019 int ncells, err;
6020 unsigned int extlen;
6021 @@ -304,7 +309,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
6022 if (ncells >= pool->total_elements)
6023 return -ENOMEM;
6024
6025 - err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
6026 + err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
6027 if (err < 0)
6028 return err;
6029
6030 @@ -330,7 +335,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
6031 int size = sizeof(struct snd_seq_event);
6032 if (len < size)
6033 size = len;
6034 - err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
6035 + err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
6036 + mutexp);
6037 if (err < 0)
6038 goto __error;
6039 if (cell->event.data.ext.ptr == NULL)
6040 diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
6041 index 32f959c17786..3abe306c394a 100644
6042 --- a/sound/core/seq/seq_memory.h
6043 +++ b/sound/core/seq/seq_memory.h
6044 @@ -66,7 +66,8 @@ struct snd_seq_pool {
6045 void snd_seq_cell_free(struct snd_seq_event_cell *cell);
6046
6047 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
6048 - struct snd_seq_event_cell **cellp, int nonblock, struct file *file);
6049 + struct snd_seq_event_cell **cellp, int nonblock,
6050 + struct file *file, struct mutex *mutexp);
6051
6052 /* return number of unused (free) cells */
6053 static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
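
The mutexp argument plumbed through snd_seq_event_dup() above exists so snd_seq_cell_alloc() can drop the caller's ioctl_mutex while sleeping for a free cell; holding it across schedule() is what deadlocked writers against other clients. The essential shape, reduced to a sketch (struct pool and wait_for_cell() are invented):

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/sched/signal.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct pool {
            spinlock_t lock;
            wait_queue_head_t sleep;
    };

    /* called with p->lock held; returns with it held again */
    static int wait_for_cell(struct pool *p, struct mutex *outer)
    {
            DEFINE_WAIT(wait);

            prepare_to_wait(&p->sleep, &wait, TASK_INTERRUPTIBLE);
            spin_unlock_irq(&p->lock);
            if (outer)
                    mutex_unlock(outer);    /* let other clients free cells */
            schedule();
            if (outer)
                    mutex_lock(outer);      /* sleeping lock first, ... */
            spin_lock_irq(&p->lock);        /* ... spinlock second */
            finish_wait(&p->sleep, &wait);
            return signal_pending(current) ? -ERESTARTSYS : 0;
    }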
6054 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
6055 index 37e1cf8218ff..5b4dbcec6de8 100644
6056 --- a/sound/pci/hda/patch_conexant.c
6057 +++ b/sound/pci/hda/patch_conexant.c
6058 @@ -957,6 +957,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
6059 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
6060 SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
6061 SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
6062 + SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
6063 + SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
6064 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
6065 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
6066 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
6067 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6068 index 8fe38c18e29d..18bab5ffbe4a 100644
6069 --- a/sound/pci/hda/patch_realtek.c
6070 +++ b/sound/pci/hda/patch_realtek.c
6071 @@ -5152,6 +5152,16 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec,
6072 }
6073 }
6074
6075 +/* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */
6076 +static void alc295_fixup_disable_dac3(struct hda_codec *codec,
6077 + const struct hda_fixup *fix, int action)
6078 +{
6079 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
6080 + hda_nid_t conn[2] = { 0x02, 0x03 };
6081 + snd_hda_override_conn_list(codec, 0x17, 2, conn);
6082 + }
6083 +}
6084 +
6085 /* Hook to update amp GPIO4 for automute */
6086 static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
6087 struct hda_jack_callback *jack)
6088 @@ -5344,6 +5354,7 @@ enum {
6089 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
6090 ALC255_FIXUP_DELL_SPK_NOISE,
6091 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
6092 + ALC295_FIXUP_DISABLE_DAC3,
6093 ALC280_FIXUP_HP_HEADSET_MIC,
6094 ALC221_FIXUP_HP_FRONT_MIC,
6095 ALC292_FIXUP_TPT460,
6096 @@ -5358,10 +5369,12 @@ enum {
6097 ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
6098 ALC233_FIXUP_LENOVO_MULTI_CODECS,
6099 ALC294_FIXUP_LENOVO_MIC_LOCATION,
6100 + ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
6101 ALC700_FIXUP_INTEL_REFERENCE,
6102 ALC274_FIXUP_DELL_BIND_DACS,
6103 ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
6104 ALC298_FIXUP_TPT470_DOCK,
6105 + ALC255_FIXUP_DUMMY_LINEOUT_VERB,
6106 };
6107
6108 static const struct hda_fixup alc269_fixups[] = {
6109 @@ -6076,6 +6089,10 @@ static const struct hda_fixup alc269_fixups[] = {
6110 .chained = true,
6111 .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
6112 },
6113 + [ALC295_FIXUP_DISABLE_DAC3] = {
6114 + .type = HDA_FIXUP_FUNC,
6115 + .v.func = alc295_fixup_disable_dac3,
6116 + },
6117 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
6118 .type = HDA_FIXUP_PINS,
6119 .v.pins = (const struct hda_pintbl[]) {
6120 @@ -6161,6 +6178,18 @@ static const struct hda_fixup alc269_fixups[] = {
6121 { }
6122 },
6123 },
6124 + [ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE] = {
6125 + .type = HDA_FIXUP_PINS,
6126 + .v.pins = (const struct hda_pintbl[]) {
6127 + { 0x16, 0x0101102f }, /* Rear Headset HP */
6128 + { 0x19, 0x02a1913c }, /* use as Front headset mic, without its own jack detect */
6129 + { 0x1a, 0x01a19030 }, /* Rear Headset MIC */
6130 + { 0x1b, 0x02011020 },
6131 + { }
6132 + },
6133 + .chained = true,
6134 + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6135 + },
6136 [ALC700_FIXUP_INTEL_REFERENCE] = {
6137 .type = HDA_FIXUP_VERBS,
6138 .v.verbs = (const struct hda_verb[]) {
6139 @@ -6197,6 +6226,15 @@ static const struct hda_fixup alc269_fixups[] = {
6140 .chained = true,
6141 .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
6142 },
6143 + [ALC255_FIXUP_DUMMY_LINEOUT_VERB] = {
6144 + .type = HDA_FIXUP_PINS,
6145 + .v.pins = (const struct hda_pintbl[]) {
6146 + { 0x14, 0x0201101f },
6147 + { }
6148 + },
6149 + .chained = true,
6150 + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
6151 + },
6152 };
6153
6154 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6155 @@ -6245,10 +6283,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6156 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
6157 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
6158 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
6159 + SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
6160 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
6161 + SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
6162 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
6163 SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6164 SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6165 + SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
6166 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6167 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6168 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
6169 @@ -6386,9 +6427,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6170 SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
6171 SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6172 SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6173 + SND_PCI_QUIRK(0x17aa, 0x2249, "Thinkpad", ALC292_FIXUP_TPT460),
6174 SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6175 SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6176 SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6177 + SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
6178 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6179 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6180 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6181 @@ -6750,7 +6793,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6182 {0x12, 0x90a60120},
6183 {0x14, 0x90170110},
6184 {0x21, 0x0321101f}),
6185 - SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
6186 + SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
6187 {0x12, 0xb7a60130},
6188 {0x14, 0x90170110},
6189 {0x21, 0x04211020}),
6190 diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
6191 index 57254f5b2779..694abc628e9b 100644
6192 --- a/tools/objtool/builtin-check.c
6193 +++ b/tools/objtool/builtin-check.c
6194 @@ -29,7 +29,7 @@
6195 #include "builtin.h"
6196 #include "check.h"
6197
6198 -bool no_fp, no_unreachable;
6199 +bool no_fp, no_unreachable, retpoline, module;
6200
6201 static const char * const check_usage[] = {
6202 "objtool check [<options>] file.o",
6203 @@ -39,6 +39,8 @@ static const char * const check_usage[] = {
6204 const struct option check_options[] = {
6205 OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
6206 OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
6207 + OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
6208 + OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
6209 OPT_END(),
6210 };
6211
6212 @@ -53,5 +55,5 @@ int cmd_check(int argc, const char **argv)
6213
6214 objname = argv[0];
6215
6216 - return check(objname, no_fp, no_unreachable, false);
6217 + return check(objname, false);
6218 }
6219 diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
6220 index 91e8e19ff5e0..77ea2b97117d 100644
6221 --- a/tools/objtool/builtin-orc.c
6222 +++ b/tools/objtool/builtin-orc.c
6223 @@ -25,7 +25,6 @@
6224 */
6225
6226 #include <string.h>
6227 -#include <subcmd/parse-options.h>
6228 #include "builtin.h"
6229 #include "check.h"
6230
6231 @@ -36,9 +35,6 @@ static const char *orc_usage[] = {
6232 NULL,
6233 };
6234
6235 -extern const struct option check_options[];
6236 -extern bool no_fp, no_unreachable;
6237 -
6238 int cmd_orc(int argc, const char **argv)
6239 {
6240 const char *objname;
6241 @@ -54,7 +50,7 @@ int cmd_orc(int argc, const char **argv)
6242
6243 objname = argv[0];
6244
6245 - return check(objname, no_fp, no_unreachable, true);
6246 + return check(objname, true);
6247 }
6248
6249 if (!strcmp(argv[0], "dump")) {
6250 diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
6251 index dd526067fed5..28ff40e19a14 100644
6252 --- a/tools/objtool/builtin.h
6253 +++ b/tools/objtool/builtin.h
6254 @@ -17,6 +17,11 @@
6255 #ifndef _BUILTIN_H
6256 #define _BUILTIN_H
6257
6258 +#include <subcmd/parse-options.h>
6259 +
6260 +extern const struct option check_options[];
6261 +extern bool no_fp, no_unreachable, retpoline, module;
6262 +
6263 extern int cmd_check(int argc, const char **argv);
6264 extern int cmd_orc(int argc, const char **argv);
6265
6266 diff --git a/tools/objtool/check.c b/tools/objtool/check.c
6267 index c7fb5c2392ee..9d01d0b1084e 100644
6268 --- a/tools/objtool/check.c
6269 +++ b/tools/objtool/check.c
6270 @@ -18,6 +18,7 @@
6271 #include <string.h>
6272 #include <stdlib.h>
6273
6274 +#include "builtin.h"
6275 #include "check.h"
6276 #include "elf.h"
6277 #include "special.h"
6278 @@ -33,7 +34,6 @@ struct alternative {
6279 };
6280
6281 const char *objname;
6282 -static bool no_fp;
6283 struct cfi_state initial_func_cfi;
6284
6285 struct instruction *find_insn(struct objtool_file *file,
6286 @@ -496,6 +496,7 @@ static int add_jump_destinations(struct objtool_file *file)
6287 * disguise, so convert them accordingly.
6288 */
6289 insn->type = INSN_JUMP_DYNAMIC;
6290 + insn->retpoline_safe = true;
6291 continue;
6292 } else {
6293 /* sibling call */
6294 @@ -547,7 +548,8 @@ static int add_call_destinations(struct objtool_file *file)
6295 if (!insn->call_dest && !insn->ignore) {
6296 WARN_FUNC("unsupported intra-function call",
6297 insn->sec, insn->offset);
6298 - WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
6299 + if (retpoline)
6300 + WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
6301 return -1;
6302 }
6303
6304 @@ -922,7 +924,11 @@ static struct rela *find_switch_table(struct objtool_file *file,
6305 if (find_symbol_containing(file->rodata, text_rela->addend))
6306 continue;
6307
6308 - return find_rela_by_dest(file->rodata, text_rela->addend);
6309 + rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend);
6310 + if (!rodata_rela)
6311 + continue;
6312 +
6313 + return rodata_rela;
6314 }
6315
6316 return NULL;
6317 @@ -1107,6 +1113,41 @@ static int read_unwind_hints(struct objtool_file *file)
6318 return 0;
6319 }
6320
6321 +static int read_retpoline_hints(struct objtool_file *file)
6322 +{
6323 + struct section *sec;
6324 + struct instruction *insn;
6325 + struct rela *rela;
6326 +
6327 + sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
6328 + if (!sec)
6329 + return 0;
6330 +
6331 + list_for_each_entry(rela, &sec->rela_list, list) {
6332 + if (rela->sym->type != STT_SECTION) {
6333 + WARN("unexpected relocation symbol type in %s", sec->name);
6334 + return -1;
6335 + }
6336 +
6337 + insn = find_insn(file, rela->sym->sec, rela->addend);
6338 + if (!insn) {
6339 + WARN("bad .discard.retpoline_safe entry");
6340 + return -1;
6341 + }
6342 +
6343 + if (insn->type != INSN_JUMP_DYNAMIC &&
6344 + insn->type != INSN_CALL_DYNAMIC) {
6345 + WARN_FUNC("retpoline_safe hint not an indirect jump/call",
6346 + insn->sec, insn->offset);
6347 + return -1;
6348 + }
6349 +
6350 + insn->retpoline_safe = true;
6351 + }
6352 +
6353 + return 0;
6354 +}
6355 +
6356 static int decode_sections(struct objtool_file *file)
6357 {
6358 int ret;
6359 @@ -1145,6 +1186,10 @@ static int decode_sections(struct objtool_file *file)
6360 if (ret)
6361 return ret;
6362
6363 + ret = read_retpoline_hints(file);
6364 + if (ret)
6365 + return ret;
6366 +
6367 return 0;
6368 }
6369
6370 @@ -1890,6 +1935,38 @@ static int validate_unwind_hints(struct objtool_file *file)
6371 return warnings;
6372 }
6373
6374 +static int validate_retpoline(struct objtool_file *file)
6375 +{
6376 + struct instruction *insn;
6377 + int warnings = 0;
6378 +
6379 + for_each_insn(file, insn) {
6380 + if (insn->type != INSN_JUMP_DYNAMIC &&
6381 + insn->type != INSN_CALL_DYNAMIC)
6382 + continue;
6383 +
6384 + if (insn->retpoline_safe)
6385 + continue;
6386 +
6387 + /*
6388 + * .init.text code is run before userspace and thus doesn't
6389 + * strictly need retpolines, except for modules, which are
6390 + * loaded late and very much do need retpolines in their
6391 + * .init.text
6392 + */
6393 + if (!strcmp(insn->sec->name, ".init.text") && !module)
6394 + continue;
6395 +
6396 + WARN_FUNC("indirect %s found in RETPOLINE build",
6397 + insn->sec, insn->offset,
6398 + insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
6399 +
6400 + warnings++;
6401 + }
6402 +
6403 + return warnings;
6404 +}
6405 +
6406 static bool is_kasan_insn(struct instruction *insn)
6407 {
6408 return (insn->type == INSN_CALL &&
6409 @@ -2021,13 +2098,12 @@ static void cleanup(struct objtool_file *file)
6410 elf_close(file->elf);
6411 }
6412
6413 -int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc)
6414 +int check(const char *_objname, bool orc)
6415 {
6416 struct objtool_file file;
6417 int ret, warnings = 0;
6418
6419 objname = _objname;
6420 - no_fp = _no_fp;
6421
6422 file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY);
6423 if (!file.elf)
6424 @@ -2051,6 +2127,13 @@ int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc)
6425 if (list_empty(&file.insn_list))
6426 goto out;
6427
6428 + if (retpoline) {
6429 + ret = validate_retpoline(&file);
6430 + if (ret < 0)
6431 + return ret;
6432 + warnings += ret;
6433 + }
6434 +
6435 ret = validate_functions(&file);
6436 if (ret < 0)
6437 goto out;
6438 diff --git a/tools/objtool/check.h b/tools/objtool/check.h
6439 index 23a1d065cae1..c6b68fcb926f 100644
6440 --- a/tools/objtool/check.h
6441 +++ b/tools/objtool/check.h
6442 @@ -45,6 +45,7 @@ struct instruction {
6443 unsigned char type;
6444 unsigned long immediate;
6445 bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
6446 + bool retpoline_safe;
6447 struct symbol *call_dest;
6448 struct instruction *jump_dest;
6449 struct instruction *first_jump_src;
6450 @@ -63,7 +64,7 @@ struct objtool_file {
6451 bool ignore_unreachables, c_file, hints;
6452 };
6453
6454 -int check(const char *objname, bool no_fp, bool no_unreachable, bool orc);
6455 +int check(const char *objname, bool orc);
6456
6457 struct instruction *find_insn(struct objtool_file *file,
6458 struct section *sec, unsigned long offset);
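
read_retpoline_hints() above consumes relocations from .discard.retpoline_safe; those entries are produced by an annotation macro added in the companion x86 patches of this series, reconstructed here from the series rather than quoted from this patch:

    /* records the address of the instruction that follows it in a section
     * objtool reads and the linker discards; _ASM_PTR is .long or .quad */
    #define ANNOTATE_RETPOLINE_SAFE                         \
            "999:\n\t"                                      \
            ".pushsection .discard.retpoline_safe\n\t"      \
            _ASM_PTR " 999b\n\t"                            \
            ".popsection\n\t"

It is placed directly before a deliberately indirect call or jump inside an asm() statement, for instance in the paravirt call templates, which is how such instructions end up with insn->retpoline_safe set and survive validate_retpoline() cleanly.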
6459 diff --git a/tools/perf/util/trigger.h b/tools/perf/util/trigger.h
6460 index 370138e7e35c..88223bc7c82b 100644
6461 --- a/tools/perf/util/trigger.h
6462 +++ b/tools/perf/util/trigger.h
6463 @@ -12,7 +12,7 @@
6464 * States and transits:
6465 *
6466 *
6467 - * OFF--(on)--> READY --(hit)--> HIT
6468 + * OFF--> ON --> READY --(hit)--> HIT
6469 * ^ |
6470 * | (ready)
6471 * | |
6472 @@ -27,8 +27,9 @@ struct trigger {
6473 volatile enum {
6474 TRIGGER_ERROR = -2,
6475 TRIGGER_OFF = -1,
6476 - TRIGGER_READY = 0,
6477 - TRIGGER_HIT = 1,
6478 + TRIGGER_ON = 0,
6479 + TRIGGER_READY = 1,
6480 + TRIGGER_HIT = 2,
6481 } state;
6482 const char *name;
6483 };
6484 @@ -50,7 +51,7 @@ static inline bool trigger_is_error(struct trigger *t)
6485 static inline void trigger_on(struct trigger *t)
6486 {
6487 TRIGGER_WARN_ONCE(t, TRIGGER_OFF);
6488 - t->state = TRIGGER_READY;
6489 + t->state = TRIGGER_ON;
6490 }
6491
6492 static inline void trigger_ready(struct trigger *t)