Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0126-4.14.27-all-fixes.patch

Parent Directory | Revision Log


Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 203769 bytes
-added up to patches-4.14.79
1 diff --git a/Documentation/devicetree/bindings/power/mti,mips-cpc.txt b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt
2 new file mode 100644
3 index 000000000000..c6b82511ae8a
4 --- /dev/null
5 +++ b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt
6 @@ -0,0 +1,8 @@
7 +Binding for MIPS Cluster Power Controller (CPC).
8 +
9 +This binding allows a system to specify where the CPC registers are
10 +located.
11 +
12 +Required properties:
13 +compatible : Should be "mti,mips-cpc".
14 +regs: Should describe the address & size of the CPC register region.
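
For reference, a minimal consumer sketch in C, assuming the standard OF helpers (of_find_compatible_node, of_address_to_resource); the mips-cpc.c hunk later in this patch performs essentially the same lookup:

	#include <linux/of.h>
	#include <linux/of_address.h>

	/* Resolve the CPC register base from the "mti,mips-cpc" node. */
	static phys_addr_t cpc_base_from_dt(void)
	{
		struct device_node *np;
		struct resource res;
		int err;

		np = of_find_compatible_node(NULL, NULL, "mti,mips-cpc");
		if (np) {
			err = of_address_to_resource(np, 0, &res);
			of_node_put(np);
			if (!err)
				return res.start; /* start of the region from the regs property */
		}

		return 0; /* no node found: fall back to the platform default */
	}
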
15 diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
16 index 39aa9e8697cc..fbedcc39460b 100644
17 --- a/Documentation/sphinx/kerneldoc.py
18 +++ b/Documentation/sphinx/kerneldoc.py
19 @@ -36,8 +36,7 @@ import glob
20
21 from docutils import nodes, statemachine
22 from docutils.statemachine import ViewList
23 -from docutils.parsers.rst import directives
24 -from sphinx.util.compat import Directive
25 +from docutils.parsers.rst import directives, Directive
26 from sphinx.ext.autodoc import AutodocReporter
27
28 __version__ = '1.0'
29 diff --git a/MAINTAINERS b/MAINTAINERS
30 index 76ea063d8083..546beb6b0176 100644
31 --- a/MAINTAINERS
32 +++ b/MAINTAINERS
33 @@ -9001,6 +9001,7 @@ MIPS GENERIC PLATFORM
34 M: Paul Burton <paul.burton@mips.com>
35 L: linux-mips@linux-mips.org
36 S: Supported
37 +F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt
38 F: arch/mips/generic/
39 F: arch/mips/tools/generic-board-config.sh
40
41 diff --git a/Makefile b/Makefile
42 index 666182dda187..00e969db94b8 100644
43 --- a/Makefile
44 +++ b/Makefile
45 @@ -1,7 +1,7 @@
46 # SPDX-License-Identifier: GPL-2.0
47 VERSION = 4
48 PATCHLEVEL = 14
49 -SUBLEVEL = 26
50 +SUBLEVEL = 27
51 EXTRAVERSION =
52 NAME = Petit Gorille
53
54 @@ -11,6 +11,10 @@ NAME = Petit Gorille
55 # Comments in this file are targeted only to the developer, do not
56 # expect to learn how to build the kernel reading this file.
57
58 +# That's our default target when none is given on the command line
59 +PHONY := _all
60 +_all:
61 +
62 # o Do not use make's built-in rules and variables
63 # (this increases performance and avoids hard-to-debug behaviour);
64 # o Look for make include files relative to root of kernel src
65 @@ -117,10 +121,6 @@ ifeq ("$(origin O)", "command line")
66 KBUILD_OUTPUT := $(O)
67 endif
68
69 -# That's our default target when none is given on the command line
70 -PHONY := _all
71 -_all:
72 -
73 # Cancel implicit rules on top Makefile
74 $(CURDIR)/Makefile Makefile: ;
75
76 @@ -187,15 +187,6 @@ ifeq ("$(origin M)", "command line")
77 KBUILD_EXTMOD := $(M)
78 endif
79
80 -# If building an external module we do not care about the all: rule
81 -# but instead _all depend on modules
82 -PHONY += all
83 -ifeq ($(KBUILD_EXTMOD),)
84 -_all: all
85 -else
86 -_all: modules
87 -endif
88 -
89 ifeq ($(KBUILD_SRC),)
90 # building in the source tree
91 srctree := .
92 @@ -207,6 +198,9 @@ else
93 srctree := $(KBUILD_SRC)
94 endif
95 endif
96 +
97 +export KBUILD_CHECKSRC KBUILD_EXTMOD KBUILD_SRC
98 +
99 objtree := .
100 src := $(srctree)
101 obj := $(objtree)
102 @@ -215,6 +209,74 @@ VPATH := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD))
103
104 export srctree objtree VPATH
105
106 +# To make sure we do not include .config for any of the *config targets
107 +# catch them early, and hand them over to scripts/kconfig/Makefile
108 +# It is allowed to specify more targets when calling make, including
109 +# mixing *config targets and build targets.
110 +# For example 'make oldconfig all'.
111 +# Detect when mixed targets is specified, and make a second invocation
112 +# of make so .config is not included in this case either (for *config).
113 +
114 +version_h := include/generated/uapi/linux/version.h
115 +old_version_h := include/linux/version.h
116 +
117 +no-dot-config-targets := clean mrproper distclean \
118 + cscope gtags TAGS tags help% %docs check% coccicheck \
119 + $(version_h) headers_% archheaders archscripts \
120 + kernelversion %src-pkg
121 +
122 +config-targets := 0
123 +mixed-targets := 0
124 +dot-config := 1
125 +
126 +ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
127 + ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
128 + dot-config := 0
129 + endif
130 +endif
131 +
132 +ifeq ($(KBUILD_EXTMOD),)
133 + ifneq ($(filter config %config,$(MAKECMDGOALS)),)
134 + config-targets := 1
135 + ifneq ($(words $(MAKECMDGOALS)),1)
136 + mixed-targets := 1
137 + endif
138 + endif
139 +endif
140 +# install and modules_install need also be processed one by one
141 +ifneq ($(filter install,$(MAKECMDGOALS)),)
142 + ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
143 + mixed-targets := 1
144 + endif
145 +endif
146 +
147 +ifeq ($(mixed-targets),1)
148 +# ===========================================================================
149 +# We're called with mixed targets (*config and build targets).
150 +# Handle them one by one.
151 +
152 +PHONY += $(MAKECMDGOALS) __build_one_by_one
153 +
154 +$(filter-out __build_one_by_one, $(MAKECMDGOALS)): __build_one_by_one
155 + @:
156 +
157 +__build_one_by_one:
158 + $(Q)set -e; \
159 + for i in $(MAKECMDGOALS); do \
160 + $(MAKE) -f $(srctree)/Makefile $$i; \
161 + done
162 +
163 +else
164 +
165 +# We need some generic definitions (do not try to remake the file).
166 +scripts/Kbuild.include: ;
167 +include scripts/Kbuild.include
168 +
169 +# Read KERNELRELEASE from include/config/kernel.release (if it exists)
170 +KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
171 +KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
172 +export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
173 +
174 # SUBARCH tells the usermode build what the underlying arch is. That is set
175 # first, and if a usermode build is happening, the "ARCH=um" on the command
176 # line overrides the setting of ARCH below. If a native build is happening,
177 @@ -312,40 +374,6 @@ HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
178 -Wno-missing-field-initializers -fno-delete-null-pointer-checks
179 endif
180
181 -# Decide whether to build built-in, modular, or both.
182 -# Normally, just do built-in.
183 -
184 -KBUILD_MODULES :=
185 -KBUILD_BUILTIN := 1
186 -
187 -# If we have only "make modules", don't compile built-in objects.
188 -# When we're building modules with modversions, we need to consider
189 -# the built-in objects during the descend as well, in order to
190 -# make sure the checksums are up to date before we record them.
191 -
192 -ifeq ($(MAKECMDGOALS),modules)
193 - KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
194 -endif
195 -
196 -# If we have "make <whatever> modules", compile modules
197 -# in addition to whatever we do anyway.
198 -# Just "make" or "make all" shall build modules as well
199 -
200 -ifneq ($(filter all _all modules,$(MAKECMDGOALS)),)
201 - KBUILD_MODULES := 1
202 -endif
203 -
204 -ifeq ($(MAKECMDGOALS),)
205 - KBUILD_MODULES := 1
206 -endif
207 -
208 -export KBUILD_MODULES KBUILD_BUILTIN
209 -export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
210 -
211 -# We need some generic definitions (do not try to remake the file).
212 -scripts/Kbuild.include: ;
213 -include scripts/Kbuild.include
214 -
215 # Make variables (CC, etc...)
216 AS = $(CROSS_COMPILE)as
217 LD = $(CROSS_COMPILE)ld
218 @@ -405,11 +433,6 @@ KBUILD_CFLAGS_MODULE := -DMODULE
219 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
220 GCC_PLUGINS_CFLAGS :=
221
222 -# Read KERNELRELEASE from include/config/kernel.release (if it exists)
223 -KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
224 -KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
225 -
226 -export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
227 export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
228 export CPP AR NM STRIP OBJCOPY OBJDUMP HOSTLDFLAGS HOST_LOADLIBES
229 export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
230 @@ -459,73 +482,23 @@ ifneq ($(KBUILD_SRC),)
231 $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL)
232 endif
233
234 -# Support for using generic headers in asm-generic
235 -PHONY += asm-generic uapi-asm-generic
236 -asm-generic: uapi-asm-generic
237 - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
238 - src=asm obj=arch/$(SRCARCH)/include/generated/asm
239 -uapi-asm-generic:
240 - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
241 - src=uapi/asm obj=arch/$(SRCARCH)/include/generated/uapi/asm
242 -
243 -# To make sure we do not include .config for any of the *config targets
244 -# catch them early, and hand them over to scripts/kconfig/Makefile
245 -# It is allowed to specify more targets when calling make, including
246 -# mixing *config targets and build targets.
247 -# For example 'make oldconfig all'.
248 -# Detect when mixed targets is specified, and make a second invocation
249 -# of make so .config is not included in this case either (for *config).
250 -
251 -version_h := include/generated/uapi/linux/version.h
252 -old_version_h := include/linux/version.h
253 -
254 -no-dot-config-targets := clean mrproper distclean \
255 - cscope gtags TAGS tags help% %docs check% coccicheck \
256 - $(version_h) headers_% archheaders archscripts \
257 - kernelversion %src-pkg
258 -
259 -config-targets := 0
260 -mixed-targets := 0
261 -dot-config := 1
262 -
263 -ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
264 - ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
265 - dot-config := 0
266 - endif
267 +ifeq ($(cc-name),clang)
268 +ifneq ($(CROSS_COMPILE),)
269 +CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
270 +GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
271 endif
272 -
273 -ifeq ($(KBUILD_EXTMOD),)
274 - ifneq ($(filter config %config,$(MAKECMDGOALS)),)
275 - config-targets := 1
276 - ifneq ($(words $(MAKECMDGOALS)),1)
277 - mixed-targets := 1
278 - endif
279 - endif
280 +ifneq ($(GCC_TOOLCHAIN),)
281 +CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
282 endif
283 -# install and modules_install need also be processed one by one
284 -ifneq ($(filter install,$(MAKECMDGOALS)),)
285 - ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
286 - mixed-targets := 1
287 - endif
288 +KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
289 +KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
290 endif
291
292 -ifeq ($(mixed-targets),1)
293 -# ===========================================================================
294 -# We're called with mixed targets (*config and build targets).
295 -# Handle them one by one.
296 -
297 -PHONY += $(MAKECMDGOALS) __build_one_by_one
298 +RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
299 +RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
300 +RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
301 +export RETPOLINE_CFLAGS
302
303 -$(filter-out __build_one_by_one, $(MAKECMDGOALS)): __build_one_by_one
304 - @:
305 -
306 -__build_one_by_one:
307 - $(Q)set -e; \
308 - for i in $(MAKECMDGOALS); do \
309 - $(MAKE) -f $(srctree)/Makefile $$i; \
310 - done
311 -
312 -else
313 ifeq ($(config-targets),1)
314 # ===========================================================================
315 # *config targets only - make sure prerequisites are updated, and descend
316 @@ -548,6 +521,44 @@ else
317 # Build targets only - this includes vmlinux, arch specific targets, clean
318 # targets and others. In general all targets except *config targets.
319
320 +# If building an external module we do not care about the all: rule
321 +# but instead _all depend on modules
322 +PHONY += all
323 +ifeq ($(KBUILD_EXTMOD),)
324 +_all: all
325 +else
326 +_all: modules
327 +endif
328 +
329 +# Decide whether to build built-in, modular, or both.
330 +# Normally, just do built-in.
331 +
332 +KBUILD_MODULES :=
333 +KBUILD_BUILTIN := 1
334 +
335 +# If we have only "make modules", don't compile built-in objects.
336 +# When we're building modules with modversions, we need to consider
337 +# the built-in objects during the descend as well, in order to
338 +# make sure the checksums are up to date before we record them.
339 +
340 +ifeq ($(MAKECMDGOALS),modules)
341 + KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
342 +endif
343 +
344 +# If we have "make <whatever> modules", compile modules
345 +# in addition to whatever we do anyway.
346 +# Just "make" or "make all" shall build modules as well
347 +
348 +ifneq ($(filter all _all modules,$(MAKECMDGOALS)),)
349 + KBUILD_MODULES := 1
350 +endif
351 +
352 +ifeq ($(MAKECMDGOALS),)
353 + KBUILD_MODULES := 1
354 +endif
355 +
356 +export KBUILD_MODULES KBUILD_BUILTIN
357 +
358 ifeq ($(KBUILD_EXTMOD),)
359 # Additional helpers built in scripts/
360 # Carefully list dependencies so we do not try to build scripts twice
361 @@ -699,15 +710,6 @@ endif
362 KBUILD_CFLAGS += $(stackp-flag)
363
364 ifeq ($(cc-name),clang)
365 -ifneq ($(CROSS_COMPILE),)
366 -CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
367 -GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
368 -endif
369 -ifneq ($(GCC_TOOLCHAIN),)
370 -CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
371 -endif
372 -KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
373 -KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
374 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
375 KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
376 KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
377 @@ -1076,6 +1078,15 @@ prepare0: archprepare gcc-plugins
378 # All the preparing..
379 prepare: prepare0 prepare-objtool
380
381 +# Support for using generic headers in asm-generic
382 +PHONY += asm-generic uapi-asm-generic
383 +asm-generic: uapi-asm-generic
384 + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
385 + src=asm obj=arch/$(SRCARCH)/include/generated/asm
386 +uapi-asm-generic:
387 + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
388 + src=uapi/asm obj=arch/$(SRCARCH)/include/generated/uapi/asm
389 +
390 PHONY += prepare-objtool
391 prepare-objtool: $(objtool_target)
392
393 diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
394 index 9ff92050053c..fa7f308c9027 100644
395 --- a/arch/arm/mach-omap2/omap-secure.c
396 +++ b/arch/arm/mach-omap2/omap-secure.c
397 @@ -73,6 +73,7 @@ phys_addr_t omap_secure_ram_mempool_base(void)
398 return omap_secure_memblock_base;
399 }
400
401 +#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
402 u32 omap3_save_secure_ram(void __iomem *addr, int size)
403 {
404 u32 ret;
405 @@ -91,6 +92,7 @@ u32 omap3_save_secure_ram(void __iomem *addr, int size)
406
407 return ret;
408 }
409 +#endif
410
411 /**
412 * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
413 diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
414 index fa20124c19d5..01bc0688d47d 100644
415 --- a/arch/arm64/mm/mmu.c
416 +++ b/arch/arm64/mm/mmu.c
417 @@ -107,7 +107,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
418 * The following mapping attributes may be updated in live
419 * kernel mappings without the need for break-before-make.
420 */
421 - static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
422 + static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
423
424 /* creating or taking down mappings is always safe */
425 if (old == 0 || new == 0)
426 @@ -117,9 +117,9 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
427 if ((old | new) & PTE_CONT)
428 return false;
429
430 - /* Transitioning from Global to Non-Global is safe */
431 - if (((old ^ new) == PTE_NG) && (new & PTE_NG))
432 - return true;
433 + /* Transitioning from Non-Global to Global is unsafe */
434 + if (old & ~new & PTE_NG)
435 + return false;
436
437 return ((old ^ new) & ~mask) == 0;
438 }
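
The rewritten check treats a non-global to global transition (nG set in old, clear in new) as the unsafe direction, rather than whitelisting the reverse. A standalone sketch of the same bit test, with the nG bit position assumed as on arm64:

	typedef unsigned long pteval_t;
	#define PTE_NG (1UL << 11) /* nG bit, assumed position */

	/* True when 'old' had nG set and 'new' clears it: non-global -> global. */
	static int ng_transition_is_unsafe(pteval_t old, pteval_t new)
	{
		return (old & ~new & PTE_NG) != 0;
	}
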
439 diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
440 index 9ab48ff80c1c..6d11ae581ea7 100644
441 --- a/arch/mips/ath25/board.c
442 +++ b/arch/mips/ath25/board.c
443 @@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
444 }
445
446 board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
447 + if (!board_data)
448 + goto error;
449 ath25_board.config = (struct ath25_boarddata *)board_data;
450 memcpy_fromio(board_data, bcfg, 0x100);
451 if (broken_boarddata) {
452 diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
453 index 5b3a3f6a9ad3..d99f5242169e 100644
454 --- a/arch/mips/cavium-octeon/octeon-irq.c
455 +++ b/arch/mips/cavium-octeon/octeon-irq.c
456 @@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
457 }
458
459 host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
460 + if (!host_data)
461 + return -ENOMEM;
462 raw_spin_lock_init(&host_data->lock);
463
464 addr = of_get_address(ciu_node, 0, NULL, NULL);
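
Both MIPS hunks above add the same missing guard: the kzalloc() result was used without a NULL check. The canonical shape, sketched with an illustrative function:

	#include <linux/slab.h>
	#include <linux/errno.h>

	static int setup_with_buffer(size_t size)
	{
		void *buf = kzalloc(size, GFP_KERNEL);

		if (!buf)
			return -ENOMEM; /* previously dereferenced unchecked */

		/* ... populate and use buf ... */
		kfree(buf);
		return 0;
	}
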
465 diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
466 index 19c88d770054..fcf9af492d60 100644
467 --- a/arch/mips/kernel/mips-cpc.c
468 +++ b/arch/mips/kernel/mips-cpc.c
469 @@ -10,6 +10,8 @@
470
471 #include <linux/errno.h>
472 #include <linux/percpu.h>
473 +#include <linux/of.h>
474 +#include <linux/of_address.h>
475 #include <linux/spinlock.h>
476
477 #include <asm/mips-cps.h>
478 @@ -22,6 +24,17 @@ static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
479
480 phys_addr_t __weak mips_cpc_default_phys_base(void)
481 {
482 + struct device_node *cpc_node;
483 + struct resource res;
484 + int err;
485 +
486 + cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
487 + if (cpc_node) {
488 + err = of_address_to_resource(cpc_node, 0, &res);
489 + if (!err)
490 + return res.start;
491 + }
492 +
493 return 0;
494 }
495
496 diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
497 index 87dcac2447c8..382d12eb88f0 100644
498 --- a/arch/mips/kernel/smp-bmips.c
499 +++ b/arch/mips/kernel/smp-bmips.c
500 @@ -168,11 +168,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
501 return;
502 }
503
504 - if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
505 - "smp_ipi0", NULL))
506 + if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
507 + IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
508 panic("Can't request IPI0 interrupt");
509 - if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
510 - "smp_ipi1", NULL))
511 + if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
512 + IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
513 panic("Can't request IPI1 interrupt");
514 }
515
516 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
517 index f4f12ecd0cec..0fa3a788dd20 100644
518 --- a/arch/s390/kvm/kvm-s390.c
519 +++ b/arch/s390/kvm/kvm-s390.c
520 @@ -2119,6 +2119,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
521 /* we still need the basic sca for the ipte control */
522 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
523 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
524 + return;
525 }
526 read_lock(&vcpu->kvm->arch.sca_lock);
527 if (vcpu->kvm->arch.use_esca) {
528 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
529 index 559b37bf5a2e..7483cd514c32 100644
530 --- a/arch/x86/Kconfig
531 +++ b/arch/x86/Kconfig
532 @@ -431,6 +431,7 @@ config GOLDFISH
533 config RETPOLINE
534 bool "Avoid speculative indirect branches in kernel"
535 default y
536 + select STACK_VALIDATION if HAVE_STACK_VALIDATION
537 help
538 Compile kernel with the retpoline compiler options to guard against
539 kernel-to-user data leaks by avoiding speculative indirect
540 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
541 index fad55160dcb9..498c1b812300 100644
542 --- a/arch/x86/Makefile
543 +++ b/arch/x86/Makefile
544 @@ -232,10 +232,9 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
545
546 # Avoid indirect branches in kernel to deal with Spectre
547 ifdef CONFIG_RETPOLINE
548 - RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
549 - ifneq ($(RETPOLINE_CFLAGS),)
550 - KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
551 - endif
552 +ifneq ($(RETPOLINE_CFLAGS),)
553 + KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
554 +endif
555 endif
556
557 archscripts: scripts_basic
558 diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
559 index dce7092ab24a..5d10b7a85cad 100644
560 --- a/arch/x86/entry/calling.h
561 +++ b/arch/x86/entry/calling.h
562 @@ -97,7 +97,7 @@ For 32-bit we have the following conventions - kernel is built with
563
564 #define SIZEOF_PTREGS 21*8
565
566 -.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
567 +.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
568 /*
569 * Push registers and sanitize registers of values that a
570 * speculation attack might otherwise want to exploit. The
571 @@ -105,32 +105,41 @@ For 32-bit we have the following conventions - kernel is built with
572 * could be put to use in a speculative execution gadget.
573 * Interleave XOR with PUSH for better uop scheduling:
574 */
575 + .if \save_ret
576 + pushq %rsi /* pt_regs->si */
577 + movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
578 + movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */
579 + .else
580 pushq %rdi /* pt_regs->di */
581 pushq %rsi /* pt_regs->si */
582 + .endif
583 pushq \rdx /* pt_regs->dx */
584 pushq %rcx /* pt_regs->cx */
585 pushq \rax /* pt_regs->ax */
586 pushq %r8 /* pt_regs->r8 */
587 - xorq %r8, %r8 /* nospec r8 */
588 + xorl %r8d, %r8d /* nospec r8 */
589 pushq %r9 /* pt_regs->r9 */
590 - xorq %r9, %r9 /* nospec r9 */
591 + xorl %r9d, %r9d /* nospec r9 */
592 pushq %r10 /* pt_regs->r10 */
593 - xorq %r10, %r10 /* nospec r10 */
594 + xorl %r10d, %r10d /* nospec r10 */
595 pushq %r11 /* pt_regs->r11 */
596 - xorq %r11, %r11 /* nospec r11*/
597 + xorl %r11d, %r11d /* nospec r11*/
598 pushq %rbx /* pt_regs->rbx */
599 xorl %ebx, %ebx /* nospec rbx*/
600 pushq %rbp /* pt_regs->rbp */
601 xorl %ebp, %ebp /* nospec rbp*/
602 pushq %r12 /* pt_regs->r12 */
603 - xorq %r12, %r12 /* nospec r12*/
604 + xorl %r12d, %r12d /* nospec r12*/
605 pushq %r13 /* pt_regs->r13 */
606 - xorq %r13, %r13 /* nospec r13*/
607 + xorl %r13d, %r13d /* nospec r13*/
608 pushq %r14 /* pt_regs->r14 */
609 - xorq %r14, %r14 /* nospec r14*/
610 + xorl %r14d, %r14d /* nospec r14*/
611 pushq %r15 /* pt_regs->r15 */
612 - xorq %r15, %r15 /* nospec r15*/
613 + xorl %r15d, %r15d /* nospec r15*/
614 UNWIND_HINT_REGS
615 + .if \save_ret
616 + pushq %rsi /* return address on top of stack */
617 + .endif
618 .endm
619
620 .macro POP_REGS pop_rdi=1 skip_r11rcx=0
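
The xorq to xorl substitutions rely on x86-64 zero-extension: writing a 32-bit register clears the upper 32 bits of the full 64-bit register, and the 32-bit form omits the REX.W prefix, saving a byte per instruction. A small demonstration with GCC inline asm, assuming an LP64 target:

	#include <assert.h>

	int main(void)
	{
		unsigned long v;

		asm volatile("movq $-1, %0\n\t"
			     "xorl %k0, %k0" /* 32-bit write zero-extends: same result as xorq */
			     : "=r"(v));
		assert(v == 0);
		return 0;
	}
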
621 diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
622 index 2a35b1e0fb90..60c4c342316c 100644
623 --- a/arch/x86/entry/entry_32.S
624 +++ b/arch/x86/entry/entry_32.S
625 @@ -252,8 +252,7 @@ ENTRY(__switch_to_asm)
626 * exist, overwrite the RSB with entries which capture
627 * speculative execution to prevent attack.
628 */
629 - /* Clobbers %ebx */
630 - FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
631 + FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
632 #endif
633
634 /* restore callee-saved registers */
635 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
636 index 68a2d76e4f8f..6f3cc15e0c73 100644
637 --- a/arch/x86/entry/entry_64.S
638 +++ b/arch/x86/entry/entry_64.S
639 @@ -360,8 +360,7 @@ ENTRY(__switch_to_asm)
640 * exist, overwrite the RSB with entries which capture
641 * speculative execution to prevent attack.
642 */
643 - /* Clobbers %rbx */
644 - FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
645 + FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
646 #endif
647
648 /* restore callee-saved registers */
649 @@ -867,12 +866,8 @@ ENTRY(\sym)
650 pushq $-1 /* ORIG_RAX: no syscall to restart */
651 .endif
652
653 - /* Save all registers in pt_regs */
654 - PUSH_AND_CLEAR_REGS
655 - ENCODE_FRAME_POINTER
656 -
657 .if \paranoid < 2
658 - testb $3, CS(%rsp) /* If coming from userspace, switch stacks */
659 + testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */
660 jnz .Lfrom_usermode_switch_stack_\@
661 .endif
662
663 @@ -1117,13 +1112,15 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
664 #endif
665
666 /*
667 - * Switch gs if needed.
668 + * Save all registers in pt_regs, and switch gs if needed.
669 * Use slow, but surefire "are we in kernel?" check.
670 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
671 */
672 ENTRY(paranoid_entry)
673 UNWIND_HINT_FUNC
674 cld
675 + PUSH_AND_CLEAR_REGS save_ret=1
676 + ENCODE_FRAME_POINTER 8
677 movl $1, %ebx
678 movl $MSR_GS_BASE, %ecx
679 rdmsr
680 @@ -1168,12 +1165,14 @@ ENTRY(paranoid_exit)
681 END(paranoid_exit)
682
683 /*
684 - * Switch gs if needed.
685 + * Save all registers in pt_regs, and switch GS if needed.
686 * Return: EBX=0: came from user mode; EBX=1: otherwise
687 */
688 ENTRY(error_entry)
689 - UNWIND_HINT_REGS offset=8
690 + UNWIND_HINT_FUNC
691 cld
692 + PUSH_AND_CLEAR_REGS save_ret=1
693 + ENCODE_FRAME_POINTER 8
694 testb $3, CS+8(%rsp)
695 jz .Lerror_kernelspace
696
697 @@ -1564,8 +1563,6 @@ end_repeat_nmi:
698 * frame to point back to repeat_nmi.
699 */
700 pushq $-1 /* ORIG_RAX: no syscall to restart */
701 - PUSH_AND_CLEAR_REGS
702 - ENCODE_FRAME_POINTER
703
704 /*
705 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
706 diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
707 index fd65e016e413..364ea4a207be 100644
708 --- a/arch/x86/entry/entry_64_compat.S
709 +++ b/arch/x86/entry/entry_64_compat.S
710 @@ -85,25 +85,25 @@ ENTRY(entry_SYSENTER_compat)
711 pushq %rcx /* pt_regs->cx */
712 pushq $-ENOSYS /* pt_regs->ax */
713 pushq $0 /* pt_regs->r8 = 0 */
714 - xorq %r8, %r8 /* nospec r8 */
715 + xorl %r8d, %r8d /* nospec r8 */
716 pushq $0 /* pt_regs->r9 = 0 */
717 - xorq %r9, %r9 /* nospec r9 */
718 + xorl %r9d, %r9d /* nospec r9 */
719 pushq $0 /* pt_regs->r10 = 0 */
720 - xorq %r10, %r10 /* nospec r10 */
721 + xorl %r10d, %r10d /* nospec r10 */
722 pushq $0 /* pt_regs->r11 = 0 */
723 - xorq %r11, %r11 /* nospec r11 */
724 + xorl %r11d, %r11d /* nospec r11 */
725 pushq %rbx /* pt_regs->rbx */
726 xorl %ebx, %ebx /* nospec rbx */
727 pushq %rbp /* pt_regs->rbp (will be overwritten) */
728 xorl %ebp, %ebp /* nospec rbp */
729 pushq $0 /* pt_regs->r12 = 0 */
730 - xorq %r12, %r12 /* nospec r12 */
731 + xorl %r12d, %r12d /* nospec r12 */
732 pushq $0 /* pt_regs->r13 = 0 */
733 - xorq %r13, %r13 /* nospec r13 */
734 + xorl %r13d, %r13d /* nospec r13 */
735 pushq $0 /* pt_regs->r14 = 0 */
736 - xorq %r14, %r14 /* nospec r14 */
737 + xorl %r14d, %r14d /* nospec r14 */
738 pushq $0 /* pt_regs->r15 = 0 */
739 - xorq %r15, %r15 /* nospec r15 */
740 + xorl %r15d, %r15d /* nospec r15 */
741 cld
742
743 /*
744 @@ -224,25 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
745 pushq %rbp /* pt_regs->cx (stashed in bp) */
746 pushq $-ENOSYS /* pt_regs->ax */
747 pushq $0 /* pt_regs->r8 = 0 */
748 - xorq %r8, %r8 /* nospec r8 */
749 + xorl %r8d, %r8d /* nospec r8 */
750 pushq $0 /* pt_regs->r9 = 0 */
751 - xorq %r9, %r9 /* nospec r9 */
752 + xorl %r9d, %r9d /* nospec r9 */
753 pushq $0 /* pt_regs->r10 = 0 */
754 - xorq %r10, %r10 /* nospec r10 */
755 + xorl %r10d, %r10d /* nospec r10 */
756 pushq $0 /* pt_regs->r11 = 0 */
757 - xorq %r11, %r11 /* nospec r11 */
758 + xorl %r11d, %r11d /* nospec r11 */
759 pushq %rbx /* pt_regs->rbx */
760 xorl %ebx, %ebx /* nospec rbx */
761 pushq %rbp /* pt_regs->rbp (will be overwritten) */
762 xorl %ebp, %ebp /* nospec rbp */
763 pushq $0 /* pt_regs->r12 = 0 */
764 - xorq %r12, %r12 /* nospec r12 */
765 + xorl %r12d, %r12d /* nospec r12 */
766 pushq $0 /* pt_regs->r13 = 0 */
767 - xorq %r13, %r13 /* nospec r13 */
768 + xorl %r13d, %r13d /* nospec r13 */
769 pushq $0 /* pt_regs->r14 = 0 */
770 - xorq %r14, %r14 /* nospec r14 */
771 + xorl %r14d, %r14d /* nospec r14 */
772 pushq $0 /* pt_regs->r15 = 0 */
773 - xorq %r15, %r15 /* nospec r15 */
774 + xorl %r15d, %r15d /* nospec r15 */
775
776 /*
777 * User mode is traced as though IRQs are on, and SYSENTER
778 @@ -298,9 +298,9 @@ sysret32_from_system_call:
779 */
780 SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9
781
782 - xorq %r8, %r8
783 - xorq %r9, %r9
784 - xorq %r10, %r10
785 + xorl %r8d, %r8d
786 + xorl %r9d, %r9d
787 + xorl %r10d, %r10d
788 swapgs
789 sysretl
790 END(entry_SYSCALL_compat)
791 @@ -358,25 +358,25 @@ ENTRY(entry_INT80_compat)
792 pushq %rcx /* pt_regs->cx */
793 pushq $-ENOSYS /* pt_regs->ax */
794 pushq $0 /* pt_regs->r8 = 0 */
795 - xorq %r8, %r8 /* nospec r8 */
796 + xorl %r8d, %r8d /* nospec r8 */
797 pushq $0 /* pt_regs->r9 = 0 */
798 - xorq %r9, %r9 /* nospec r9 */
799 + xorl %r9d, %r9d /* nospec r9 */
800 pushq $0 /* pt_regs->r10 = 0 */
801 - xorq %r10, %r10 /* nospec r10 */
802 + xorl %r10d, %r10d /* nospec r10 */
803 pushq $0 /* pt_regs->r11 = 0 */
804 - xorq %r11, %r11 /* nospec r11 */
805 + xorl %r11d, %r11d /* nospec r11 */
806 pushq %rbx /* pt_regs->rbx */
807 xorl %ebx, %ebx /* nospec rbx */
808 pushq %rbp /* pt_regs->rbp */
809 xorl %ebp, %ebp /* nospec rbp */
810 pushq %r12 /* pt_regs->r12 */
811 - xorq %r12, %r12 /* nospec r12 */
812 + xorl %r12d, %r12d /* nospec r12 */
813 pushq %r13 /* pt_regs->r13 */
814 - xorq %r13, %r13 /* nospec r13 */
815 + xorl %r13d, %r13d /* nospec r13 */
816 pushq %r14 /* pt_regs->r14 */
817 - xorq %r14, %r14 /* nospec r14 */
818 + xorl %r14d, %r14d /* nospec r14 */
819 pushq %r15 /* pt_regs->r15 */
820 - xorq %r15, %r15 /* nospec r15 */
821 + xorl %r15d, %r15d /* nospec r15 */
822 cld
823
824 /*
825 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
826 index 4d4015ddcf26..c356098b6fb9 100644
827 --- a/arch/x86/include/asm/apm.h
828 +++ b/arch/x86/include/asm/apm.h
829 @@ -7,6 +7,8 @@
830 #ifndef _ASM_X86_MACH_DEFAULT_APM_H
831 #define _ASM_X86_MACH_DEFAULT_APM_H
832
833 +#include <asm/nospec-branch.h>
834 +
835 #ifdef APM_ZERO_SEGS
836 # define APM_DO_ZERO_SEGS \
837 "pushl %%ds\n\t" \
838 @@ -32,6 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
839 * N.B. We do NOT need a cld after the BIOS call
840 * because we always save and restore the flags.
841 */
842 + firmware_restrict_branch_speculation_start();
843 __asm__ __volatile__(APM_DO_ZERO_SEGS
844 "pushl %%edi\n\t"
845 "pushl %%ebp\n\t"
846 @@ -44,6 +47,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
847 "=S" (*esi)
848 : "a" (func), "b" (ebx_in), "c" (ecx_in)
849 : "memory", "cc");
850 + firmware_restrict_branch_speculation_end();
851 }
852
853 static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
854 @@ -56,6 +60,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
855 * N.B. We do NOT need a cld after the BIOS call
856 * because we always save and restore the flags.
857 */
858 + firmware_restrict_branch_speculation_start();
859 __asm__ __volatile__(APM_DO_ZERO_SEGS
860 "pushl %%edi\n\t"
861 "pushl %%ebp\n\t"
862 @@ -68,6 +73,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
863 "=S" (si)
864 : "a" (func), "b" (ebx_in), "c" (ecx_in)
865 : "memory", "cc");
866 + firmware_restrict_branch_speculation_end();
867 return error;
868 }
869
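
The guards added to apm.h bracket each firmware entry with IBRS, the same shape this patch applies to the EFI wrappers below. A sketch of the pattern, where bios_service() is a hypothetical firmware entry point:

	/* Restrict branch speculation across an untrusted firmware call. */
	static u32 guarded_bios_call(u32 func)
	{
		u32 ret;

		firmware_restrict_branch_speculation_start(); /* IBRS on, preemption off */
		ret = bios_service(func);                     /* hypothetical firmware call */
		firmware_restrict_branch_speculation_end();   /* IBRS off, preemption on */

		return ret;
	}
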
870 diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
871 index 4d111616524b..1908214b9125 100644
872 --- a/arch/x86/include/asm/asm-prototypes.h
873 +++ b/arch/x86/include/asm/asm-prototypes.h
874 @@ -38,7 +38,4 @@ INDIRECT_THUNK(dx)
875 INDIRECT_THUNK(si)
876 INDIRECT_THUNK(di)
877 INDIRECT_THUNK(bp)
878 -asmlinkage void __fill_rsb(void);
879 -asmlinkage void __clear_rsb(void);
880 -
881 #endif /* CONFIG_RETPOLINE */
882 diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
883 index 73b5fff159a4..66c14347c502 100644
884 --- a/arch/x86/include/asm/cpufeatures.h
885 +++ b/arch/x86/include/asm/cpufeatures.h
886 @@ -211,6 +211,7 @@
887 #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
888
889 #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
890 +#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
891
892 /* Virtualization flags: Linux defined, word 8 */
893 #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
894 diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
895 index 85f6ccb80b91..a399c1ebf6f0 100644
896 --- a/arch/x86/include/asm/efi.h
897 +++ b/arch/x86/include/asm/efi.h
898 @@ -6,6 +6,7 @@
899 #include <asm/pgtable.h>
900 #include <asm/processor-flags.h>
901 #include <asm/tlb.h>
902 +#include <asm/nospec-branch.h>
903
904 /*
905 * We map the EFI regions needed for runtime services non-contiguously,
906 @@ -36,8 +37,18 @@
907
908 extern asmlinkage unsigned long efi_call_phys(void *, ...);
909
910 -#define arch_efi_call_virt_setup() kernel_fpu_begin()
911 -#define arch_efi_call_virt_teardown() kernel_fpu_end()
912 +#define arch_efi_call_virt_setup() \
913 +({ \
914 + kernel_fpu_begin(); \
915 + firmware_restrict_branch_speculation_start(); \
916 +})
917 +
918 +#define arch_efi_call_virt_teardown() \
919 +({ \
920 + firmware_restrict_branch_speculation_end(); \
921 + kernel_fpu_end(); \
922 +})
923 +
924
925 /*
926 * Wrap all the virtual calls in a way that forces the parameters on the stack.
927 @@ -73,6 +84,7 @@ struct efi_scratch {
928 efi_sync_low_kernel_mappings(); \
929 preempt_disable(); \
930 __kernel_fpu_begin(); \
931 + firmware_restrict_branch_speculation_start(); \
932 \
933 if (efi_scratch.use_pgd) { \
934 efi_scratch.prev_cr3 = __read_cr3(); \
935 @@ -91,6 +103,7 @@ struct efi_scratch {
936 __flush_tlb_all(); \
937 } \
938 \
939 + firmware_restrict_branch_speculation_end(); \
940 __kernel_fpu_end(); \
941 preempt_enable(); \
942 })
943 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
944 index c931b88982a0..1de72ce514cd 100644
945 --- a/arch/x86/include/asm/mmu_context.h
946 +++ b/arch/x86/include/asm/mmu_context.h
947 @@ -74,6 +74,7 @@ static inline void *ldt_slot_va(int slot)
948 return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
949 #else
950 BUG();
951 + return (void *)fix_to_virt(FIX_HOLE);
952 #endif
953 }
954
955 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
956 index 81a1be326571..d0dabeae0505 100644
957 --- a/arch/x86/include/asm/nospec-branch.h
958 +++ b/arch/x86/include/asm/nospec-branch.h
959 @@ -8,6 +8,50 @@
960 #include <asm/cpufeatures.h>
961 #include <asm/msr-index.h>
962
963 +/*
964 + * Fill the CPU return stack buffer.
965 + *
966 + * Each entry in the RSB, if used for a speculative 'ret', contains an
967 + * infinite 'pause; lfence; jmp' loop to capture speculative execution.
968 + *
969 + * This is required in various cases for retpoline and IBRS-based
970 + * mitigations for the Spectre variant 2 vulnerability. Sometimes to
971 + * eliminate potentially bogus entries from the RSB, and sometimes
972 + * purely to ensure that it doesn't get empty, which on some CPUs would
973 + * allow predictions from other (unwanted!) sources to be used.
974 + *
975 + * We define a CPP macro such that it can be used from both .S files and
976 + * inline assembly. It's possible to do a .macro and then include that
977 + * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
978 + */
979 +
980 +#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
981 +#define RSB_FILL_LOOPS 16 /* To avoid underflow */
982 +
983 +/*
984 + * Google experimented with loop-unrolling and this turned out to be
 985 + * the optimal version - two calls, each with their own speculation
986 + * trap should their return address end up getting used, in a loop.
987 + */
988 +#define __FILL_RETURN_BUFFER(reg, nr, sp) \
989 + mov $(nr/2), reg; \
990 +771: \
991 + call 772f; \
992 +773: /* speculation trap */ \
993 + pause; \
994 + lfence; \
995 + jmp 773b; \
996 +772: \
997 + call 774f; \
998 +775: /* speculation trap */ \
999 + pause; \
1000 + lfence; \
1001 + jmp 775b; \
1002 +774: \
1003 + dec reg; \
1004 + jnz 771b; \
1005 + add $(BITS_PER_LONG/8) * nr, sp;
1006 +
1007 #ifdef __ASSEMBLY__
1008
1009 /*
1010 @@ -23,6 +67,18 @@
1011 .popsection
1012 .endm
1013
1014 +/*
1015 + * This should be used immediately before an indirect jump/call. It tells
1016 + * objtool the subsequent indirect jump/call is vouched safe for retpoline
1017 + * builds.
1018 + */
1019 +.macro ANNOTATE_RETPOLINE_SAFE
1020 + .Lannotate_\@:
1021 + .pushsection .discard.retpoline_safe
1022 + _ASM_PTR .Lannotate_\@
1023 + .popsection
1024 +.endm
1025 +
1026 /*
1027 * These are the bare retpoline primitives for indirect jmp and call.
1028 * Do not use these directly; they only exist to make the ALTERNATIVE
1029 @@ -59,9 +115,9 @@
1030 .macro JMP_NOSPEC reg:req
1031 #ifdef CONFIG_RETPOLINE
1032 ANNOTATE_NOSPEC_ALTERNATIVE
1033 - ALTERNATIVE_2 __stringify(jmp *\reg), \
1034 + ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \
1035 __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
1036 - __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
1037 + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
1038 #else
1039 jmp *\reg
1040 #endif
1041 @@ -70,18 +126,25 @@
1042 .macro CALL_NOSPEC reg:req
1043 #ifdef CONFIG_RETPOLINE
1044 ANNOTATE_NOSPEC_ALTERNATIVE
1045 - ALTERNATIVE_2 __stringify(call *\reg), \
1046 + ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \
1047 __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
1048 - __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
1049 + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
1050 #else
1051 call *\reg
1052 #endif
1053 .endm
1054
1055 -/* This clobbers the BX register */
1056 -.macro FILL_RETURN_BUFFER nr:req ftr:req
1057 + /*
1058 + * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
1059 + * monstrosity above, manually.
1060 + */
1061 +.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
1062 #ifdef CONFIG_RETPOLINE
1063 - ALTERNATIVE "", "call __clear_rsb", \ftr
1064 + ANNOTATE_NOSPEC_ALTERNATIVE
1065 + ALTERNATIVE "jmp .Lskip_rsb_\@", \
1066 + __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
1067 + \ftr
1068 +.Lskip_rsb_\@:
1069 #endif
1070 .endm
1071
1072 @@ -93,6 +156,12 @@
1073 ".long 999b - .\n\t" \
1074 ".popsection\n\t"
1075
1076 +#define ANNOTATE_RETPOLINE_SAFE \
1077 + "999:\n\t" \
1078 + ".pushsection .discard.retpoline_safe\n\t" \
1079 + _ASM_PTR " 999b\n\t" \
1080 + ".popsection\n\t"
1081 +
1082 #if defined(CONFIG_X86_64) && defined(RETPOLINE)
1083
1084 /*
1085 @@ -102,6 +171,7 @@
1086 # define CALL_NOSPEC \
1087 ANNOTATE_NOSPEC_ALTERNATIVE \
1088 ALTERNATIVE( \
1089 + ANNOTATE_RETPOLINE_SAFE \
1090 "call *%[thunk_target]\n", \
1091 "call __x86_indirect_thunk_%V[thunk_target]\n", \
1092 X86_FEATURE_RETPOLINE)
1093 @@ -156,26 +226,54 @@ extern char __indirect_thunk_end[];
1094 static inline void vmexit_fill_RSB(void)
1095 {
1096 #ifdef CONFIG_RETPOLINE
1097 - alternative_input("",
1098 - "call __fill_rsb",
1099 - X86_FEATURE_RETPOLINE,
1100 - ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
1101 + unsigned long loops;
1102 +
1103 + asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
1104 + ALTERNATIVE("jmp 910f",
1105 + __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
1106 + X86_FEATURE_RETPOLINE)
1107 + "910:"
1108 + : "=r" (loops), ASM_CALL_CONSTRAINT
1109 + : : "memory" );
1110 #endif
1111 }
1112
1113 +#define alternative_msr_write(_msr, _val, _feature) \
1114 + asm volatile(ALTERNATIVE("", \
1115 + "movl %[msr], %%ecx\n\t" \
1116 + "movl %[val], %%eax\n\t" \
1117 + "movl $0, %%edx\n\t" \
1118 + "wrmsr", \
1119 + _feature) \
1120 + : : [msr] "i" (_msr), [val] "i" (_val) \
1121 + : "eax", "ecx", "edx", "memory")
1122 +
1123 static inline void indirect_branch_prediction_barrier(void)
1124 {
1125 - asm volatile(ALTERNATIVE("",
1126 - "movl %[msr], %%ecx\n\t"
1127 - "movl %[val], %%eax\n\t"
1128 - "movl $0, %%edx\n\t"
1129 - "wrmsr",
1130 - X86_FEATURE_USE_IBPB)
1131 - : : [msr] "i" (MSR_IA32_PRED_CMD),
1132 - [val] "i" (PRED_CMD_IBPB)
1133 - : "eax", "ecx", "edx", "memory");
1134 + alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
1135 + X86_FEATURE_USE_IBPB);
1136 }
1137
1138 +/*
1139 + * With retpoline, we must use IBRS to restrict branch prediction
1140 + * before calling into firmware.
1141 + *
1142 + * (Implemented as CPP macros due to header hell.)
1143 + */
1144 +#define firmware_restrict_branch_speculation_start() \
1145 +do { \
1146 + preempt_disable(); \
1147 + alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
1148 + X86_FEATURE_USE_IBRS_FW); \
1149 +} while (0)
1150 +
1151 +#define firmware_restrict_branch_speculation_end() \
1152 +do { \
1153 + alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
1154 + X86_FEATURE_USE_IBRS_FW); \
1155 + preempt_enable(); \
1156 +} while (0)
1157 +
1158 #endif /* __ASSEMBLY__ */
1159
1160 /*
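
From C, the CALL_NOSPEC string defined above routes an indirect call through the retpoline thunk when X86_FEATURE_RETPOLINE is set. A hedged usage sketch against a plain function pointer, using the [thunk_target] operand name from the header:

	static void call_fn_nospec(void (*fn)(void))
	{
		asm volatile(CALL_NOSPEC
			     : ASM_CALL_CONSTRAINT
			     : [thunk_target] "r" (fn)
			     : "memory");
	}
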
1161 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
1162 index 554841fab717..c83a2f418cea 100644
1163 --- a/arch/x86/include/asm/paravirt.h
1164 +++ b/arch/x86/include/asm/paravirt.h
1165 @@ -7,6 +7,7 @@
1166 #ifdef CONFIG_PARAVIRT
1167 #include <asm/pgtable_types.h>
1168 #include <asm/asm.h>
1169 +#include <asm/nospec-branch.h>
1170
1171 #include <asm/paravirt_types.h>
1172
1173 @@ -879,23 +880,27 @@ extern void default_banner(void);
1174
1175 #define INTERRUPT_RETURN \
1176 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
1177 - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
1178 + ANNOTATE_RETPOLINE_SAFE; \
1179 + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
1180
1181 #define DISABLE_INTERRUPTS(clobbers) \
1182 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1183 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1184 + ANNOTATE_RETPOLINE_SAFE; \
1185 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
1186 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1187
1188 #define ENABLE_INTERRUPTS(clobbers) \
1189 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
1190 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1191 + ANNOTATE_RETPOLINE_SAFE; \
1192 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
1193 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1194
1195 #ifdef CONFIG_X86_32
1196 #define GET_CR0_INTO_EAX \
1197 push %ecx; push %edx; \
1198 + ANNOTATE_RETPOLINE_SAFE; \
1199 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
1200 pop %edx; pop %ecx
1201 #else /* !CONFIG_X86_32 */
1202 @@ -917,21 +922,25 @@ extern void default_banner(void);
1203 */
1204 #define SWAPGS \
1205 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1206 - call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
1207 + ANNOTATE_RETPOLINE_SAFE; \
1208 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
1209 )
1210
1211 #define GET_CR2_INTO_RAX \
1212 - call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
1213 + ANNOTATE_RETPOLINE_SAFE; \
1214 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
1215
1216 #define USERGS_SYSRET64 \
1217 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
1218 CLBR_NONE, \
1219 - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
1220 + ANNOTATE_RETPOLINE_SAFE; \
1221 + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
1222
1223 #ifdef CONFIG_DEBUG_ENTRY
1224 #define SAVE_FLAGS(clobbers) \
1225 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
1226 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1227 + ANNOTATE_RETPOLINE_SAFE; \
1228 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \
1229 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1230 #endif
1231 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
1232 index f624f1f10316..180bc0bff0fb 100644
1233 --- a/arch/x86/include/asm/paravirt_types.h
1234 +++ b/arch/x86/include/asm/paravirt_types.h
1235 @@ -43,6 +43,7 @@
1236 #include <asm/desc_defs.h>
1237 #include <asm/kmap_types.h>
1238 #include <asm/pgtable_types.h>
1239 +#include <asm/nospec-branch.h>
1240
1241 struct page;
1242 struct thread_struct;
1243 @@ -392,7 +393,9 @@ int paravirt_disable_iospace(void);
1244 * offset into the paravirt_patch_template structure, and can therefore be
1245 * freely converted back into a structure offset.
1246 */
1247 -#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
1248 +#define PARAVIRT_CALL \
1249 + ANNOTATE_RETPOLINE_SAFE \
1250 + "call *%c[paravirt_opptr];"
1251
1252 /*
1253 * These macros are intended to wrap calls through one of the paravirt
1254 diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
1255 index 4e44250e7d0d..d65171120e90 100644
1256 --- a/arch/x86/include/asm/refcount.h
1257 +++ b/arch/x86/include/asm/refcount.h
1258 @@ -67,13 +67,13 @@ static __always_inline __must_check
1259 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
1260 {
1261 GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
1262 - r->refs.counter, "er", i, "%0", e);
1263 + r->refs.counter, "er", i, "%0", e, "cx");
1264 }
1265
1266 static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
1267 {
1268 GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
1269 - r->refs.counter, "%0", e);
1270 + r->refs.counter, "%0", e, "cx");
1271 }
1272
1273 static __always_inline __must_check
1274 diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
1275 index f91c365e57c3..4914a3e7c803 100644
1276 --- a/arch/x86/include/asm/rmwcc.h
1277 +++ b/arch/x86/include/asm/rmwcc.h
1278 @@ -2,8 +2,7 @@
1279 #ifndef _ASM_X86_RMWcc
1280 #define _ASM_X86_RMWcc
1281
1282 -#define __CLOBBERS_MEM "memory"
1283 -#define __CLOBBERS_MEM_CC_CX "memory", "cc", "cx"
1284 +#define __CLOBBERS_MEM(clb...) "memory", ## clb
1285
1286 #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
1287
1288 @@ -40,18 +39,19 @@ do { \
1289 #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
1290
1291 #define GEN_UNARY_RMWcc(op, var, arg0, cc) \
1292 - __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM)
1293 + __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
1294
1295 -#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc) \
1296 +#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\
1297 __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \
1298 - __CLOBBERS_MEM_CC_CX)
1299 + __CLOBBERS_MEM(clobbers))
1300
1301 #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
1302 __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \
1303 - __CLOBBERS_MEM, vcon (val))
1304 + __CLOBBERS_MEM(), vcon (val))
1305
1306 -#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc) \
1307 +#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc, \
1308 + clobbers...) \
1309 __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \
1310 - __CLOBBERS_MEM_CC_CX, vcon (val))
1311 + __CLOBBERS_MEM(clobbers), vcon (val))
1312
1313 #endif /* _ASM_X86_RMWcc */
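
The reworked __CLOBBERS_MEM(clb...) uses the GNU ", ## args" extension, which deletes the leading comma when the variadic list is empty, so callers that need no extra clobbers pass nothing. A minimal demonstration of the expansion:

	#define CLOBBERS_MEM(clb...) "memory", ## clb

	static void bump(int *v)
	{
		/* clobber list expands to: "memory", "cc" */
		asm volatile("incl %0" : "+m" (*v) : : CLOBBERS_MEM("cc"));
	}

	/* CLOBBERS_MEM() alone expands to just "memory". */
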
1314 diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
1315 index d6baf23782bc..5c019d23d06b 100644
1316 --- a/arch/x86/include/asm/sections.h
1317 +++ b/arch/x86/include/asm/sections.h
1318 @@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[];
1319
1320 #if defined(CONFIG_X86_64)
1321 extern char __end_rodata_hpage_align[];
1322 +extern char __entry_trampoline_start[], __entry_trampoline_end[];
1323 #endif
1324
1325 #endif /* _ASM_X86_SECTIONS_H */
1326 diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
1327 index 91723461dc1f..435db58a7bad 100644
1328 --- a/arch/x86/include/uapi/asm/mce.h
1329 +++ b/arch/x86/include/uapi/asm/mce.h
1330 @@ -30,6 +30,7 @@ struct mce {
1331 __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */
1332 __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
1333 __u64 ppin; /* Protected Processor Inventory Number */
1334 + __u32 microcode;/* Microcode revision */
1335 };
1336
1337 #define MCE_GET_RECORD_LEN _IOR('M', 1, int)
1338 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
1339 index d71c8b54b696..bfca937bdcc3 100644
1340 --- a/arch/x86/kernel/cpu/bugs.c
1341 +++ b/arch/x86/kernel/cpu/bugs.c
1342 @@ -300,6 +300,15 @@ static void __init spectre_v2_select_mitigation(void)
1343 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
1344 pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
1345 }
1346 +
1347 + /*
1348 + * Retpoline means the kernel is safe because it has no indirect
1349 + * branches. But firmware isn't, so use IBRS to protect that.
1350 + */
1351 + if (boot_cpu_has(X86_FEATURE_IBRS)) {
1352 + setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
1353 + pr_info("Enabling Restricted Speculation for firmware calls\n");
1354 + }
1355 }
1356
1357 #undef pr_fmt
1358 @@ -326,8 +335,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
1359 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1360 return sprintf(buf, "Not affected\n");
1361
1362 - return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1363 + return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1364 boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
1365 + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1366 spectre_v2_module_string());
1367 }
1368 #endif
1369 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
1370 index d19e903214b4..4aa9fd379390 100644
1371 --- a/arch/x86/kernel/cpu/intel.c
1372 +++ b/arch/x86/kernel/cpu/intel.c
1373 @@ -144,6 +144,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
1374 {
1375 int i;
1376
1377 + /*
1378 + * We know that the hypervisor lie to us on the microcode version so
1379 + * we may as well hope that it is running the correct version.
1380 + */
1381 + if (cpu_has(c, X86_FEATURE_HYPERVISOR))
1382 + return false;
1383 +
1384 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
1385 if (c->x86_model == spectre_bad_microcodes[i].model &&
1386 c->x86_stepping == spectre_bad_microcodes[i].stepping)
1387 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
1388 index 73237aa271ea..e13d652fc30a 100644
1389 --- a/arch/x86/kernel/cpu/mcheck/mce.c
1390 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
1391 @@ -57,6 +57,9 @@
1392
1393 static DEFINE_MUTEX(mce_log_mutex);
1394
1395 +/* sysfs synchronization */
1396 +static DEFINE_MUTEX(mce_sysfs_mutex);
1397 +
1398 #define CREATE_TRACE_POINTS
1399 #include <trace/events/mce.h>
1400
1401 @@ -131,6 +134,8 @@ void mce_setup(struct mce *m)
1402
1403 if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
1404 rdmsrl(MSR_PPIN, m->ppin);
1405 +
1406 + m->microcode = boot_cpu_data.microcode;
1407 }
1408
1409 DEFINE_PER_CPU(struct mce, injectm);
1410 @@ -263,7 +268,7 @@ static void __print_mce(struct mce *m)
1411 */
1412 pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
1413 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
1414 - cpu_data(m->extcpu).microcode);
1415 + m->microcode);
1416 }
1417
1418 static void print_mce(struct mce *m)
1419 @@ -2081,6 +2086,7 @@ static ssize_t set_ignore_ce(struct device *s,
1420 if (kstrtou64(buf, 0, &new) < 0)
1421 return -EINVAL;
1422
1423 + mutex_lock(&mce_sysfs_mutex);
1424 if (mca_cfg.ignore_ce ^ !!new) {
1425 if (new) {
1426 /* disable ce features */
1427 @@ -2093,6 +2099,8 @@ static ssize_t set_ignore_ce(struct device *s,
1428 on_each_cpu(mce_enable_ce, (void *)1, 1);
1429 }
1430 }
1431 + mutex_unlock(&mce_sysfs_mutex);
1432 +
1433 return size;
1434 }
1435
1436 @@ -2105,6 +2113,7 @@ static ssize_t set_cmci_disabled(struct device *s,
1437 if (kstrtou64(buf, 0, &new) < 0)
1438 return -EINVAL;
1439
1440 + mutex_lock(&mce_sysfs_mutex);
1441 if (mca_cfg.cmci_disabled ^ !!new) {
1442 if (new) {
1443 /* disable cmci */
1444 @@ -2116,6 +2125,8 @@ static ssize_t set_cmci_disabled(struct device *s,
1445 on_each_cpu(mce_enable_ce, NULL, 1);
1446 }
1447 }
1448 + mutex_unlock(&mce_sysfs_mutex);
1449 +
1450 return size;
1451 }
1452
1453 @@ -2123,8 +2134,19 @@ static ssize_t store_int_with_restart(struct device *s,
1454 struct device_attribute *attr,
1455 const char *buf, size_t size)
1456 {
1457 - ssize_t ret = device_store_int(s, attr, buf, size);
1458 + unsigned long old_check_interval = check_interval;
1459 + ssize_t ret = device_store_ulong(s, attr, buf, size);
1460 +
1461 + if (check_interval == old_check_interval)
1462 + return ret;
1463 +
1464 + if (check_interval < 1)
1465 + check_interval = 1;
1466 +
1467 + mutex_lock(&mce_sysfs_mutex);
1468 mce_restart();
1469 + mutex_unlock(&mce_sysfs_mutex);
1470 +
1471 return ret;
1472 }
1473
1474 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
1475 index 04a625f0fcda..0f545b3cf926 100644
1476 --- a/arch/x86/kernel/head_64.S
1477 +++ b/arch/x86/kernel/head_64.S
1478 @@ -23,6 +23,7 @@
1479 #include <asm/nops.h>
1480 #include "../entry/calling.h"
1481 #include <asm/export.h>
1482 +#include <asm/nospec-branch.h>
1483
1484 #ifdef CONFIG_PARAVIRT
1485 #include <asm/asm-offsets.h>
1486 @@ -134,6 +135,7 @@ ENTRY(secondary_startup_64)
1487
1488 /* Ensure I am executing from virtual addresses */
1489 movq $1f, %rax
1490 + ANNOTATE_RETPOLINE_SAFE
1491 jmp *%rax
1492 1:
1493 UNWIND_HINT_EMPTY
1494 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
1495 index 0742491cbb73..ce06ec9c2323 100644
1496 --- a/arch/x86/kernel/kprobes/core.c
1497 +++ b/arch/x86/kernel/kprobes/core.c
1498 @@ -1149,10 +1149,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler);
1499
1500 bool arch_within_kprobe_blacklist(unsigned long addr)
1501 {
1502 + bool is_in_entry_trampoline_section = false;
1503 +
1504 +#ifdef CONFIG_X86_64
1505 + is_in_entry_trampoline_section =
1506 + (addr >= (unsigned long)__entry_trampoline_start &&
1507 + addr < (unsigned long)__entry_trampoline_end);
1508 +#endif
1509 return (addr >= (unsigned long)__kprobes_text_start &&
1510 addr < (unsigned long)__kprobes_text_end) ||
1511 (addr >= (unsigned long)__entry_text_start &&
1512 - addr < (unsigned long)__entry_text_end);
1513 + addr < (unsigned long)__entry_text_end) ||
1514 + is_in_entry_trampoline_section;
1515 }
1516
1517 int __init arch_init_kprobes(void)
1518 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
1519 index 9b138a06c1a4..b854ebf5851b 100644
1520 --- a/arch/x86/kernel/vmlinux.lds.S
1521 +++ b/arch/x86/kernel/vmlinux.lds.S
1522 @@ -118,9 +118,11 @@ SECTIONS
1523
1524 #ifdef CONFIG_X86_64
1525 . = ALIGN(PAGE_SIZE);
1526 + VMLINUX_SYMBOL(__entry_trampoline_start) = .;
1527 _entry_trampoline = .;
1528 *(.entry_trampoline)
1529 . = ALIGN(PAGE_SIZE);
1530 + VMLINUX_SYMBOL(__entry_trampoline_end) = .;
1531 ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
1532 #endif
1533
1534 diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
1535 index d0a3170e6804..d435c89875c1 100644
1536 --- a/arch/x86/lib/Makefile
1537 +++ b/arch/x86/lib/Makefile
1538 @@ -27,7 +27,6 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
1539 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
1540 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
1541 lib-$(CONFIG_RETPOLINE) += retpoline.o
1542 -OBJECT_FILES_NON_STANDARD_retpoline.o :=y
1543
1544 obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
1545
1546 diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
1547 index 480edc3a5e03..c909961e678a 100644
1548 --- a/arch/x86/lib/retpoline.S
1549 +++ b/arch/x86/lib/retpoline.S
1550 @@ -7,7 +7,6 @@
1551 #include <asm/alternative-asm.h>
1552 #include <asm/export.h>
1553 #include <asm/nospec-branch.h>
1554 -#include <asm/bitsperlong.h>
1555
1556 .macro THUNK reg
1557 .section .text.__x86.indirect_thunk
1558 @@ -47,58 +46,3 @@ GENERATE_THUNK(r13)
1559 GENERATE_THUNK(r14)
1560 GENERATE_THUNK(r15)
1561 #endif
1562 -
1563 -/*
1564 - * Fill the CPU return stack buffer.
1565 - *
1566 - * Each entry in the RSB, if used for a speculative 'ret', contains an
1567 - * infinite 'pause; lfence; jmp' loop to capture speculative execution.
1568 - *
1569 - * This is required in various cases for retpoline and IBRS-based
1570 - * mitigations for the Spectre variant 2 vulnerability. Sometimes to
1571 - * eliminate potentially bogus entries from the RSB, and sometimes
1572 - * purely to ensure that it doesn't get empty, which on some CPUs would
1573 - * allow predictions from other (unwanted!) sources to be used.
1574 - *
1575 - * Google experimented with loop-unrolling and this turned out to be
1576 - * the optimal version - two calls, each with their own speculation
1577 - * trap should their return address end up getting used, in a loop.
1578 - */
1579 -.macro STUFF_RSB nr:req sp:req
1580 - mov $(\nr / 2), %_ASM_BX
1581 - .align 16
1582 -771:
1583 - call 772f
1584 -773: /* speculation trap */
1585 - pause
1586 - lfence
1587 - jmp 773b
1588 - .align 16
1589 -772:
1590 - call 774f
1591 -775: /* speculation trap */
1592 - pause
1593 - lfence
1594 - jmp 775b
1595 - .align 16
1596 -774:
1597 - dec %_ASM_BX
1598 - jnz 771b
1599 - add $((BITS_PER_LONG/8) * \nr), \sp
1600 -.endm
1601 -
1602 -#define RSB_FILL_LOOPS 16 /* To avoid underflow */
1603 -
1604 -ENTRY(__fill_rsb)
1605 - STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
1606 - ret
1607 -END(__fill_rsb)
1608 -EXPORT_SYMBOL_GPL(__fill_rsb)
1609 -
1610 -#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
1611 -
1612 -ENTRY(__clear_rsb)
1613 - STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
1614 - ret
1615 -END(__clear_rsb)
1616 -EXPORT_SYMBOL_GPL(__clear_rsb)
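The standalone __fill_rsb/__clear_rsb helpers above are deleted, presumably because this release goes back to inline RSB filling. The removed macro's bookkeeping is easy to miss: each iteration issues two CALLs, so nr entries take nr/2 iterations and leave nr return addresses on the stack, which the trailing ADD discards. A tiny C model of just that arithmetic, with the x86-64 word size assumed:

    #include <stdio.h>

    int main(void)
    {
        const int bits_per_long = 64;   /* x86-64 assumption */
        const int nrs[] = { 16, 32 };   /* RSB_FILL_LOOPS, RSB_CLEAR_LOOPS */

        for (int i = 0; i < 2; i++) {
            int nr = nrs[i];
            /* two calls per iteration fill two RSB slots and push
             * two return addresses of bits_per_long/8 bytes each */
            printf("nr=%2d: %2d iterations, sp += %d bytes\n",
                   nr, nr / 2, (bits_per_long / 8) * nr);
        }
        return 0;
    }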
1617 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
1618 index 9150fe2c9b26..4c155ee0f89e 100644
1619 --- a/arch/x86/mm/fault.c
1620 +++ b/arch/x86/mm/fault.c
1621 @@ -1252,10 +1252,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
1622 tsk = current;
1623 mm = tsk->mm;
1624
1625 - /*
1626 - * Detect and handle instructions that would cause a page fault for
1627 - * both a tracked kernel page and a userspace page.
1628 - */
1629 prefetchw(&mm->mmap_sem);
1630
1631 if (unlikely(kmmio_fault(regs, address)))
1632 diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
1633 index 01f682cf77a8..40a6085063d6 100644
1634 --- a/arch/x86/mm/mem_encrypt_boot.S
1635 +++ b/arch/x86/mm/mem_encrypt_boot.S
1636 @@ -15,6 +15,7 @@
1637 #include <asm/page.h>
1638 #include <asm/processor-flags.h>
1639 #include <asm/msr-index.h>
1640 +#include <asm/nospec-branch.h>
1641
1642 .text
1643 .code64
1644 @@ -59,6 +60,7 @@ ENTRY(sme_encrypt_execute)
1645 movq %rax, %r8 /* Workarea encryption routine */
1646 addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */
1647
1648 + ANNOTATE_RETPOLINE_SAFE
1649 call *%rax /* Call the encryption routine */
1650
1651 pop %r12
1652 diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
1653 index de53bd15df5a..24bb7598774e 100644
1654 --- a/arch/x86/realmode/rm/trampoline_64.S
1655 +++ b/arch/x86/realmode/rm/trampoline_64.S
1656 @@ -102,7 +102,7 @@ ENTRY(startup_32)
1657 * don't we'll eventually crash trying to execute encrypted
1658 * instructions.
1659 */
1660 - bt $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
1661 + btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
1662 jnc .Ldone
1663 movl $MSR_K8_SYSCFG, %ecx
1664 rdmsr
1665 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
1666 index a2a0dce5114e..19858a146f30 100644
1667 --- a/drivers/block/loop.c
1668 +++ b/drivers/block/loop.c
1669 @@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
1670 struct iov_iter i;
1671 ssize_t bw;
1672
1673 - iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
1674 + iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
1675
1676 file_start_write(file);
1677 bw = vfs_iter_write(file, &i, ppos, 0);
1678 diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
1679 index 3cec403a80b3..5294442505cb 100644
1680 --- a/drivers/char/tpm/tpm-interface.c
1681 +++ b/drivers/char/tpm/tpm-interface.c
1682 @@ -413,6 +413,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
1683 if (chip->dev.parent)
1684 pm_runtime_get_sync(chip->dev.parent);
1685
1686 + if (chip->ops->clk_enable != NULL)
1687 + chip->ops->clk_enable(chip, true);
1688 +
1689 /* Store the decision as chip->locality will be changed. */
1690 need_locality = chip->locality == -1;
1691
1692 @@ -489,6 +492,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
1693 chip->locality = -1;
1694 }
1695 out_no_locality:
1696 + if (chip->ops->clk_enable != NULL)
1697 + chip->ops->clk_enable(chip, false);
1698 +
1699 if (chip->dev.parent)
1700 pm_runtime_put_sync(chip->dev.parent);
1701
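The tpm-interface.c hunks call a new optional clk_enable hook around command transmission, guarding each call with a NULL check so drivers that do not implement it are unaffected. A self-contained sketch of that optional-callback pattern; the struct names are simplified stand-ins for struct tpm_chip and struct tpm_class_ops:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct chip;

    struct chip_ops {
        void (*clk_enable)(struct chip *chip, bool value);  /* optional */
    };

    struct chip {
        const struct chip_ops *ops;
    };

    static void transmit(struct chip *chip)
    {
        if (chip->ops->clk_enable != NULL)
            chip->ops->clk_enable(chip, true);  /* clocks on */

        puts("...send command...");

        if (chip->ops->clk_enable != NULL)
            chip->ops->clk_enable(chip, false); /* clocks may stop */
    }

    static void demo_clk_enable(struct chip *chip, bool value)
    {
        (void)chip;
        printf("clk_enable(%d)\n", value);
    }

    int main(void)
    {
        const struct chip_ops with = { .clk_enable = demo_clk_enable };
        const struct chip_ops without = { 0 };
        struct chip a = { &with }, b = { &without };

        transmit(&a);   /* hook runs around the command */
        transmit(&b);   /* hook absent, safely skipped */
        return 0;
    }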
1702 diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
1703 index ebd0e75a3e4d..50b59a69dc33 100644
1704 --- a/drivers/char/tpm/tpm_tis.c
1705 +++ b/drivers/char/tpm/tpm_tis.c
1706 @@ -132,93 +132,14 @@ static int check_acpi_tpm2(struct device *dev)
1707 }
1708 #endif
1709
1710 -#ifdef CONFIG_X86
1711 -#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
1712 -#define ILB_REMAP_SIZE 0x100
1713 -#define LPC_CNTRL_REG_OFFSET 0x84
1714 -#define LPC_CLKRUN_EN (1 << 2)
1715 -
1716 -static void __iomem *ilb_base_addr;
1717 -
1718 -static inline bool is_bsw(void)
1719 -{
1720 - return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
1721 -}
1722 -
1723 -/**
1724 - * tpm_platform_begin_xfer() - clear LPC CLKRUN_EN i.e. clocks will be running
1725 - */
1726 -static void tpm_platform_begin_xfer(void)
1727 -{
1728 - u32 clkrun_val;
1729 -
1730 - if (!is_bsw())
1731 - return;
1732 -
1733 - clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
1734 -
1735 - /* Disable LPC CLKRUN# */
1736 - clkrun_val &= ~LPC_CLKRUN_EN;
1737 - iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
1738 -
1739 - /*
1740 - * Write any random value on port 0x80 which is on LPC, to make
1741 - * sure LPC clock is running before sending any TPM command.
1742 - */
1743 - outb(0xCC, 0x80);
1744 -
1745 -}
1746 -
1747 -/**
1748 - * tpm_platform_end_xfer() - set LPC CLKRUN_EN i.e. clocks can be turned off
1749 - */
1750 -static void tpm_platform_end_xfer(void)
1751 -{
1752 - u32 clkrun_val;
1753 -
1754 - if (!is_bsw())
1755 - return;
1756 -
1757 - clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
1758 -
1759 - /* Enable LPC CLKRUN# */
1760 - clkrun_val |= LPC_CLKRUN_EN;
1761 - iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
1762 -
1763 - /*
1764 - * Write any random value on port 0x80 which is on LPC, to make
1765 - * sure LPC clock is running before sending any TPM command.
1766 - */
1767 - outb(0xCC, 0x80);
1768 -
1769 -}
1770 -#else
1771 -static inline bool is_bsw(void)
1772 -{
1773 - return false;
1774 -}
1775 -
1776 -static void tpm_platform_begin_xfer(void)
1777 -{
1778 -}
1779 -
1780 -static void tpm_platform_end_xfer(void)
1781 -{
1782 -}
1783 -#endif
1784 -
1785 static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
1786 u8 *result)
1787 {
1788 struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
1789
1790 - tpm_platform_begin_xfer();
1791 -
1792 while (len--)
1793 *result++ = ioread8(phy->iobase + addr);
1794
1795 - tpm_platform_end_xfer();
1796 -
1797 return 0;
1798 }
1799
1800 @@ -227,13 +148,9 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
1801 {
1802 struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
1803
1804 - tpm_platform_begin_xfer();
1805 -
1806 while (len--)
1807 iowrite8(*value++, phy->iobase + addr);
1808
1809 - tpm_platform_end_xfer();
1810 -
1811 return 0;
1812 }
1813
1814 @@ -241,12 +158,8 @@ static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
1815 {
1816 struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
1817
1818 - tpm_platform_begin_xfer();
1819 -
1820 *result = ioread16(phy->iobase + addr);
1821
1822 - tpm_platform_end_xfer();
1823 -
1824 return 0;
1825 }
1826
1827 @@ -254,12 +167,8 @@ static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
1828 {
1829 struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
1830
1831 - tpm_platform_begin_xfer();
1832 -
1833 *result = ioread32(phy->iobase + addr);
1834
1835 - tpm_platform_end_xfer();
1836 -
1837 return 0;
1838 }
1839
1840 @@ -267,12 +176,8 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
1841 {
1842 struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
1843
1844 - tpm_platform_begin_xfer();
1845 -
1846 iowrite32(value, phy->iobase + addr);
1847
1848 - tpm_platform_end_xfer();
1849 -
1850 return 0;
1851 }
1852
1853 @@ -460,11 +365,6 @@ static int __init init_tis(void)
1854 if (rc)
1855 goto err_force;
1856
1857 -#ifdef CONFIG_X86
1858 - if (is_bsw())
1859 - ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
1860 - ILB_REMAP_SIZE);
1861 -#endif
1862 rc = platform_driver_register(&tis_drv);
1863 if (rc)
1864 goto err_platform;
1865 @@ -483,10 +383,6 @@ static int __init init_tis(void)
1866 err_platform:
1867 if (force_pdev)
1868 platform_device_unregister(force_pdev);
1869 -#ifdef CONFIG_X86
1870 - if (is_bsw())
1871 - iounmap(ilb_base_addr);
1872 -#endif
1873 err_force:
1874 return rc;
1875 }
1876 @@ -496,10 +392,6 @@ static void __exit cleanup_tis(void)
1877 pnp_unregister_driver(&tis_pnp_driver);
1878 platform_driver_unregister(&tis_drv);
1879
1880 -#ifdef CONFIG_X86
1881 - if (is_bsw())
1882 - iounmap(ilb_base_addr);
1883 -#endif
1884 if (force_pdev)
1885 platform_device_unregister(force_pdev);
1886 }
1887 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
1888 index 083578b2517e..a21e31c2b952 100644
1889 --- a/drivers/char/tpm/tpm_tis_core.c
1890 +++ b/drivers/char/tpm/tpm_tis_core.c
1891 @@ -31,6 +31,8 @@
1892 #include "tpm.h"
1893 #include "tpm_tis_core.h"
1894
1895 +static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);
1896 +
1897 /* Before we attempt to access the TPM we must see that the valid bit is set.
1898 * The specification says that this bit is 0 at reset and remains 0 until the
1899 * 'TPM has gone through its self test and initialization and has established
1900 @@ -422,19 +424,28 @@ static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
1901 int i, rc;
1902 u32 did_vid;
1903
1904 + if (chip->ops->clk_enable != NULL)
1905 + chip->ops->clk_enable(chip, true);
1906 +
1907 rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
1908 if (rc < 0)
1909 - return rc;
1910 + goto out;
1911
1912 for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
1913 if (vendor_timeout_overrides[i].did_vid != did_vid)
1914 continue;
1915 memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
1916 sizeof(vendor_timeout_overrides[i].timeout_us));
1917 - return true;
1918 + rc = true;
1919 }
1920
1921 - return false;
1922 + rc = false;
1923 +
1924 +out:
1925 + if (chip->ops->clk_enable != NULL)
1926 + chip->ops->clk_enable(chip, false);
1927 +
1928 + return rc;
1929 }
1930
1931 /*
1932 @@ -654,14 +665,73 @@ void tpm_tis_remove(struct tpm_chip *chip)
1933 u32 interrupt;
1934 int rc;
1935
1936 + tpm_tis_clkrun_enable(chip, true);
1937 +
1938 rc = tpm_tis_read32(priv, reg, &interrupt);
1939 if (rc < 0)
1940 interrupt = 0;
1941
1942 tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
1943 +
1944 + tpm_tis_clkrun_enable(chip, false);
1945 +
1946 + if (priv->ilb_base_addr)
1947 + iounmap(priv->ilb_base_addr);
1948 }
1949 EXPORT_SYMBOL_GPL(tpm_tis_remove);
1950
1951 +/**
1952 + * tpm_tis_clkrun_enable() - Keep clkrun protocol disabled for entire duration
1953 + * of a single TPM command
1954 + * @chip: TPM chip to use
1955 + * @value: 1 - Disable CLKRUN protocol, so that clocks are free running
1956 + * 0 - Enable CLKRUN protocol
1957 + * Call this function directly in tpm_tis_remove() in error or driver removal
1958 + * path, since the chip->ops is set to NULL in tpm_chip_unregister().
1959 + */
1960 +static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
1961 +{
1962 + struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
1963 + u32 clkrun_val;
1964 +
1965 + if (!IS_ENABLED(CONFIG_X86) || !is_bsw() ||
1966 + !data->ilb_base_addr)
1967 + return;
1968 +
1969 + if (value) {
1970 + data->clkrun_enabled++;
1971 + if (data->clkrun_enabled > 1)
1972 + return;
1973 + clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
1974 +
1975 + /* Disable LPC CLKRUN# */
1976 + clkrun_val &= ~LPC_CLKRUN_EN;
1977 + iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
1978 +
1979 + /*
1980 + * Write any random value on port 0x80 which is on LPC, to make
1981 + * sure LPC clock is running before sending any TPM command.
1982 + */
1983 + outb(0xCC, 0x80);
1984 + } else {
1985 + data->clkrun_enabled--;
1986 + if (data->clkrun_enabled)
1987 + return;
1988 +
1989 + clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
1990 +
1991 + /* Enable LPC CLKRUN# */
1992 + clkrun_val |= LPC_CLKRUN_EN;
1993 + iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
1994 +
1995 + /*
1996 + * Write any random value on port 0x80 which is on LPC, to make
1997 + * sure LPC clock is running before sending any TPM command.
1998 + */
1999 + outb(0xCC, 0x80);
2000 + }
2001 +}
2002 +
2003 static const struct tpm_class_ops tpm_tis = {
2004 .flags = TPM_OPS_AUTO_STARTUP,
2005 .status = tpm_tis_status,
2006 @@ -674,6 +744,7 @@ static const struct tpm_class_ops tpm_tis = {
2007 .req_canceled = tpm_tis_req_canceled,
2008 .request_locality = request_locality,
2009 .relinquish_locality = release_locality,
2010 + .clk_enable = tpm_tis_clkrun_enable,
2011 };
2012
2013 int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
2014 @@ -681,6 +752,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
2015 acpi_handle acpi_dev_handle)
2016 {
2017 u32 vendor, intfcaps, intmask;
2018 + u32 clkrun_val;
2019 u8 rid;
2020 int rc, probe;
2021 struct tpm_chip *chip;
2022 @@ -701,6 +773,23 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
2023 priv->phy_ops = phy_ops;
2024 dev_set_drvdata(&chip->dev, priv);
2025
2026 + if (is_bsw()) {
2027 + priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
2028 + ILB_REMAP_SIZE);
2029 + if (!priv->ilb_base_addr)
2030 + return -ENOMEM;
2031 +
2032 + clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET);
2033 + /* Check if CLKRUN# is already not enabled in the LPC bus */
2034 + if (!(clkrun_val & LPC_CLKRUN_EN)) {
2035 + iounmap(priv->ilb_base_addr);
2036 + priv->ilb_base_addr = NULL;
2037 + }
2038 + }
2039 +
2040 + if (chip->ops->clk_enable != NULL)
2041 + chip->ops->clk_enable(chip, true);
2042 +
2043 if (wait_startup(chip, 0) != 0) {
2044 rc = -ENODEV;
2045 goto out_err;
2046 @@ -791,9 +880,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
2047 }
2048 }
2049
2050 - return tpm_chip_register(chip);
2051 + rc = tpm_chip_register(chip);
2052 + if (rc)
2053 + goto out_err;
2054 +
2055 + if (chip->ops->clk_enable != NULL)
2056 + chip->ops->clk_enable(chip, false);
2057 +
2058 + return 0;
2059 out_err:
2060 + if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
2061 + chip->ops->clk_enable(chip, false);
2062 +
2063 tpm_tis_remove(chip);
2064 +
2065 return rc;
2066 }
2067 EXPORT_SYMBOL_GPL(tpm_tis_core_init);
2068 @@ -805,22 +905,31 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
2069 u32 intmask;
2070 int rc;
2071
2072 + if (chip->ops->clk_enable != NULL)
2073 + chip->ops->clk_enable(chip, true);
2074 +
2075 /* reenable interrupts that device may have lost or
2076 * BIOS/firmware may have disabled
2077 */
2078 rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
2079 if (rc < 0)
2080 - return;
2081 + goto out;
2082
2083 rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
2084 if (rc < 0)
2085 - return;
2086 + goto out;
2087
2088 intmask |= TPM_INTF_CMD_READY_INT
2089 | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
2090 | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
2091
2092 tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
2093 +
2094 +out:
2095 + if (chip->ops->clk_enable != NULL)
2096 + chip->ops->clk_enable(chip, false);
2097 +
2098 + return;
2099 }
2100
2101 int tpm_tis_resume(struct device *dev)
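tpm_tis_clkrun_enable() above counts enables so that nested users (tpm_transmit() around tpm_tis_update_timeouts(), for instance) keep CLKRUN# disabled for the whole outer window, and only the 0-to-1 and 1-to-0 transitions touch the hardware. A user-space model of that counting, with a printf standing in for the LPC register write:

    #include <stdio.h>

    static unsigned int clkrun_enabled;

    static void clkrun_toggle(int disabled)
    {
        printf("CLKRUN# %s\n", disabled ? "disabled (clocks running)"
                                        : "re-enabled");
    }

    static void clk_enable(int value)
    {
        if (value) {
            if (++clkrun_enabled > 1)
                return;     /* already held by an outer caller */
            clkrun_toggle(1);
        } else {
            if (--clkrun_enabled)
                return;     /* an outer caller still holds it */
            clkrun_toggle(0);
        }
    }

    int main(void)
    {
        clk_enable(1);  /* outer: toggles hardware */
        clk_enable(1);  /* nested: counter only */
        clk_enable(0);  /* nested release: counter only */
        clk_enable(0);  /* outer release: toggles hardware back */
        return 0;
    }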
2102 diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
2103 index 6bbac319ff3b..d5c6a2e952b3 100644
2104 --- a/drivers/char/tpm/tpm_tis_core.h
2105 +++ b/drivers/char/tpm/tpm_tis_core.h
2106 @@ -79,6 +79,11 @@ enum tis_defaults {
2107 #define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
2108 #define TPM_RID(l) (0x0F04 | ((l) << 12))
2109
2110 +#define LPC_CNTRL_OFFSET 0x84
2111 +#define LPC_CLKRUN_EN (1 << 2)
2112 +#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
2113 +#define ILB_REMAP_SIZE 0x100
2114 +
2115 enum tpm_tis_flags {
2116 TPM_TIS_ITPM_WORKAROUND = BIT(0),
2117 };
2118 @@ -89,6 +94,8 @@ struct tpm_tis_data {
2119 int irq;
2120 bool irq_tested;
2121 unsigned int flags;
2122 + void __iomem *ilb_base_addr;
2123 + u16 clkrun_enabled;
2124 wait_queue_head_t int_queue;
2125 wait_queue_head_t read_queue;
2126 const struct tpm_tis_phy_ops *phy_ops;
2127 @@ -144,6 +151,15 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
2128 return data->phy_ops->write32(data, addr, value);
2129 }
2130
2131 +static inline bool is_bsw(void)
2132 +{
2133 +#ifdef CONFIG_X86
2134 + return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
2135 +#else
2136 + return false;
2137 +#endif
2138 +}
2139 +
2140 void tpm_tis_remove(struct tpm_chip *chip);
2141 int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
2142 const struct tpm_tis_phy_ops *phy_ops,
2143 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
2144 index 57afad79f55d..8fa850a070e0 100644
2145 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
2146 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
2147 @@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
2148 size_t size;
2149 u32 retry = 3;
2150
2151 + if (amdgpu_acpi_pcie_notify_device_ready(adev))
2152 + return -EINVAL;
2153 +
2154 /* Get the device handle */
2155 handle = ACPI_HANDLE(&adev->pdev->dev);
2156 if (!handle)
2157 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
2158 index 8d1cf2d3e663..848242821ef3 100644
2159 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
2160 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
2161 @@ -739,9 +739,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
2162 enum drm_connector_status ret = connector_status_disconnected;
2163 int r;
2164
2165 - r = pm_runtime_get_sync(connector->dev->dev);
2166 - if (r < 0)
2167 - return connector_status_disconnected;
2168 + if (!drm_kms_helper_is_poll_worker()) {
2169 + r = pm_runtime_get_sync(connector->dev->dev);
2170 + if (r < 0)
2171 + return connector_status_disconnected;
2172 + }
2173
2174 if (encoder) {
2175 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2176 @@ -760,8 +762,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
2177 /* check acpi lid status ??? */
2178
2179 amdgpu_connector_update_scratch_regs(connector, ret);
2180 - pm_runtime_mark_last_busy(connector->dev->dev);
2181 - pm_runtime_put_autosuspend(connector->dev->dev);
2182 +
2183 + if (!drm_kms_helper_is_poll_worker()) {
2184 + pm_runtime_mark_last_busy(connector->dev->dev);
2185 + pm_runtime_put_autosuspend(connector->dev->dev);
2186 + }
2187 +
2188 return ret;
2189 }
2190
2191 @@ -871,9 +877,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
2192 enum drm_connector_status ret = connector_status_disconnected;
2193 int r;
2194
2195 - r = pm_runtime_get_sync(connector->dev->dev);
2196 - if (r < 0)
2197 - return connector_status_disconnected;
2198 + if (!drm_kms_helper_is_poll_worker()) {
2199 + r = pm_runtime_get_sync(connector->dev->dev);
2200 + if (r < 0)
2201 + return connector_status_disconnected;
2202 + }
2203
2204 encoder = amdgpu_connector_best_single_encoder(connector);
2205 if (!encoder)
2206 @@ -927,8 +935,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
2207 amdgpu_connector_update_scratch_regs(connector, ret);
2208
2209 out:
2210 - pm_runtime_mark_last_busy(connector->dev->dev);
2211 - pm_runtime_put_autosuspend(connector->dev->dev);
2212 + if (!drm_kms_helper_is_poll_worker()) {
2213 + pm_runtime_mark_last_busy(connector->dev->dev);
2214 + pm_runtime_put_autosuspend(connector->dev->dev);
2215 + }
2216
2217 return ret;
2218 }
2219 @@ -991,9 +1001,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
2220 enum drm_connector_status ret = connector_status_disconnected;
2221 bool dret = false, broken_edid = false;
2222
2223 - r = pm_runtime_get_sync(connector->dev->dev);
2224 - if (r < 0)
2225 - return connector_status_disconnected;
2226 + if (!drm_kms_helper_is_poll_worker()) {
2227 + r = pm_runtime_get_sync(connector->dev->dev);
2228 + if (r < 0)
2229 + return connector_status_disconnected;
2230 + }
2231
2232 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
2233 ret = connector->status;
2234 @@ -1118,8 +1130,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
2235 amdgpu_connector_update_scratch_regs(connector, ret);
2236
2237 exit:
2238 - pm_runtime_mark_last_busy(connector->dev->dev);
2239 - pm_runtime_put_autosuspend(connector->dev->dev);
2240 + if (!drm_kms_helper_is_poll_worker()) {
2241 + pm_runtime_mark_last_busy(connector->dev->dev);
2242 + pm_runtime_put_autosuspend(connector->dev->dev);
2243 + }
2244
2245 return ret;
2246 }
2247 @@ -1362,9 +1376,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
2248 struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
2249 int r;
2250
2251 - r = pm_runtime_get_sync(connector->dev->dev);
2252 - if (r < 0)
2253 - return connector_status_disconnected;
2254 + if (!drm_kms_helper_is_poll_worker()) {
2255 + r = pm_runtime_get_sync(connector->dev->dev);
2256 + if (r < 0)
2257 + return connector_status_disconnected;
2258 + }
2259
2260 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
2261 ret = connector->status;
2262 @@ -1432,8 +1448,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
2263
2264 amdgpu_connector_update_scratch_regs(connector, ret);
2265 out:
2266 - pm_runtime_mark_last_busy(connector->dev->dev);
2267 - pm_runtime_put_autosuspend(connector->dev->dev);
2268 + if (!drm_kms_helper_is_poll_worker()) {
2269 + pm_runtime_mark_last_busy(connector->dev->dev);
2270 + pm_runtime_put_autosuspend(connector->dev->dev);
2271 + }
2272
2273 return ret;
2274 }
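Every amdgpu ->detect() callback above gains the same guard: skip runtime-PM acquisition when called from the output poll worker, because that worker only runs while the device is awake and taking a reference there can deadlock against drm_kms_helper_poll_disable() during runtime suspend. A self-contained model of the guarded path; the helper names below are redefined locally as stubs, so this is a sketch of the control flow, not the DRM API:

    #include <stdbool.h>
    #include <stdio.h>

    static bool in_poll_worker;         /* models the worker context */

    static bool drm_kms_helper_is_poll_worker(void) { return in_poll_worker; }
    static int  pm_runtime_get_sync(void) { puts("pm: wake device"); return 0; }
    static void pm_runtime_put_autosuspend(void) { puts("pm: allow suspend"); }

    static int detect(void)
    {
        int status;

        if (!drm_kms_helper_is_poll_worker())
            if (pm_runtime_get_sync() < 0)
                return -1;

        status = 1;     /* probe the hardware here; device is awake */

        if (!drm_kms_helper_is_poll_worker())
            pm_runtime_put_autosuspend();
        return status;
    }

    int main(void)
    {
        in_poll_worker = false;
        detect();       /* ioctl path: takes the PM reference */
        in_poll_worker = true;
        detect();       /* poll worker: reference skipped */
        return 0;
    }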
2275 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
2276 index e19928dae8e3..17deca0f6255 100644
2277 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
2278 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
2279 @@ -293,12 +293,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
2280 if (adev->uvd.vcpu_bo == NULL)
2281 return 0;
2282
2283 - for (i = 0; i < adev->uvd.max_handles; ++i)
2284 - if (atomic_read(&adev->uvd.handles[i]))
2285 - break;
2286 + /* only valid for physical mode */
2287 + if (adev->asic_type < CHIP_POLARIS10) {
2288 + for (i = 0; i < adev->uvd.max_handles; ++i)
2289 + if (atomic_read(&adev->uvd.handles[i]))
2290 + break;
2291
2292 - if (i == AMDGPU_MAX_UVD_HANDLES)
2293 - return 0;
2294 + if (i == adev->uvd.max_handles)
2295 + return 0;
2296 + }
2297
2298 cancel_delayed_work_sync(&adev->uvd.idle_work);
2299
2300 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
2301 index 00868764a0dd..6f76b2646465 100644
2302 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
2303 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
2304 @@ -4387,34 +4387,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
2305 case CHIP_KAVERI:
2306 adev->gfx.config.max_shader_engines = 1;
2307 adev->gfx.config.max_tile_pipes = 4;
2308 - if ((adev->pdev->device == 0x1304) ||
2309 - (adev->pdev->device == 0x1305) ||
2310 - (adev->pdev->device == 0x130C) ||
2311 - (adev->pdev->device == 0x130F) ||
2312 - (adev->pdev->device == 0x1310) ||
2313 - (adev->pdev->device == 0x1311) ||
2314 - (adev->pdev->device == 0x131C)) {
2315 - adev->gfx.config.max_cu_per_sh = 8;
2316 - adev->gfx.config.max_backends_per_se = 2;
2317 - } else if ((adev->pdev->device == 0x1309) ||
2318 - (adev->pdev->device == 0x130A) ||
2319 - (adev->pdev->device == 0x130D) ||
2320 - (adev->pdev->device == 0x1313) ||
2321 - (adev->pdev->device == 0x131D)) {
2322 - adev->gfx.config.max_cu_per_sh = 6;
2323 - adev->gfx.config.max_backends_per_se = 2;
2324 - } else if ((adev->pdev->device == 0x1306) ||
2325 - (adev->pdev->device == 0x1307) ||
2326 - (adev->pdev->device == 0x130B) ||
2327 - (adev->pdev->device == 0x130E) ||
2328 - (adev->pdev->device == 0x1315) ||
2329 - (adev->pdev->device == 0x131B)) {
2330 - adev->gfx.config.max_cu_per_sh = 4;
2331 - adev->gfx.config.max_backends_per_se = 1;
2332 - } else {
2333 - adev->gfx.config.max_cu_per_sh = 3;
2334 - adev->gfx.config.max_backends_per_se = 1;
2335 - }
2336 + adev->gfx.config.max_cu_per_sh = 8;
2337 + adev->gfx.config.max_backends_per_se = 2;
2338 adev->gfx.config.max_sh_per_se = 1;
2339 adev->gfx.config.max_texture_channel_caches = 4;
2340 adev->gfx.config.max_gprs = 256;
2341 diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
2342 index 8284d5dbfc30..4c178feeb4bd 100644
2343 --- a/drivers/gpu/drm/amd/amdgpu/si.c
2344 +++ b/drivers/gpu/drm/amd/amdgpu/si.c
2345 @@ -31,6 +31,7 @@
2346 #include "amdgpu_uvd.h"
2347 #include "amdgpu_vce.h"
2348 #include "atom.h"
2349 +#include "amd_pcie.h"
2350 #include "amdgpu_powerplay.h"
2351 #include "sid.h"
2352 #include "si_ih.h"
2353 @@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
2354 {
2355 struct pci_dev *root = adev->pdev->bus->self;
2356 int bridge_pos, gpu_pos;
2357 - u32 speed_cntl, mask, current_data_rate;
2358 - int ret, i;
2359 + u32 speed_cntl, current_data_rate;
2360 + int i;
2361 u16 tmp16;
2362
2363 if (pci_is_root_bus(adev->pdev->bus))
2364 @@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
2365 if (adev->flags & AMD_IS_APU)
2366 return;
2367
2368 - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2369 - if (ret != 0)
2370 - return;
2371 -
2372 - if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
2373 + if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2374 + CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
2375 return;
2376
2377 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
2378 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
2379 LC_CURRENT_DATA_RATE_SHIFT;
2380 - if (mask & DRM_PCIE_SPEED_80) {
2381 + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
2382 if (current_data_rate == 2) {
2383 DRM_INFO("PCIE gen 3 link speeds already enabled\n");
2384 return;
2385 }
2386 DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
2387 - } else if (mask & DRM_PCIE_SPEED_50) {
2388 + } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
2389 if (current_data_rate == 1) {
2390 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
2391 return;
2392 @@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
2393 if (!gpu_pos)
2394 return;
2395
2396 - if (mask & DRM_PCIE_SPEED_80) {
2397 + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
2398 if (current_data_rate != 2) {
2399 u16 bridge_cfg, gpu_cfg;
2400 u16 bridge_cfg2, gpu_cfg2;
2401 @@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
2402
2403 pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
2404 tmp16 &= ~0xf;
2405 - if (mask & DRM_PCIE_SPEED_80)
2406 + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2407 tmp16 |= 3;
2408 - else if (mask & DRM_PCIE_SPEED_50)
2409 + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
2410 tmp16 |= 2;
2411 else
2412 tmp16 |= 1;
2413 diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
2414 index a2aeb643ac51..abb0a2341a41 100644
2415 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
2416 +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
2417 @@ -26,6 +26,7 @@
2418 #include "amdgpu_pm.h"
2419 #include "amdgpu_dpm.h"
2420 #include "amdgpu_atombios.h"
2421 +#include "amd_pcie.h"
2422 #include "sid.h"
2423 #include "r600_dpm.h"
2424 #include "si_dpm.h"
2425 @@ -3332,29 +3333,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
2426 }
2427 }
2428
2429 -static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
2430 - u32 sys_mask,
2431 - enum amdgpu_pcie_gen asic_gen,
2432 - enum amdgpu_pcie_gen default_gen)
2433 -{
2434 - switch (asic_gen) {
2435 - case AMDGPU_PCIE_GEN1:
2436 - return AMDGPU_PCIE_GEN1;
2437 - case AMDGPU_PCIE_GEN2:
2438 - return AMDGPU_PCIE_GEN2;
2439 - case AMDGPU_PCIE_GEN3:
2440 - return AMDGPU_PCIE_GEN3;
2441 - default:
2442 - if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
2443 - return AMDGPU_PCIE_GEN3;
2444 - else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
2445 - return AMDGPU_PCIE_GEN2;
2446 - else
2447 - return AMDGPU_PCIE_GEN1;
2448 - }
2449 - return AMDGPU_PCIE_GEN1;
2450 -}
2451 -
2452 static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
2453 u32 *p, u32 *u)
2454 {
2455 @@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
2456 table->ACPIState.levels[0].vddc.index,
2457 &table->ACPIState.levels[0].std_vddc);
2458 }
2459 - table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
2460 - si_pi->sys_pcie_mask,
2461 - si_pi->boot_pcie_gen,
2462 - AMDGPU_PCIE_GEN1);
2463 + table->ACPIState.levels[0].gen2PCIE =
2464 + (u8)amdgpu_get_pcie_gen_support(adev,
2465 + si_pi->sys_pcie_mask,
2466 + si_pi->boot_pcie_gen,
2467 + AMDGPU_PCIE_GEN1);
2468
2469 if (si_pi->vddc_phase_shed_control)
2470 si_populate_phase_shedding_value(adev,
2471 @@ -7162,10 +7141,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
2472 pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
2473 pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
2474 pl->flags = le32_to_cpu(clock_info->si.ulFlags);
2475 - pl->pcie_gen = r600_get_pcie_gen_support(adev,
2476 - si_pi->sys_pcie_mask,
2477 - si_pi->boot_pcie_gen,
2478 - clock_info->si.ucPCIEGen);
2479 + pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
2480 + si_pi->sys_pcie_mask,
2481 + si_pi->boot_pcie_gen,
2482 + clock_info->si.ucPCIEGen);
2483
2484 /* patch up vddc if necessary */
2485 ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
2486 @@ -7320,7 +7299,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
2487 struct si_power_info *si_pi;
2488 struct atom_clock_dividers dividers;
2489 int ret;
2490 - u32 mask;
2491
2492 si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
2493 if (si_pi == NULL)
2494 @@ -7330,11 +7308,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
2495 eg_pi = &ni_pi->eg;
2496 pi = &eg_pi->rv7xx;
2497
2498 - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2499 - if (ret)
2500 - si_pi->sys_pcie_mask = 0;
2501 - else
2502 - si_pi->sys_pcie_mask = mask;
2503 + si_pi->sys_pcie_mask =
2504 + (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
2505 + CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
2506 si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
2507 si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
2508
2509 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
2510 index b526f49be65d..336fdd8c7db0 100644
2511 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
2512 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
2513 @@ -2788,10 +2788,13 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2514 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2515
2516
2517 - disable_mclk_switching = ((1 < info.display_count) ||
2518 - disable_mclk_switching_for_frame_lock ||
2519 - smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
2520 - (mode_info.refresh_rate > 120));
2521 + if (info.display_count == 0)
2522 + disable_mclk_switching = false;
2523 + else
2524 + disable_mclk_switching = ((1 < info.display_count) ||
2525 + disable_mclk_switching_for_frame_lock ||
2526 + smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
2527 + (mode_info.refresh_rate > 120));
2528
2529 sclk = smu7_ps->performance_levels[0].engine_clock;
2530 mclk = smu7_ps->performance_levels[0].memory_clock;
2531 @@ -4576,13 +4579,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
2532 int tmp_result, result = 0;
2533 uint32_t sclk_mask = 0, mclk_mask = 0;
2534
2535 - if (hwmgr->chip_id == CHIP_FIJI) {
2536 - if (request->type == AMD_PP_GFX_PROFILE)
2537 - smu7_enable_power_containment(hwmgr);
2538 - else if (request->type == AMD_PP_COMPUTE_PROFILE)
2539 - smu7_disable_power_containment(hwmgr);
2540 - }
2541 -
2542 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
2543 return -EINVAL;
2544
2545 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
2546 index f8f02e70b8bc..ca232a9e2334 100644
2547 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
2548 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
2549 @@ -3243,10 +3243,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2550 force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2551 PHM_PlatformCaps_ForceMclkHigh);
2552
2553 - disable_mclk_switching = (info.display_count > 1) ||
2554 - disable_mclk_switching_for_frame_lock ||
2555 - disable_mclk_switching_for_vr ||
2556 - force_mclk_high;
2557 + if (info.display_count == 0)
2558 + disable_mclk_switching = false;
2559 + else
2560 + disable_mclk_switching = (info.display_count > 1) ||
2561 + disable_mclk_switching_for_frame_lock ||
2562 + disable_mclk_switching_for_vr ||
2563 + force_mclk_high;
2564
2565 sclk = vega10_ps->performance_levels[0].gfx_clock;
2566 mclk = vega10_ps->performance_levels[0].mem_clock;
2567 diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
2568 index af279844d7ce..dd4727489b84 100644
2569 --- a/drivers/gpu/drm/drm_framebuffer.c
2570 +++ b/drivers/gpu/drm/drm_framebuffer.c
2571 @@ -118,6 +118,10 @@ int drm_mode_addfb(struct drm_device *dev,
2572 r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
2573 r.handles[0] = or->handle;
2574
2575 + if (r.pixel_format == DRM_FORMAT_XRGB2101010 &&
2576 + dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP)
2577 + r.pixel_format = DRM_FORMAT_XBGR2101010;
2578 +
2579 ret = drm_mode_addfb2(dev, &r, file_priv);
2580 if (ret)
2581 return ret;
2582 diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
2583 index 904966cde32b..d29fd8443fed 100644
2584 --- a/drivers/gpu/drm/drm_probe_helper.c
2585 +++ b/drivers/gpu/drm/drm_probe_helper.c
2586 @@ -671,6 +671,26 @@ static void output_poll_execute(struct work_struct *work)
2587 schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
2588 }
2589
2590 +/**
2591 + * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
2592 + *
2593 + * Determine if %current task is an output poll worker. This can be used
2594 + * to select distinct code paths for output polling versus other contexts.
2595 + *
2596 + * One use case is to avoid a deadlock between the output poll worker and
2597 + * the autosuspend worker wherein the latter waits for polling to finish
2598 + * upon calling drm_kms_helper_poll_disable(), while the former waits for
2599 + * runtime suspend to finish upon calling pm_runtime_get_sync() in a
2600 + * connector ->detect hook.
2601 + */
2602 +bool drm_kms_helper_is_poll_worker(void)
2603 +{
2604 + struct work_struct *work = current_work();
2605 +
2606 + return work && work->func == output_poll_execute;
2607 +}
2608 +EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
2609 +
2610 /**
2611 * drm_kms_helper_poll_disable - disable output polling
2612 * @dev: drm_device
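drm_kms_helper_is_poll_worker() above identifies the caller by comparing the handler of the work item currently executing on this thread against output_poll_execute. A compact user-space model of that mechanism, using a thread-local in place of the kworker bookkeeping behind current_work():

    #include <stdbool.h>
    #include <stdio.h>

    struct work { void (*func)(struct work *); };

    static _Thread_local struct work *current_work;

    /* Minimal work runner: record which item is executing. */
    static void run_work(struct work *w)
    {
        current_work = w;
        w->func(w);
        current_work = NULL;
    }

    static void output_poll_execute(struct work *w);

    static bool is_poll_worker(void)
    {
        return current_work && current_work->func == output_poll_execute;
    }

    static void output_poll_execute(struct work *w)
    {
        (void)w;
        printf("inside poll work: is_poll_worker()=%d\n", is_poll_worker());
    }

    int main(void)
    {
        struct work poll = { output_poll_execute };

        printf("outside: is_poll_worker()=%d\n", is_poll_worker());
        run_work(&poll);
        return 0;
    }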
2613 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
2614 index 5c5cb2ceee49..562220ec9d41 100644
2615 --- a/drivers/gpu/drm/i915/i915_drv.c
2616 +++ b/drivers/gpu/drm/i915/i915_drv.c
2617 @@ -1806,6 +1806,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
2618 if (IS_GEN9_LP(dev_priv) ||
2619 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
2620 intel_power_domains_init_hw(dev_priv, true);
2621 + else
2622 + intel_display_set_init_power(dev_priv, true);
2623
2624 i915_gem_sanitize(dev_priv);
2625
2626 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
2627 index 83876a1c8d98..de8ca5f1dd2e 100644
2628 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
2629 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
2630 @@ -499,6 +499,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
2631 list_add_tail(&vma->exec_link, &eb->unbound);
2632 if (drm_mm_node_allocated(&vma->node))
2633 err = i915_vma_unbind(vma);
2634 + if (unlikely(err))
2635 + vma->exec_flags = NULL;
2636 }
2637 return err;
2638 }
2639 @@ -2408,7 +2410,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
2640 if (out_fence) {
2641 if (err == 0) {
2642 fd_install(out_fence_fd, out_fence->file);
2643 - args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
2644 + args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
2645 args->rsvd2 |= (u64)out_fence_fd << 32;
2646 out_fence_fd = -1;
2647 } else {
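The one-line fix above swaps the arguments of GENMASK_ULL, which takes the high bit first. With the arguments reversed the macro evaluates to an empty mask, so the in-fence bits in rsvd2 were being cleared rather than kept. A reimplementation equivalent to the kernel's 64-bit definition, for demonstration only:

    #include <stdio.h>

    #define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        /* intended: keep the low 32 bits (the in-fence) */
        printf("GENMASK_ULL(31, 0) = 0x%016llx\n",
               (unsigned long long)GENMASK_ULL(31, 0));
        /* swapped, as in the removed line: evaluates to 0, so the
         * in-fence was silently discarded */
        printf("GENMASK_ULL(0, 31) = 0x%016llx\n",
               (unsigned long long)GENMASK_ULL(0, 31));
        return 0;
    }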
2648 diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
2649 index 370b9d248fed..3e49317f3ec3 100644
2650 --- a/drivers/gpu/drm/i915/i915_perf.c
2651 +++ b/drivers/gpu/drm/i915/i915_perf.c
2652 @@ -1300,9 +1300,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
2653 */
2654 mutex_lock(&dev_priv->drm.struct_mutex);
2655 dev_priv->perf.oa.exclusive_stream = NULL;
2656 - mutex_unlock(&dev_priv->drm.struct_mutex);
2657 -
2658 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
2659 + mutex_unlock(&dev_priv->drm.struct_mutex);
2660
2661 free_oa_buffer(dev_priv);
2662
2663 @@ -1754,22 +1753,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
2664 * Note: it's only the RCS/Render context that has any OA state.
2665 */
2666 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
2667 - const struct i915_oa_config *oa_config,
2668 - bool interruptible)
2669 + const struct i915_oa_config *oa_config)
2670 {
2671 struct i915_gem_context *ctx;
2672 int ret;
2673 unsigned int wait_flags = I915_WAIT_LOCKED;
2674
2675 - if (interruptible) {
2676 - ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2677 - if (ret)
2678 - return ret;
2679 -
2680 - wait_flags |= I915_WAIT_INTERRUPTIBLE;
2681 - } else {
2682 - mutex_lock(&dev_priv->drm.struct_mutex);
2683 - }
2684 + lockdep_assert_held(&dev_priv->drm.struct_mutex);
2685
2686 /* Switch away from any user context. */
2687 ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
2688 @@ -1817,8 +1807,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
2689 }
2690
2691 out:
2692 - mutex_unlock(&dev_priv->drm.struct_mutex);
2693 -
2694 return ret;
2695 }
2696
2697 @@ -1862,7 +1850,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
2698 * to make sure all slices/subslices are ON before writing to NOA
2699 * registers.
2700 */
2701 - ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
2702 + ret = gen8_configure_all_contexts(dev_priv, oa_config);
2703 if (ret)
2704 return ret;
2705
2706 @@ -1877,7 +1865,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
2707 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
2708 {
2709 /* Reset all contexts' slices/subslices configurations. */
2710 - gen8_configure_all_contexts(dev_priv, NULL, false);
2711 + gen8_configure_all_contexts(dev_priv, NULL);
2712
2713 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
2714 ~GT_NOA_ENABLE));
2715 @@ -2127,6 +2115,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
2716 if (ret)
2717 goto err_oa_buf_alloc;
2718
2719 + ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2720 + if (ret)
2721 + goto err_lock;
2722 +
2723 ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
2724 stream->oa_config);
2725 if (ret)
2726 @@ -2134,23 +2126,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
2727
2728 stream->ops = &i915_oa_stream_ops;
2729
2730 - /* Lock device for exclusive_stream access late because
2731 - * enable_metric_set() might lock as well on gen8+.
2732 - */
2733 - ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2734 - if (ret)
2735 - goto err_lock;
2736 -
2737 dev_priv->perf.oa.exclusive_stream = stream;
2738
2739 mutex_unlock(&dev_priv->drm.struct_mutex);
2740
2741 return 0;
2742
2743 -err_lock:
2744 +err_enable:
2745 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
2746 + mutex_unlock(&dev_priv->drm.struct_mutex);
2747
2748 -err_enable:
2749 +err_lock:
2750 free_oa_buffer(dev_priv);
2751
2752 err_oa_buf_alloc:
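The i915_perf.c changes take struct_mutex before enable_metric_set() and, correspondingly, swap the err_enable/err_lock labels so that cleanup still runs in reverse order of acquisition. A generic sketch of that kernel error-unwind convention (the step names are invented for the demo):

    #include <stdio.h>

    /* Failing at step N jumps to the label that undoes step N-1 and
     * falls through the remaining undo steps in order. */
    static int init(int fail_at)
    {
        puts("step 1: alloc buffer");
        if (fail_at == 2)
            goto err_buf;

        puts("step 2: take lock");
        if (fail_at == 3)
            goto err_lock;

        puts("step 3: enable hardware");
        return 0;

    err_lock:
        puts("undo 2: drop lock");
    err_buf:
        puts("undo 1: free buffer");
        return -1;
    }

    int main(void)
    {
        init(3);    /* unwinds the lock, then the buffer */
        puts("--");
        init(2);    /* unwinds the buffer only */
        return 0;
    }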
2753 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2754 index 059db50109bc..cf648c526e12 100644
2755 --- a/drivers/gpu/drm/i915/intel_display.c
2756 +++ b/drivers/gpu/drm/i915/intel_display.c
2757 @@ -14498,6 +14498,8 @@ static void sanitize_watermarks(struct drm_device *dev)
2758
2759 cs->wm.need_postvbl_update = true;
2760 dev_priv->display.optimize_watermarks(intel_state, cs);
2761 +
2762 + to_intel_crtc_state(crtc->state)->wm = cs->wm;
2763 }
2764
2765 put_state:
2766 diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
2767 index 3fed1d3ecded..1b292d5f1a68 100644
2768 --- a/drivers/gpu/drm/i915/intel_hdmi.c
2769 +++ b/drivers/gpu/drm/i915/intel_hdmi.c
2770 @@ -1563,12 +1563,20 @@ intel_hdmi_set_edid(struct drm_connector *connector)
2771 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
2772 struct edid *edid;
2773 bool connected = false;
2774 + struct i2c_adapter *i2c;
2775
2776 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
2777
2778 - edid = drm_get_edid(connector,
2779 - intel_gmbus_get_adapter(dev_priv,
2780 - intel_hdmi->ddc_bus));
2781 + i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
2782 +
2783 + edid = drm_get_edid(connector, i2c);
2784 +
2785 + if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
2786 + DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
2787 + intel_gmbus_force_bit(i2c, true);
2788 + edid = drm_get_edid(connector, i2c);
2789 + intel_gmbus_force_bit(i2c, false);
2790 + }
2791
2792 intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
2793
2794 diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
2795 index 51cb5293bf43..bcccacba1ec6 100644
2796 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
2797 +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
2798 @@ -1844,6 +1844,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
2799 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2800 BIT_ULL(POWER_DOMAIN_MODESET) | \
2801 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2802 + BIT_ULL(POWER_DOMAIN_GMBUS) | \
2803 BIT_ULL(POWER_DOMAIN_INIT))
2804
2805 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2806 diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
2807 index 70d8e0d69ad5..c902a851eb51 100644
2808 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c
2809 +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
2810 @@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
2811 nv_connector->edid = NULL;
2812 }
2813
2814 - ret = pm_runtime_get_sync(connector->dev->dev);
2815 - if (ret < 0 && ret != -EACCES)
2816 - return conn_status;
2817 + /* Outputs are only polled while runtime active, so acquiring a
2818 + * runtime PM ref here is unnecessary (and would deadlock upon
2819 + * runtime suspend because it waits for polling to finish).
2820 + */
2821 + if (!drm_kms_helper_is_poll_worker()) {
2822 + ret = pm_runtime_get_sync(connector->dev->dev);
2823 + if (ret < 0 && ret != -EACCES)
2824 + return conn_status;
2825 + }
2826
2827 nv_encoder = nouveau_connector_ddc_detect(connector);
2828 if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
2829 @@ -647,8 +653,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
2830
2831 out:
2832
2833 - pm_runtime_mark_last_busy(connector->dev->dev);
2834 - pm_runtime_put_autosuspend(connector->dev->dev);
2835 + if (!drm_kms_helper_is_poll_worker()) {
2836 + pm_runtime_mark_last_busy(connector->dev->dev);
2837 + pm_runtime_put_autosuspend(connector->dev->dev);
2838 + }
2839
2840 return conn_status;
2841 }
2842 diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
2843 index fb47d46050ec..6e196bc01118 100644
2844 --- a/drivers/gpu/drm/nouveau/nv50_display.c
2845 +++ b/drivers/gpu/drm/nouveau/nv50_display.c
2846 @@ -4426,6 +4426,7 @@ nv50_display_create(struct drm_device *dev)
2847 nouveau_display(dev)->fini = nv50_display_fini;
2848 disp->disp = &nouveau_display(dev)->disp;
2849 dev->mode_config.funcs = &nv50_disp_func;
2850 + dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
2851 if (nouveau_atomic)
2852 dev->driver->driver_features |= DRIVER_ATOMIC;
2853
2854 diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
2855 index 3cb6c55b268d..ce8b353b5753 100644
2856 --- a/drivers/gpu/drm/radeon/cik.c
2857 +++ b/drivers/gpu/drm/radeon/cik.c
2858 @@ -3229,35 +3229,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
2859 case CHIP_KAVERI:
2860 rdev->config.cik.max_shader_engines = 1;
2861 rdev->config.cik.max_tile_pipes = 4;
2862 - if ((rdev->pdev->device == 0x1304) ||
2863 - (rdev->pdev->device == 0x1305) ||
2864 - (rdev->pdev->device == 0x130C) ||
2865 - (rdev->pdev->device == 0x130F) ||
2866 - (rdev->pdev->device == 0x1310) ||
2867 - (rdev->pdev->device == 0x1311) ||
2868 - (rdev->pdev->device == 0x131C)) {
2869 - rdev->config.cik.max_cu_per_sh = 8;
2870 - rdev->config.cik.max_backends_per_se = 2;
2871 - } else if ((rdev->pdev->device == 0x1309) ||
2872 - (rdev->pdev->device == 0x130A) ||
2873 - (rdev->pdev->device == 0x130D) ||
2874 - (rdev->pdev->device == 0x1313) ||
2875 - (rdev->pdev->device == 0x131D)) {
2876 - rdev->config.cik.max_cu_per_sh = 6;
2877 - rdev->config.cik.max_backends_per_se = 2;
2878 - } else if ((rdev->pdev->device == 0x1306) ||
2879 - (rdev->pdev->device == 0x1307) ||
2880 - (rdev->pdev->device == 0x130B) ||
2881 - (rdev->pdev->device == 0x130E) ||
2882 - (rdev->pdev->device == 0x1315) ||
2883 - (rdev->pdev->device == 0x1318) ||
2884 - (rdev->pdev->device == 0x131B)) {
2885 - rdev->config.cik.max_cu_per_sh = 4;
2886 - rdev->config.cik.max_backends_per_se = 1;
2887 - } else {
2888 - rdev->config.cik.max_cu_per_sh = 3;
2889 - rdev->config.cik.max_backends_per_se = 1;
2890 - }
2891 + rdev->config.cik.max_cu_per_sh = 8;
2892 + rdev->config.cik.max_backends_per_se = 2;
2893 rdev->config.cik.max_sh_per_se = 1;
2894 rdev->config.cik.max_texture_channel_caches = 4;
2895 rdev->config.cik.max_gprs = 256;
2896 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2897 index 2f642cbefd8e..c0da44742988 100644
2898 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
2899 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2900 @@ -900,9 +900,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
2901 enum drm_connector_status ret = connector_status_disconnected;
2902 int r;
2903
2904 - r = pm_runtime_get_sync(connector->dev->dev);
2905 - if (r < 0)
2906 - return connector_status_disconnected;
2907 + if (!drm_kms_helper_is_poll_worker()) {
2908 + r = pm_runtime_get_sync(connector->dev->dev);
2909 + if (r < 0)
2910 + return connector_status_disconnected;
2911 + }
2912
2913 if (encoder) {
2914 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2915 @@ -925,8 +927,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
2916 /* check acpi lid status ??? */
2917
2918 radeon_connector_update_scratch_regs(connector, ret);
2919 - pm_runtime_mark_last_busy(connector->dev->dev);
2920 - pm_runtime_put_autosuspend(connector->dev->dev);
2921 +
2922 + if (!drm_kms_helper_is_poll_worker()) {
2923 + pm_runtime_mark_last_busy(connector->dev->dev);
2924 + pm_runtime_put_autosuspend(connector->dev->dev);
2925 + }
2926 +
2927 return ret;
2928 }
2929
2930 @@ -1040,9 +1046,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
2931 enum drm_connector_status ret = connector_status_disconnected;
2932 int r;
2933
2934 - r = pm_runtime_get_sync(connector->dev->dev);
2935 - if (r < 0)
2936 - return connector_status_disconnected;
2937 + if (!drm_kms_helper_is_poll_worker()) {
2938 + r = pm_runtime_get_sync(connector->dev->dev);
2939 + if (r < 0)
2940 + return connector_status_disconnected;
2941 + }
2942
2943 encoder = radeon_best_single_encoder(connector);
2944 if (!encoder)
2945 @@ -1109,8 +1117,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
2946 radeon_connector_update_scratch_regs(connector, ret);
2947
2948 out:
2949 - pm_runtime_mark_last_busy(connector->dev->dev);
2950 - pm_runtime_put_autosuspend(connector->dev->dev);
2951 + if (!drm_kms_helper_is_poll_worker()) {
2952 + pm_runtime_mark_last_busy(connector->dev->dev);
2953 + pm_runtime_put_autosuspend(connector->dev->dev);
2954 + }
2955
2956 return ret;
2957 }
2958 @@ -1174,9 +1184,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
2959 if (!radeon_connector->dac_load_detect)
2960 return ret;
2961
2962 - r = pm_runtime_get_sync(connector->dev->dev);
2963 - if (r < 0)
2964 - return connector_status_disconnected;
2965 + if (!drm_kms_helper_is_poll_worker()) {
2966 + r = pm_runtime_get_sync(connector->dev->dev);
2967 + if (r < 0)
2968 + return connector_status_disconnected;
2969 + }
2970
2971 encoder = radeon_best_single_encoder(connector);
2972 if (!encoder)
2973 @@ -1188,8 +1200,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
2974 if (ret == connector_status_connected)
2975 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
2976 radeon_connector_update_scratch_regs(connector, ret);
2977 - pm_runtime_mark_last_busy(connector->dev->dev);
2978 - pm_runtime_put_autosuspend(connector->dev->dev);
2979 +
2980 + if (!drm_kms_helper_is_poll_worker()) {
2981 + pm_runtime_mark_last_busy(connector->dev->dev);
2982 + pm_runtime_put_autosuspend(connector->dev->dev);
2983 + }
2984 +
2985 return ret;
2986 }
2987
2988 @@ -1252,9 +1268,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
2989 enum drm_connector_status ret = connector_status_disconnected;
2990 bool dret = false, broken_edid = false;
2991
2992 - r = pm_runtime_get_sync(connector->dev->dev);
2993 - if (r < 0)
2994 - return connector_status_disconnected;
2995 + if (!drm_kms_helper_is_poll_worker()) {
2996 + r = pm_runtime_get_sync(connector->dev->dev);
2997 + if (r < 0)
2998 + return connector_status_disconnected;
2999 + }
3000
3001 if (radeon_connector->detected_hpd_without_ddc) {
3002 force = true;
3003 @@ -1437,8 +1455,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
3004 }
3005
3006 exit:
3007 - pm_runtime_mark_last_busy(connector->dev->dev);
3008 - pm_runtime_put_autosuspend(connector->dev->dev);
3009 + if (!drm_kms_helper_is_poll_worker()) {
3010 + pm_runtime_mark_last_busy(connector->dev->dev);
3011 + pm_runtime_put_autosuspend(connector->dev->dev);
3012 + }
3013
3014 return ret;
3015 }
3016 @@ -1689,9 +1709,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
3017 if (radeon_dig_connector->is_mst)
3018 return connector_status_disconnected;
3019
3020 - r = pm_runtime_get_sync(connector->dev->dev);
3021 - if (r < 0)
3022 - return connector_status_disconnected;
3023 + if (!drm_kms_helper_is_poll_worker()) {
3024 + r = pm_runtime_get_sync(connector->dev->dev);
3025 + if (r < 0)
3026 + return connector_status_disconnected;
3027 + }
3028
3029 if (!force && radeon_check_hpd_status_unchanged(connector)) {
3030 ret = connector->status;
3031 @@ -1778,8 +1800,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
3032 }
3033
3034 out:
3035 - pm_runtime_mark_last_busy(connector->dev->dev);
3036 - pm_runtime_put_autosuspend(connector->dev->dev);
3037 + if (!drm_kms_helper_is_poll_worker()) {
3038 + pm_runtime_mark_last_busy(connector->dev->dev);
3039 + pm_runtime_put_autosuspend(connector->dev->dev);
3040 + }
3041
3042 return ret;
3043 }
3044 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
3045 index ffc10cadcf34..32b577c776b9 100644
3046 --- a/drivers/gpu/drm/radeon/radeon_device.c
3047 +++ b/drivers/gpu/drm/radeon/radeon_device.c
3048 @@ -1397,6 +1397,10 @@ int radeon_device_init(struct radeon_device *rdev,
3049 if ((rdev->flags & RADEON_IS_PCI) &&
3050 (rdev->family <= CHIP_RS740))
3051 rdev->need_dma32 = true;
3052 +#ifdef CONFIG_PPC64
3053 + if (rdev->family == CHIP_CEDAR)
3054 + rdev->need_dma32 = true;
3055 +#endif
3056
3057 dma_bits = rdev->need_dma32 ? 32 : 40;
3058 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
3059 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
3060 index 326ad068c15a..4b6542538ff9 100644
3061 --- a/drivers/gpu/drm/radeon/radeon_pm.c
3062 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
3063 @@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
3064 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
3065 static void radeon_pm_update_profile(struct radeon_device *rdev);
3066 static void radeon_pm_set_clocks(struct radeon_device *rdev);
3067 -static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
3068
3069 int radeon_pm_get_type_index(struct radeon_device *rdev,
3070 enum radeon_pm_state_type ps_type,
3071 @@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
3072 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
3073 }
3074 mutex_unlock(&rdev->pm.mutex);
3075 - /* allow new DPM state to be picked */
3076 - radeon_pm_compute_clocks_dpm(rdev);
3077 } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
3078 if (rdev->pm.profile == PM_PROFILE_AUTO) {
3079 mutex_lock(&rdev->pm.mutex);
3080 @@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
3081 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
3082 /* balanced states don't exist at the moment */
3083 if (dpm_state == POWER_STATE_TYPE_BALANCED)
3084 - dpm_state = rdev->pm.dpm.ac_power ?
3085 - POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
3086 + dpm_state = POWER_STATE_TYPE_PERFORMANCE;
3087
3088 restart_search:
3089 /* Pick the best power state based on current conditions */
3090 diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
3091 index d7d042a20ab4..4dff06ab771e 100644
3092 --- a/drivers/infiniband/core/device.c
3093 +++ b/drivers/infiniband/core/device.c
3094 @@ -534,14 +534,14 @@ int ib_register_device(struct ib_device *device,
3095 ret = device->query_device(device, &device->attrs, &uhw);
3096 if (ret) {
3097 pr_warn("Couldn't query the device attributes\n");
3098 - goto cache_cleanup;
3099 + goto cg_cleanup;
3100 }
3101
3102 ret = ib_device_register_sysfs(device, port_callback);
3103 if (ret) {
3104 pr_warn("Couldn't register device %s with driver model\n",
3105 device->name);
3106 - goto cache_cleanup;
3107 + goto cg_cleanup;
3108 }
3109
3110 device->reg_state = IB_DEV_REGISTERED;
3111 @@ -557,6 +557,8 @@ int ib_register_device(struct ib_device *device,
3112 mutex_unlock(&device_mutex);
3113 return 0;
3114
3115 +cg_cleanup:
3116 + ib_device_unregister_rdmacg(device);
3117 cache_cleanup:
3118 ib_cache_cleanup_one(device);
3119 ib_cache_release_one(device);
3120 diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
3121 index 4e1f76730855..9cb801d1fe54 100644
3122 --- a/drivers/infiniband/core/rdma_core.c
3123 +++ b/drivers/infiniband/core/rdma_core.c
3124 @@ -407,13 +407,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
3125 return ret;
3126 }
3127
3128 -static void lockdep_check(struct ib_uobject *uobj, bool exclusive)
3129 +static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
3130 {
3131 #ifdef CONFIG_LOCKDEP
3132 if (exclusive)
3133 - WARN_ON(atomic_read(&uobj->usecnt) > 0);
3134 + WARN_ON(atomic_read(&uobj->usecnt) != -1);
3135 else
3136 - WARN_ON(atomic_read(&uobj->usecnt) == -1);
3137 + WARN_ON(atomic_read(&uobj->usecnt) <= 0);
3138 #endif
3139 }
3140
3141 @@ -452,7 +452,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
3142 WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
3143 return 0;
3144 }
3145 - lockdep_check(uobj, true);
3146 + assert_uverbs_usecnt(uobj, true);
3147 ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
3148
3149 up_read(&ucontext->cleanup_rwsem);
3150 @@ -482,7 +482,7 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
3151 WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
3152 return 0;
3153 }
3154 - lockdep_check(uobject, true);
3155 + assert_uverbs_usecnt(uobject, true);
3156 ret = uobject->type->type_class->remove_commit(uobject,
3157 RDMA_REMOVE_DESTROY);
3158 if (ret)
3159 @@ -569,7 +569,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
3160
3161 void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
3162 {
3163 - lockdep_check(uobj, exclusive);
3164 + assert_uverbs_usecnt(uobj, exclusive);
3165 uobj->type->type_class->lookup_put(uobj, exclusive);
3166 /*
3167 * In order to unlock an object, either decrease its usecnt for
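Besides the rename, the hunk tightens the checks to match the usecnt convention this file uses: -1 means the uobject is held exclusively, a positive value counts shared readers. A compact sketch of asserting that convention, using plain C11 atomics rather than the ib_uobject type:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* usecnt convention: -1 = exclusively held, 0 = idle, >0 = shared readers. */
struct uobj { atomic_int usecnt; };

static void assert_usecnt(struct uobj *u, bool exclusive)
{
	int v = atomic_load(&u->usecnt);

	if (exclusive)
		assert(v == -1);  /* must hold the write side, nothing else */
	else
		assert(v > 0);    /* must hold at least one read reference */
}

int main(void)
{
	struct uobj u = { -1 };

	assert_usecnt(&u, true);
	atomic_store(&u.usecnt, 2);
	assert_usecnt(&u, false);
	return 0;
}

The old checks (> 0 and == -1) only rejected the opposite mode; the new ones also catch the idle case, where the caller holds no lock at all.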
3168 diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
3169 index eb85b546e223..c8b3a45e9edc 100644
3170 --- a/drivers/infiniband/core/ucma.c
3171 +++ b/drivers/infiniband/core/ucma.c
3172 @@ -1148,6 +1148,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
3173 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
3174 return -EFAULT;
3175
3176 + if (cmd.qp_state > IB_QPS_ERR)
3177 + return -EINVAL;
3178 +
3179 ctx = ucma_get_ctx(file, cmd.id);
3180 if (IS_ERR(ctx))
3181 return PTR_ERR(ctx);
3182 @@ -1293,6 +1296,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
3183 if (IS_ERR(ctx))
3184 return PTR_ERR(ctx);
3185
3187 + if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
3187 + return -EINVAL;
3188 +
3189 optval = memdup_user((void __user *) (unsigned long) cmd.optval,
3190 cmd.optlen);
3191 if (IS_ERR(optval)) {
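Both hunks in this file follow the same rule: validate a fully user-controlled value (a QP state enum, an option length) against a hard upper bound before acting on it. A minimal userspace sketch of bounding a caller-controlled length before copying (the cap name is illustrative; the kernel uses KMALLOC_MAX_SIZE for the same purpose):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define OPT_MAX_SIZE (1u << 22)   /* illustrative cap */

/* Duplicate an option blob whose length is caller-controlled. */
static void *dup_option(const void *user_buf, uint32_t optlen, int *err)
{
	void *buf;

	/* Reject absurd lengths before attempting the allocation. */
	if (optlen == 0 || optlen > OPT_MAX_SIZE) {
		*err = -EINVAL;
		return NULL;
	}

	buf = malloc(optlen);             /* stands in for memdup_user() */
	if (!buf) {
		*err = -ENOMEM;
		return NULL;
	}
	memcpy(buf, user_buf, optlen);
	*err = 0;
	return buf;
}

int main(void)
{
	int err;
	char opt[8] = "option";
	void *p = dup_option(opt, sizeof(opt), &err);

	free(p);
	return err;
}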
3192 diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
3193 index 2aa53f427685..faedc080a5e6 100644
3194 --- a/drivers/infiniband/hw/mlx5/cq.c
3195 +++ b/drivers/infiniband/hw/mlx5/cq.c
3196 @@ -1154,7 +1154,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
3197 if (ucmd.reserved0 || ucmd.reserved1)
3198 return -EINVAL;
3199
3200 - umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
3201 + /* check multiplication overflow */
3202 + if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
3203 + return -EINVAL;
3204 +
3205 + umem = ib_umem_get(context, ucmd.buf_addr,
3206 + (size_t)ucmd.cqe_size * entries,
3207 IB_ACCESS_LOCAL_WRITE, 1);
3208 if (IS_ERR(umem)) {
3209 err = PTR_ERR(umem);
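The guard added above is the standard division-based overflow check: a * b overflows size_t exactly when b > SIZE_MAX / a, which is what "SIZE_MAX / ucmd.cqe_size <= entries - 1" expresses for entries >= 1. The same predicate written the more common way, as a runnable sketch:

#include <stdint.h>
#include <stdio.h>

/* Return nonzero if cqe_size * entries would overflow size_t. */
static int cq_size_overflows(size_t cqe_size, size_t entries)
{
	return cqe_size != 0 && entries != 0 &&
	       cqe_size > SIZE_MAX / entries;
}

int main(void)
{
	printf("%d\n", cq_size_overflows(64, 1024));         /* 0: fits  */
	printf("%d\n", cq_size_overflows(SIZE_MAX / 2, 3));  /* 1: wraps */
	return 0;
}

Without the check, a crafted cqe_size could make the multiplication wrap, so ib_umem_get() would pin far less memory than the hardware is later told to use.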
3210 diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
3211 index 231b043e2806..000937fe53ec 100644
3212 --- a/drivers/infiniband/hw/mlx5/mr.c
3213 +++ b/drivers/infiniband/hw/mlx5/mr.c
3214 @@ -1813,7 +1813,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
3215
3216 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
3217 mr->ibmr.length = 0;
3218 - mr->ndescs = sg_nents;
3219
3220 for_each_sg(sgl, sg, sg_nents, i) {
3221 if (unlikely(i >= mr->max_descs))
3222 @@ -1825,6 +1824,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
3223
3224 sg_offset = 0;
3225 }
3226 + mr->ndescs = i;
3227
3228 if (sg_offset_p)
3229 *sg_offset_p = sg_offset;
3230 diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
3231 index 1f316d66e6f7..41614c185918 100644
3232 --- a/drivers/input/keyboard/matrix_keypad.c
3233 +++ b/drivers/input/keyboard/matrix_keypad.c
3234 @@ -218,8 +218,10 @@ static void matrix_keypad_stop(struct input_dev *dev)
3235 {
3236 struct matrix_keypad *keypad = input_get_drvdata(dev);
3237
3238 + spin_lock_irq(&keypad->lock);
3239 keypad->stopped = true;
3240 - mb();
3241 + spin_unlock_irq(&keypad->lock);
3242 +
3243 flush_work(&keypad->work.work);
3244 /*
3245 * matrix_keypad_scan() will leave IRQs enabled;
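The change above replaces a bare mb() with taking the keypad lock around the store to "stopped", so the write is ordered against the scan path that reads the flag under the same lock; flush_work() then drains any scan already in flight. A userspace analog of the same idea using a pthread mutex in place of the spinlock (the kernel primitives are assumed, not shown):

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool stopped;

/* The flag is only read and written under the lock, so the worker is
 * guaranteed to observe the store — no bare memory barrier needed. */
static void *scan_worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		bool done = stopped;
		pthread_mutex_unlock(&lock);
		if (done)
			break;
		usleep(1000);   /* pretend to scan the key matrix */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, scan_worker, NULL);
	pthread_mutex_lock(&lock);
	stopped = true;         /* same ordering role as the spin_lock_irq pair */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);  /* analogous to flush_work() */
	return 0;
}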
3246 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
3247 index 25bf003fb198..9417170f180a 100644
3248 --- a/drivers/md/bcache/super.c
3249 +++ b/drivers/md/bcache/super.c
3250 @@ -938,6 +938,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
3251 uint32_t rtime = cpu_to_le32(get_seconds());
3252 struct uuid_entry *u;
3253 char buf[BDEVNAME_SIZE];
3254 + struct cached_dev *exist_dc, *t;
3255
3256 bdevname(dc->bdev, buf);
3257
3258 @@ -961,6 +962,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
3259 return -EINVAL;
3260 }
3261
3262 + /* Check whether already attached */
3263 + list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
3264 + if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
3265 + pr_err("Tried to attach %s but duplicate UUID already attached",
3266 + buf);
3267 +
3268 + return -EINVAL;
3269 + }
3270 + }
3271 +
3272 u = uuid_find(c, dc->sb.uuid);
3273
3274 if (u &&
3275 @@ -1181,7 +1192,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
3276
3277 return;
3278 err:
3279 - pr_notice("error opening %s: %s", bdevname(bdev, name), err);
3280 + pr_notice("error %s: %s", bdevname(bdev, name), err);
3281 bcache_device_stop(&dc->disk);
3282 }
3283
3284 @@ -1849,6 +1860,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
3285 const char *err = NULL; /* must be set for any error case */
3286 int ret = 0;
3287
3288 + bdevname(bdev, name);
3289 +
3290 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
3291 ca->bdev = bdev;
3292 ca->bdev->bd_holder = ca;
3293 @@ -1857,11 +1870,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
3294 ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
3295 get_page(sb_page);
3296
3297 - if (blk_queue_discard(bdev_get_queue(ca->bdev)))
3298 + if (blk_queue_discard(bdev_get_queue(bdev)))
3299 ca->discard = CACHE_DISCARD(&ca->sb);
3300
3301 ret = cache_alloc(ca);
3302 if (ret != 0) {
3303 + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
3304 if (ret == -ENOMEM)
3305 err = "cache_alloc(): -ENOMEM";
3306 else
3307 @@ -1884,14 +1898,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
3308 goto out;
3309 }
3310
3311 - pr_info("registered cache device %s", bdevname(bdev, name));
3312 + pr_info("registered cache device %s", name);
3313
3314 out:
3315 kobject_put(&ca->kobj);
3316
3317 err:
3318 if (err)
3319 - pr_notice("error opening %s: %s", bdevname(bdev, name), err);
3320 + pr_notice("error %s: %s", name, err);
3321
3322 return ret;
3323 }
3324 @@ -1980,6 +1994,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
3325 if (err)
3326 goto err_close;
3327
3328 + err = "failed to register device";
3329 if (SB_IS_BDEV(sb)) {
3330 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
3331 if (!dc)
3332 @@ -1994,7 +2009,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
3333 goto err_close;
3334
3335 if (register_cache(sb, sb_page, bdev, ca) != 0)
3336 - goto err_close;
3337 + goto err;
3338 }
3339 out:
3340 if (sb_page)
3341 @@ -2007,7 +2022,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
3342 err_close:
3343 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
3344 err:
3345 - pr_info("error opening %s: %s", path, err);
3346 + pr_info("error %s: %s", path, err);
3347 ret = -EINVAL;
3348 goto out;
3349 }
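The attach path above gains an O(n) scan of the already-attached devices, rejecting a second device with the same superblock UUID before it can corrupt the first one's state. The core of that duplicate check, reduced to a runnable sketch over an array instead of the kernel list:

#include <stdio.h>
#include <string.h>

struct cached_dev { unsigned char uuid[16]; };

/* Refuse to attach a device whose UUID is already present. */
static int uuid_already_attached(const struct cached_dev *devs, int n,
				 const unsigned char *uuid)
{
	for (int i = 0; i < n; i++)
		if (!memcmp(devs[i].uuid, uuid, 16))
			return 1;
	return 0;
}

int main(void)
{
	struct cached_dev attached[2] = {
		{ .uuid = { 0xaa } },   /* remaining bytes zero-filled */
		{ .uuid = { 0xbb } },
	};
	unsigned char dup[16] = { 0xaa };

	printf("duplicate: %d\n", uuid_already_attached(attached, 2, dup));
	return 0;
}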
3350 diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
3351 index 6d416fdc25cb..1e17e6421da3 100644
3352 --- a/drivers/md/dm-bufio.c
3353 +++ b/drivers/md/dm-bufio.c
3354 @@ -386,9 +386,6 @@ static void __cache_size_refresh(void)
3355 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
3356 enum data_mode *data_mode)
3357 {
3358 - unsigned noio_flag;
3359 - void *ptr;
3360 -
3361 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
3362 *data_mode = DATA_MODE_SLAB;
3363 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
3364 @@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
3365 * all allocations done by this process (including pagetables) are done
3366 * as if GFP_NOIO was specified.
3367 */
3368 + if (gfp_mask & __GFP_NORETRY) {
3369 + unsigned noio_flag = memalloc_noio_save();
3370 + void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
3371
3372 - if (gfp_mask & __GFP_NORETRY)
3373 - noio_flag = memalloc_noio_save();
3374 -
3375 - ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
3376 -
3377 - if (gfp_mask & __GFP_NORETRY)
3378 memalloc_noio_restore(noio_flag);
3379 + return ptr;
3380 + }
3381
3382 - return ptr;
3383 + return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
3384 }
3385
3386 /*
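The dm-bufio hunk is a pure refactor of control flow: instead of testing __GFP_NORETRY three times, the save/restore pair is bracketed tightly around the one allocation that needs it, and the other path returns directly. A sketch of that scoping pattern, with a hypothetical push_quiet()/pop_quiet() pair standing in for memalloc_noio_save()/memalloc_noio_restore():

#include <stdlib.h>

/* Hypothetical save/restore pair; the names are illustrative only. */
static int  push_quiet(void)      { return 0; }
static void pop_quiet(int cookie) { (void)cookie; }

#define NO_RETRY 0x1u

static void *alloc_block(size_t size, unsigned flags)
{
	/* Bracket save/restore around the single call that needs it. */
	if (flags & NO_RETRY) {
		int cookie = push_quiet();
		void *p = malloc(size);

		pop_quiet(cookie);
		return p;
	}

	return malloc(size);
}

int main(void)
{
	free(alloc_block(64, NO_RETRY));
	return 0;
}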
3387 diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
3388 index 81e2157a7cfb..bc3e2d8d0cce 100644
3389 --- a/drivers/pci/dwc/pcie-designware-host.c
3390 +++ b/drivers/pci/dwc/pcie-designware-host.c
3391 @@ -607,7 +607,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
3392 /* setup bus numbers */
3393 val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
3394 val &= 0xff000000;
3395 - val |= 0x00010100;
3396 + val |= 0x00ff0100;
3397 dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
3398
3399 /* setup command register */
3400 diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
3401 index 72c8b3e1022b..e0a9c445ed67 100644
3402 --- a/drivers/regulator/stm32-vrefbuf.c
3403 +++ b/drivers/regulator/stm32-vrefbuf.c
3404 @@ -51,7 +51,7 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev)
3405 * arbitrary timeout.
3406 */
3407 ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val,
3408 - !(val & STM32_VRR), 650, 10000);
3409 + val & STM32_VRR, 650, 10000);
3410 if (ret) {
3411 dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n");
3412 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
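The one-character fix above inverts the poll predicate: the enable path must wait until the voltage-ready bit is set, not until it is cleared. A runnable sketch of the poll-until-set-with-timeout shape of readl_poll_timeout(), using a plain variable in place of the MMIO register:

#include <stdio.h>
#include <time.h>

#define VRR 0x8u                     /* "voltage ready" bit, illustrative */

static volatile unsigned fake_csr;   /* stands in for the CSR register */

/* Returns 0 once the ready bit is SET, -1 on timeout. */
static int poll_ready(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (fake_csr & VRR)          /* the corrected predicate */
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		long ms = (now.tv_sec - start.tv_sec) * 1000 +
			  (now.tv_nsec - start.tv_nsec) / 1000000;
		if (ms >= timeout_ms)
			return -1;
	}
}

int main(void)
{
	fake_csr = VRR;                  /* regulator reports ready */
	printf("poll_ready: %d\n", poll_ready(10));
	return 0;
}

With the old predicate the wait returned as soon as VRR was clear — i.e. immediately, before the reference buffer had actually stabilized.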
3413 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
3414 index 57bf43e34863..dd9464920456 100644
3415 --- a/drivers/scsi/hosts.c
3416 +++ b/drivers/scsi/hosts.c
3417 @@ -328,8 +328,6 @@ static void scsi_host_dev_release(struct device *dev)
3418 if (shost->work_q)
3419 destroy_workqueue(shost->work_q);
3420
3421 - destroy_rcu_head(&shost->rcu);
3422 -
3423 if (shost->shost_state == SHOST_CREATED) {
3424 /*
3425 * Free the shost_dev device name here if scsi_host_alloc()
3426 @@ -404,7 +402,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
3427 INIT_LIST_HEAD(&shost->starved_list);
3428 init_waitqueue_head(&shost->host_wait);
3429 mutex_init(&shost->scan_mutex);
3430 - init_rcu_head(&shost->rcu);
3431
3432 index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
3433 if (index < 0)
3434 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
3435 index 486c075998f6..67b305531ec3 100644
3436 --- a/drivers/scsi/qla2xxx/qla_def.h
3437 +++ b/drivers/scsi/qla2xxx/qla_def.h
3438 @@ -315,6 +315,29 @@ struct srb_cmd {
3439 /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
3440 #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
3441
3442 +/*
3443 + * 24 bit port ID type definition.
3444 + */
3445 +typedef union {
3446 + uint32_t b24 : 24;
3447 +
3448 + struct {
3449 +#ifdef __BIG_ENDIAN
3450 + uint8_t domain;
3451 + uint8_t area;
3452 + uint8_t al_pa;
3453 +#elif defined(__LITTLE_ENDIAN)
3454 + uint8_t al_pa;
3455 + uint8_t area;
3456 + uint8_t domain;
3457 +#else
3458 +#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
3459 +#endif
3460 + uint8_t rsvd_1;
3461 + } b;
3462 +} port_id_t;
3463 +#define INVALID_PORT_ID 0xFFFFFF
3464 +
3465 struct els_logo_payload {
3466 uint8_t opcode;
3467 uint8_t rsvd[3];
3468 @@ -332,6 +355,7 @@ struct ct_arg {
3469 u32 rsp_size;
3470 void *req;
3471 void *rsp;
3472 + port_id_t id;
3473 };
3474
3475 /*
3476 @@ -480,6 +504,7 @@ typedef struct srb {
3477 const char *name;
3478 int iocbs;
3479 struct qla_qpair *qpair;
3480 + struct list_head elem;
3481 u32 gen1; /* scratch */
3482 u32 gen2; /* scratch */
3483 union {
3484 @@ -2144,28 +2169,6 @@ struct imm_ntfy_from_isp {
3485 #define REQUEST_ENTRY_SIZE (sizeof(request_t))
3486
3487
3488 -/*
3489 - * 24 bit port ID type definition.
3490 - */
3491 -typedef union {
3492 - uint32_t b24 : 24;
3493 -
3494 - struct {
3495 -#ifdef __BIG_ENDIAN
3496 - uint8_t domain;
3497 - uint8_t area;
3498 - uint8_t al_pa;
3499 -#elif defined(__LITTLE_ENDIAN)
3500 - uint8_t al_pa;
3501 - uint8_t area;
3502 - uint8_t domain;
3503 -#else
3504 -#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
3505 -#endif
3506 - uint8_t rsvd_1;
3507 - } b;
3508 -} port_id_t;
3509 -#define INVALID_PORT_ID 0xFFFFFF
3510
3511 /*
3512 * Switch info gathering structure.
3513 @@ -4082,6 +4085,7 @@ typedef struct scsi_qla_host {
3514 #define LOOP_READY 5
3515 #define LOOP_DEAD 6
3516
3517 + unsigned long relogin_jif;
3518 unsigned long dpc_flags;
3519 #define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
3520 #define RESET_ACTIVE 1
3521 @@ -4223,6 +4227,7 @@ typedef struct scsi_qla_host {
3522 wait_queue_head_t fcport_waitQ;
3523 wait_queue_head_t vref_waitq;
3524 uint8_t min_link_speed_feat;
3525 + struct list_head gpnid_list;
3526 } scsi_qla_host_t;
3527
3528 struct qla27xx_image_status {
3529 diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
3530 index bc3db6abc9a0..59ecc4eda6cd 100644
3531 --- a/drivers/scsi/qla2xxx/qla_gs.c
3532 +++ b/drivers/scsi/qla2xxx/qla_gs.c
3533 @@ -175,6 +175,9 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
3534 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3535 }
3536 break;
3537 + case CS_TIMEOUT:
3538 + rval = QLA_FUNCTION_TIMEOUT;
3539 + /* fall through */
3540 default:
3541 ql_dbg(ql_dbg_disc, vha, 0x2033,
3542 "%s failed, completion status (%x) on port_id: "
3543 @@ -2833,7 +2836,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
3544 }
3545 } else { /* fcport->d_id.b24 != ea->id.b24 */
3546 fcport->d_id.b24 = ea->id.b24;
3547 - if (fcport->deleted == QLA_SESS_DELETED) {
3548 + if (fcport->deleted != QLA_SESS_DELETED) {
3549 ql_dbg(ql_dbg_disc, vha, 0x2021,
3550 "%s %d %8phC post del sess\n",
3551 __func__, __LINE__, fcport->port_name);
3552 @@ -2889,9 +2892,22 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
3553 ea.rc = res;
3554 ea.event = FCME_GIDPN_DONE;
3555
3556 - ql_dbg(ql_dbg_disc, vha, 0x204f,
3557 - "Async done-%s res %x, WWPN %8phC ID %3phC \n",
3558 - sp->name, res, fcport->port_name, id);
3559 + if (res == QLA_FUNCTION_TIMEOUT) {
3560 + ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3561 + "Async done-%s WWPN %8phC timed out.\n",
3562 + sp->name, fcport->port_name);
3563 + qla24xx_post_gidpn_work(sp->vha, fcport);
3564 + sp->free(sp);
3565 + return;
3566 + } else if (res) {
3567 + ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3568 + "Async done-%s fail res %x, WWPN %8phC\n",
3569 + sp->name, res, fcport->port_name);
3570 + } else {
3571 + ql_dbg(ql_dbg_disc, vha, 0x204f,
3572 + "Async done-%s good WWPN %8phC ID %3phC\n",
3573 + sp->name, fcport->port_name, id);
3574 + }
3575
3576 qla2x00_fcport_event_handler(vha, &ea);
3577
3578 @@ -3205,11 +3221,18 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
3579 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3580 struct event_arg ea;
3581 struct qla_work_evt *e;
3582 + unsigned long flags;
3583
3584 - ql_dbg(ql_dbg_disc, vha, 0x2066,
3585 - "Async done-%s res %x ID %3phC. %8phC\n",
3586 - sp->name, res, ct_req->req.port_id.port_id,
3587 - ct_rsp->rsp.gpn_id.port_name);
3588 + if (res)
3589 + ql_dbg(ql_dbg_disc, vha, 0x2066,
3590 + "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3591 + sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
3592 + ct_rsp->rsp.gpn_id.port_name);
3593 + else
3594 + ql_dbg(ql_dbg_disc, vha, 0x2066,
3595 + "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3596 + sp->name, sp->gen1, ct_req->req.port_id.port_id,
3597 + ct_rsp->rsp.gpn_id.port_name);
3598
3599 memset(&ea, 0, sizeof(ea));
3600 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3601 @@ -3220,6 +3243,22 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
3602 ea.rc = res;
3603 ea.event = FCME_GPNID_DONE;
3604
3605 + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3606 + list_del(&sp->elem);
3607 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3608 +
3609 + if (res) {
3610 + if (res == QLA_FUNCTION_TIMEOUT)
3611 + qla24xx_post_gpnid_work(sp->vha, &ea.id);
3612 + sp->free(sp);
3613 + return;
3614 + } else if (sp->gen1) {
3615 + /* There was another RSCN for this Nport ID */
3616 + qla24xx_post_gpnid_work(sp->vha, &ea.id);
3617 + sp->free(sp);
3618 + return;
3619 + }
3620 +
3621 qla2x00_fcport_event_handler(vha, &ea);
3622
3623 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE);
3624 @@ -3253,8 +3292,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3625 {
3626 int rval = QLA_FUNCTION_FAILED;
3627 struct ct_sns_req *ct_req;
3628 - srb_t *sp;
3629 + srb_t *sp, *tsp;
3630 struct ct_sns_pkt *ct_sns;
3631 + unsigned long flags;
3632
3633 if (!vha->flags.online)
3634 goto done;
3635 @@ -3265,8 +3305,22 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3636
3637 sp->type = SRB_CT_PTHRU_CMD;
3638 sp->name = "gpnid";
3639 + sp->u.iocb_cmd.u.ctarg.id = *id;
3640 + sp->gen1 = 0;
3641 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3642
3643 + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3644 + list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3645 + if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3646 + tsp->gen1++;
3647 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3648 + sp->free(sp);
3649 + goto done;
3650 + }
3651 + }
3652 + list_add_tail(&sp->elem, &vha->gpnid_list);
3653 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3654 +
3655 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3656 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3657 GFP_KERNEL);
3658 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
3659 index b5b48ddca962..9603886737b5 100644
3660 --- a/drivers/scsi/qla2xxx/qla_init.c
3661 +++ b/drivers/scsi/qla2xxx/qla_init.c
3662 @@ -864,6 +864,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
3663 int rval = ea->rc;
3664 fc_port_t *fcport = ea->fcport;
3665 unsigned long flags;
3666 + u16 opt = ea->sp->u.iocb_cmd.u.mbx.out_mb[10];
3667
3668 fcport->flags &= ~FCF_ASYNC_SENT;
3669
3670 @@ -894,7 +895,8 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
3671 }
3672
3673 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3674 - ea->fcport->login_gen++;
3675 + if (opt != PDO_FORCE_ADISC)
3676 + ea->fcport->login_gen++;
3677 ea->fcport->deleted = 0;
3678 ea->fcport->logout_on_delete = 1;
3679
3680 @@ -918,6 +920,13 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
3681
3682 qla24xx_post_gpsc_work(vha, fcport);
3683 }
3684 + } else if (ea->fcport->login_succ) {
3685 + /*
3686 + * We have an existing session. A late RSCN delivery
3687 + * must have triggered the session to be re-validated.
3688 + * The session is still valid.
3689 + */
3690 + fcport->disc_state = DSC_LOGIN_COMPLETE;
3691 }
3692 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3693 } /* gpdb event */
3694 @@ -964,7 +973,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
3695 ql_dbg(ql_dbg_disc, vha, 0x20bd,
3696 "%s %d %8phC post gnl\n",
3697 __func__, __LINE__, fcport->port_name);
3698 - qla24xx_async_gnl(vha, fcport);
3699 + qla24xx_post_gnl_work(vha, fcport);
3700 } else {
3701 ql_dbg(ql_dbg_disc, vha, 0x20bf,
3702 "%s %d %8phC post login\n",
3703 @@ -1133,7 +1142,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
3704 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
3705 __func__, __LINE__, fcport->port_name);
3706
3707 - qla24xx_async_gidpn(vha, fcport);
3708 + qla24xx_post_gidpn_work(vha, fcport);
3709 return;
3710 }
3711
3712 @@ -1348,6 +1357,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
3713 srb_t *sp = ptr;
3714 struct srb_iocb *abt = &sp->u.iocb_cmd;
3715
3716 + del_timer(&sp->u.iocb_cmd.timer);
3717 complete(&abt->u.abt.comp);
3718 }
3719
3720 @@ -1445,6 +1455,8 @@ static void
3721 qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
3722 {
3723 port_id_t cid; /* conflict Nport id */
3724 + u16 lid;
3725 + struct fc_port *conflict_fcport;
3726
3727 switch (ea->data[0]) {
3728 case MBS_COMMAND_COMPLETE:
3729 @@ -1460,8 +1472,12 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
3730 qla24xx_post_prli_work(vha, ea->fcport);
3731 } else {
3732 ql_dbg(ql_dbg_disc, vha, 0x20ea,
3733 - "%s %d %8phC post gpdb\n",
3734 - __func__, __LINE__, ea->fcport->port_name);
3735 + "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
3736 + __func__, __LINE__, ea->fcport->port_name,
3737 + ea->fcport->loop_id, ea->fcport->d_id.b24);
3738 +
3739 + set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
3740 + ea->fcport->loop_id = FC_NO_LOOP_ID;
3741 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
3742 ea->fcport->logout_on_delete = 1;
3743 ea->fcport->send_els_logo = 0;
3744 @@ -1506,8 +1522,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
3745 ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
3746 ea->fcport->d_id.b.al_pa);
3747
3748 - qla2x00_clear_loop_id(ea->fcport);
3749 - qla24xx_post_gidpn_work(vha, ea->fcport);
3750 + lid = ea->iop[1] & 0xffff;
3751 + qlt_find_sess_invalidate_other(vha,
3752 + wwn_to_u64(ea->fcport->port_name),
3753 + ea->fcport->d_id, lid, &conflict_fcport);
3754 +
3755 + if (conflict_fcport) {
3756 + /*
3757 + * Another fcport shares the same loop_id/nport id.
3758 + * The conflicting fcport needs to finish cleanup before
3759 + * this fcport can proceed to login.
3760 + */
3761 + conflict_fcport->conflict = ea->fcport;
3762 + ea->fcport->login_pause = 1;
3763 +
3764 + ql_dbg(ql_dbg_disc, vha, 0x20ed,
3765 + "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
3766 + __func__, __LINE__, ea->fcport->port_name,
3767 + ea->fcport->d_id.b24, lid);
3768 + qla2x00_clear_loop_id(ea->fcport);
3769 + qla24xx_post_gidpn_work(vha, ea->fcport);
3770 + } else {
3771 + ql_dbg(ql_dbg_disc, vha, 0x20ed,
3772 + "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
3773 + __func__, __LINE__, ea->fcport->port_name,
3774 + ea->fcport->d_id.b24, lid);
3775 +
3776 + qla2x00_clear_loop_id(ea->fcport);
3777 + set_bit(lid, vha->hw->loop_id_map);
3778 + ea->fcport->loop_id = lid;
3779 + ea->fcport->keep_nport_handle = 0;
3780 + qlt_schedule_sess_for_deletion(ea->fcport, false);
3781 + }
3782 break;
3783 }
3784 return;
3785 @@ -8047,9 +8093,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
3786 int ret = QLA_FUNCTION_FAILED;
3787 struct qla_hw_data *ha = qpair->hw;
3788
3789 - if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
3790 - goto fail;
3791 -
3792 qpair->delete_in_progress = 1;
3793 while (atomic_read(&qpair->ref_count))
3794 msleep(500);
3795 @@ -8057,6 +8100,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
3796 ret = qla25xx_delete_req_que(vha, qpair->req);
3797 if (ret != QLA_SUCCESS)
3798 goto fail;
3799 +
3800 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
3801 if (ret != QLA_SUCCESS)
3802 goto fail;
3803 diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
3804 index 2f94159186d7..63bea6a65d51 100644
3805 --- a/drivers/scsi/qla2xxx/qla_iocb.c
3806 +++ b/drivers/scsi/qla2xxx/qla_iocb.c
3807 @@ -2392,26 +2392,13 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
3808 srb_t *sp = data;
3809 fc_port_t *fcport = sp->fcport;
3810 struct scsi_qla_host *vha = sp->vha;
3811 - struct qla_hw_data *ha = vha->hw;
3812 struct srb_iocb *lio = &sp->u.iocb_cmd;
3813 - unsigned long flags = 0;
3814
3815 ql_dbg(ql_dbg_io, vha, 0x3069,
3816 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
3817 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
3818 fcport->d_id.b.al_pa);
3819
3820 - /* Abort the exchange */
3821 - spin_lock_irqsave(&ha->hardware_lock, flags);
3822 - if (ha->isp_ops->abort_command(sp)) {
3823 - ql_dbg(ql_dbg_io, vha, 0x3070,
3824 - "mbx abort_command failed.\n");
3825 - } else {
3826 - ql_dbg(ql_dbg_io, vha, 0x3071,
3827 - "mbx abort_command success.\n");
3828 - }
3829 - spin_unlock_irqrestore(&ha->hardware_lock, flags);
3830 -
3831 complete(&lio->u.els_logo.comp);
3832 }
3833
3834 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
3835 index 9d9668aac6f6..d95b879c2bca 100644
3836 --- a/drivers/scsi/qla2xxx/qla_isr.c
3837 +++ b/drivers/scsi/qla2xxx/qla_isr.c
3838 @@ -1569,7 +1569,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
3839 /* borrowing sts_entry_24xx.comp_status.
3840 same location as ct_entry_24xx.comp_status
3841 */
3842 - res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
3843 + res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
3844 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
3845 sp->name);
3846 sp->done(sp, res);
3847 @@ -2341,7 +2341,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3848 int res = 0;
3849 uint16_t state_flags = 0;
3850 uint16_t retry_delay = 0;
3851 - uint8_t no_logout = 0;
3852
3853 sts = (sts_entry_t *) pkt;
3854 sts24 = (struct sts_entry_24xx *) pkt;
3855 @@ -2612,7 +2611,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3856 break;
3857
3858 case CS_PORT_LOGGED_OUT:
3859 - no_logout = 1;
3860 case CS_PORT_CONFIG_CHG:
3861 case CS_PORT_BUSY:
3862 case CS_INCOMPLETE:
3863 @@ -2643,9 +2641,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3864 port_state_str[atomic_read(&fcport->state)],
3865 comp_status);
3866
3867 - if (no_logout)
3868 - fcport->logout_on_delete = 0;
3869 -
3870 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
3871 qlt_schedule_sess_for_deletion_lock(fcport);
3872 }
3873 diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
3874 index 99502fa90810..2d909e12e23a 100644
3875 --- a/drivers/scsi/qla2xxx/qla_mbx.c
3876 +++ b/drivers/scsi/qla2xxx/qla_mbx.c
3877 @@ -6078,8 +6078,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
3878 }
3879
3880 /* Check for logged in state. */
3881 - if (current_login_state != PDS_PRLI_COMPLETE &&
3882 - last_login_state != PDS_PRLI_COMPLETE) {
3883 + if (current_login_state != PDS_PRLI_COMPLETE) {
3884 ql_dbg(ql_dbg_mbx, vha, 0x119a,
3885 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
3886 current_login_state, last_login_state, fcport->loop_id);
3887 diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
3888 index c0f8f6c17b79..78df7cfca568 100644
3889 --- a/drivers/scsi/qla2xxx/qla_mid.c
3890 +++ b/drivers/scsi/qla2xxx/qla_mid.c
3891 @@ -343,15 +343,21 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
3892 "FCPort update end.\n");
3893 }
3894
3895 - if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
3896 - !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
3897 - atomic_read(&vha->loop_state) != LOOP_DOWN) {
3898 -
3899 - ql_dbg(ql_dbg_dpc, vha, 0x4018,
3900 - "Relogin needed scheduled.\n");
3901 - qla2x00_relogin(vha);
3902 - ql_dbg(ql_dbg_dpc, vha, 0x4019,
3903 - "Relogin needed end.\n");
3904 + if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
3905 + !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
3906 + atomic_read(&vha->loop_state) != LOOP_DOWN) {
3907 +
3908 + if (!vha->relogin_jif ||
3909 + time_after_eq(jiffies, vha->relogin_jif)) {
3910 + vha->relogin_jif = jiffies + HZ;
3911 + clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3912 +
3913 + ql_dbg(ql_dbg_dpc, vha, 0x4018,
3914 + "Relogin needed scheduled.\n");
3915 + qla2x00_relogin(vha);
3916 + ql_dbg(ql_dbg_dpc, vha, 0x4019,
3917 + "Relogin needed end.\n");
3918 + }
3919 }
3920
3921 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
3922 @@ -569,14 +575,15 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3923 int
3924 qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
3925 {
3926 - int ret = -1;
3927 + int ret = QLA_SUCCESS;
3928
3929 - if (req) {
3930 + if (req && vha->flags.qpairs_req_created) {
3931 req->options |= BIT_0;
3932 ret = qla25xx_init_req_que(vha, req);
3933 + if (ret != QLA_SUCCESS)
3934 + return QLA_FUNCTION_FAILED;
3935 }
3936 - if (ret == QLA_SUCCESS)
3937 - qla25xx_free_req_que(vha, req);
3938 + qla25xx_free_req_que(vha, req);
3939
3940 return ret;
3941 }
3942 @@ -584,14 +591,15 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
3943 int
3944 qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3945 {
3946 - int ret = -1;
3947 + int ret = QLA_SUCCESS;
3948
3949 - if (rsp) {
3950 + if (rsp && vha->flags.qpairs_rsp_created) {
3951 rsp->options |= BIT_0;
3952 ret = qla25xx_init_rsp_que(vha, rsp);
3953 + if (ret != QLA_SUCCESS)
3954 + return QLA_FUNCTION_FAILED;
3955 }
3956 - if (ret == QLA_SUCCESS)
3957 - qla25xx_free_rsp_que(vha, rsp);
3958 + qla25xx_free_rsp_que(vha, rsp);
3959
3960 return ret;
3961 }
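The relogin hunk earlier in this file (and its twin in qla_os.c below) converts an unconditional test_and_clear_bit() into a rate limiter: the RELOGIN_NEEDED bit is cleared only when a pass actually runs, and passes are spaced at least one second apart via relogin_jif. A time()-based sketch of that throttle (HZ-based jiffies arithmetic assumed away):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool relogin_needed = true;
static time_t relogin_not_before;

/* One pass per second at most; a request arriving mid-interval
 * keeps the flag set, so it is deferred rather than lost. */
static void dpc_tick(void)
{
	time_t now = time(NULL);

	if (relogin_needed && now >= relogin_not_before) {
		relogin_not_before = now + 1;  /* jiffies + HZ in the driver */
		relogin_needed = false;        /* cleared only when handled */
		puts("relogin pass");
	}
}

int main(void)
{
	dpc_tick();               /* runs */
	relogin_needed = true;
	dpc_tick();               /* throttled: within the 1 s window */
	return 0;
}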
3962 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
3963 index 6eaaa326e508..cfe7654f6bd3 100644
3964 --- a/drivers/scsi/qla2xxx/qla_os.c
3965 +++ b/drivers/scsi/qla2xxx/qla_os.c
3966 @@ -3003,9 +3003,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3967 base_vha = qla2x00_create_host(sht, ha);
3968 if (!base_vha) {
3969 ret = -ENOMEM;
3970 - qla2x00_mem_free(ha);
3971 - qla2x00_free_req_que(ha, req);
3972 - qla2x00_free_rsp_que(ha, rsp);
3973 goto probe_hw_failed;
3974 }
3975
3976 @@ -3066,7 +3063,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3977 /* Set up the irqs */
3978 ret = qla2x00_request_irqs(ha, rsp);
3979 if (ret)
3980 - goto probe_init_failed;
3981 + goto probe_hw_failed;
3982
3983 /* Alloc arrays of request and response ring ptrs */
3984 if (!qla2x00_alloc_queues(ha, req, rsp)) {
3985 @@ -3177,10 +3174,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3986 host->can_queue, base_vha->req,
3987 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
3988
3989 + ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
3990 +
3991 if (ha->mqenable) {
3992 bool mq = false;
3993 bool startit = false;
3994 - ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
3995
3996 if (QLA_TGT_MODE_ENABLED()) {
3997 mq = true;
3998 @@ -3374,6 +3372,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3999 scsi_host_put(base_vha->host);
4000
4001 probe_hw_failed:
4002 + qla2x00_mem_free(ha);
4003 + qla2x00_free_req_que(ha, req);
4004 + qla2x00_free_rsp_que(ha, rsp);
4005 qla2x00_clear_drv_active(ha);
4006
4007 iospace_config_failed:
4008 @@ -4498,6 +4499,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4009 INIT_LIST_HEAD(&vha->qp_list);
4010 INIT_LIST_HEAD(&vha->gnl.fcports);
4011 INIT_LIST_HEAD(&vha->nvme_rport_list);
4012 + INIT_LIST_HEAD(&vha->gpnid_list);
4013
4014 spin_lock_init(&vha->work_lock);
4015 spin_lock_init(&vha->cmd_list_lock);
4016 @@ -4732,11 +4734,11 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4017 } else {
4018 list_add_tail(&fcport->list, &vha->vp_fcports);
4019
4020 - if (pla) {
4021 - qlt_plogi_ack_link(vha, pla, fcport,
4022 - QLT_PLOGI_LINK_SAME_WWN);
4023 - pla->ref_count--;
4024 - }
4025 + }
4026 + if (pla) {
4027 + qlt_plogi_ack_link(vha, pla, fcport,
4028 + QLT_PLOGI_LINK_SAME_WWN);
4029 + pla->ref_count--;
4030 }
4031 }
4032 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4033 @@ -4858,7 +4860,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
4034 */
4035 if (atomic_read(&fcport->state) != FCS_ONLINE &&
4036 fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
4037 - fcport->login_retry--;
4038 +
4039 if (fcport->flags & FCF_FABRIC_DEVICE) {
4040 ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
4041 "%s %8phC DS %d LS %d\n", __func__,
4042 @@ -4869,6 +4871,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
4043 ea.fcport = fcport;
4044 qla2x00_fcport_event_handler(vha, &ea);
4045 } else {
4046 + fcport->login_retry--;
4047 status = qla2x00_local_device_login(vha,
4048 fcport);
4049 if (status == QLA_SUCCESS) {
4050 @@ -5851,16 +5854,21 @@ qla2x00_do_dpc(void *data)
4051 }
4052
4053 /* Retry each device up to login retry count */
4054 - if ((test_and_clear_bit(RELOGIN_NEEDED,
4055 - &base_vha->dpc_flags)) &&
4056 + if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
4057 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
4058 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
4059
4060 - ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
4061 - "Relogin scheduled.\n");
4062 - qla2x00_relogin(base_vha);
4063 - ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
4064 - "Relogin end.\n");
4065 + if (!base_vha->relogin_jif ||
4066 + time_after_eq(jiffies, base_vha->relogin_jif)) {
4067 + base_vha->relogin_jif = jiffies + HZ;
4068 + clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
4069 +
4070 + ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
4071 + "Relogin scheduled.\n");
4072 + qla2x00_relogin(base_vha);
4073 + ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
4074 + "Relogin end.\n");
4075 + }
4076 }
4077 loop_resync_check:
4078 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
4079 @@ -6591,9 +6599,14 @@ qla83xx_disable_laser(scsi_qla_host_t *vha)
4080
4081 static int qla2xxx_map_queues(struct Scsi_Host *shost)
4082 {
4083 + int rc;
4084 scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
4085
4086 - return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
4087 + if (USER_CTRL_IRQ(vha->hw))
4088 + rc = blk_mq_map_queues(&shost->tag_set);
4089 + else
4090 + rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
4091 + return rc;
4092 }
4093
4094 static const struct pci_error_handlers qla2xxx_err_handler = {
4095 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
4096 index f05cfc83c9c8..040a76011ffa 100644
4097 --- a/drivers/scsi/qla2xxx/qla_target.c
4098 +++ b/drivers/scsi/qla2xxx/qla_target.c
4099 @@ -665,7 +665,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
4100 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
4101
4102 sp->u.iocb_cmd.u.nack.ntfy = ntfy;
4103 -
4104 + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4105 sp->done = qla2x00_async_nack_sp_done;
4106
4107 rval = qla2x00_start_sp(sp);
4108 @@ -974,7 +974,7 @@ static void qlt_free_session_done(struct work_struct *work)
4109 qlt_send_first_logo(vha, &logo);
4110 }
4111
4112 - if (sess->logout_on_delete) {
4113 + if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
4114 int rc;
4115
4116 rc = qla2x00_post_async_logout_work(vha, sess, NULL);
4117 @@ -1033,8 +1033,7 @@ static void qlt_free_session_done(struct work_struct *work)
4118 sess->login_succ = 0;
4119 }
4120
4121 - if (sess->chip_reset != ha->base_qpair->chip_reset)
4122 - qla2x00_clear_loop_id(sess);
4123 + qla2x00_clear_loop_id(sess);
4124
4125 if (sess->conflict) {
4126 sess->conflict->login_pause = 0;
4127 @@ -1205,7 +1204,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess,
4128 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
4129 "Scheduling sess %p for deletion\n", sess);
4130
4131 - schedule_work(&sess->del_work);
4132 + INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
4133 + queue_work(sess->vha->hw->wq, &sess->del_work);
4134 }
4135
4136 void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
4137 @@ -1560,8 +1560,11 @@ static void qlt_release(struct qla_tgt *tgt)
4138
4139 btree_destroy64(&tgt->lun_qpair_map);
4140
4141 - if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target)
4142 - ha->tgt.tgt_ops->remove_target(vha);
4143 + if (vha->vp_idx)
4144 + if (ha->tgt.tgt_ops &&
4145 + ha->tgt.tgt_ops->remove_target &&
4146 + vha->vha_tgt.target_lport_ptr)
4147 + ha->tgt.tgt_ops->remove_target(vha);
4148
4149 vha->vha_tgt.qla_tgt = NULL;
4150
4151 @@ -3708,7 +3711,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
4152 term = 1;
4153
4154 if (term)
4155 - qlt_term_ctio_exchange(qpair, ctio, cmd, status);
4156 + qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
4157
4158 return term;
4159 }
4160 @@ -4584,9 +4587,9 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4161 "Invalidating sess %p loop_id %d wwn %llx.\n",
4162 other_sess, other_sess->loop_id, other_wwn);
4163
4164 -
4165 other_sess->keep_nport_handle = 1;
4166 - *conflict_sess = other_sess;
4167 + if (other_sess->disc_state != DSC_DELETED)
4168 + *conflict_sess = other_sess;
4169 qlt_schedule_sess_for_deletion(other_sess,
4170 true);
4171 }
4172 @@ -5755,7 +5758,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
4173 unsigned long flags;
4174 u8 newfcport = 0;
4175
4176 - fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
4177 + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
4178 if (!fcport) {
4179 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
4180 "qla_target(%d): Allocation of tmp FC port failed",
4181 @@ -5784,6 +5787,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
4182 tfcp->port_type = fcport->port_type;
4183 tfcp->supported_classes = fcport->supported_classes;
4184 tfcp->flags |= fcport->flags;
4185 + tfcp->scan_state = QLA_FCPORT_FOUND;
4186
4187 del = fcport;
4188 fcport = tfcp;
4189 diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
4190 index fa504ba83ade..cf70f0bb8375 100644
4191 --- a/drivers/scsi/scsi_error.c
4192 +++ b/drivers/scsi/scsi_error.c
4193 @@ -222,7 +222,8 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
4194
4195 static void scsi_eh_inc_host_failed(struct rcu_head *head)
4196 {
4197 - struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
4198 + struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
4199 + struct Scsi_Host *shost = scmd->device->host;
4200 unsigned long flags;
4201
4202 spin_lock_irqsave(shost->host_lock, flags);
4203 @@ -258,7 +259,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
4204 * Ensure that all tasks observe the host state change before the
4205 * host_failed change.
4206 */
4207 - call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
4208 + call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);
4209 }
4210
4211 /**
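The fix moves the rcu_head from the shared Scsi_Host into each scsi_cmnd, so two failing commands can no longer queue the same head twice; the callback then recovers its command from the embedded head via container_of. That recovery pattern, as a runnable sketch with a simplified stand-in struct (not the real scsi_cmnd layout):

#include <stddef.h>
#include <stdio.h>

/* Userspace container_of: recover the enclosing struct from a member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void *next; };

struct scsi_cmnd_like {
	int id;
	struct rcu_head rcu;      /* the callback receives &cmd->rcu */
};

static void callback(struct rcu_head *head)
{
	struct scsi_cmnd_like *cmd =
		container_of(head, struct scsi_cmnd_like, rcu);

	printf("cmd %d\n", cmd->id);
}

int main(void)
{
	struct scsi_cmnd_like cmd = { .id = 42 };

	callback(&cmd.rcu);       /* prints "cmd 42" */
	return 0;
}

The matching scsi_lib.c hunk below pairs init_rcu_head()/destroy_rcu_head() with the per-command lifetime, the other half of the same ownership change.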
4212 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
4213 index 0d3696e9dddd..359386730523 100644
4214 --- a/drivers/scsi/scsi_lib.c
4215 +++ b/drivers/scsi/scsi_lib.c
4216 @@ -670,6 +670,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
4217 if (!blk_rq_is_scsi(req)) {
4218 WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
4219 cmd->flags &= ~SCMD_INITIALIZED;
4220 + destroy_rcu_head(&cmd->rcu);
4221 }
4222
4223 if (req->mq_ctx) {
4224 @@ -1150,6 +1151,7 @@ void scsi_initialize_rq(struct request *rq)
4225 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
4226
4227 scsi_req_init(&cmd->req);
4228 + init_rcu_head(&cmd->rcu);
4229 cmd->jiffies_at_alloc = jiffies;
4230 cmd->retries = 0;
4231 }
4232 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
4233 index eb30f3e09a47..71458f493cf8 100644
4234 --- a/drivers/virtio/virtio_ring.c
4235 +++ b/drivers/virtio/virtio_ring.c
4236 @@ -428,8 +428,6 @@ static inline int virtqueue_add(struct virtqueue *_vq,
4237 i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
4238 }
4239
4240 - vq->vq.num_free += total_sg;
4241 -
4242 if (indirect)
4243 kfree(desc);
4244
4245 diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
4246 index 67fbe35ce7cf..b0a158073abd 100644
4247 --- a/drivers/watchdog/hpwdt.c
4248 +++ b/drivers/watchdog/hpwdt.c
4249 @@ -28,16 +28,7 @@
4250 #include <linux/types.h>
4251 #include <linux/uaccess.h>
4252 #include <linux/watchdog.h>
4253 -#ifdef CONFIG_HPWDT_NMI_DECODING
4254 -#include <linux/dmi.h>
4255 -#include <linux/spinlock.h>
4256 -#include <linux/nmi.h>
4257 -#include <linux/kdebug.h>
4258 -#include <linux/notifier.h>
4259 -#include <asm/set_memory.h>
4260 -#endif /* CONFIG_HPWDT_NMI_DECODING */
4261 #include <asm/nmi.h>
4262 -#include <asm/frame.h>
4263
4264 #define HPWDT_VERSION "1.4.0"
4265 #define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
4266 @@ -48,10 +39,14 @@
4267 static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
4268 static unsigned int reload; /* the computed soft_margin */
4269 static bool nowayout = WATCHDOG_NOWAYOUT;
4270 +#ifdef CONFIG_HPWDT_NMI_DECODING
4271 +static unsigned int allow_kdump = 1;
4272 +#endif
4273 static char expect_release;
4274 static unsigned long hpwdt_is_open;
4275
4276 static void __iomem *pci_mem_addr; /* the PCI-memory address */
4277 +static unsigned long __iomem *hpwdt_nmistat;
4278 static unsigned long __iomem *hpwdt_timer_reg;
4279 static unsigned long __iomem *hpwdt_timer_con;
4280
4281 @@ -62,373 +57,6 @@ static const struct pci_device_id hpwdt_devices[] = {
4282 };
4283 MODULE_DEVICE_TABLE(pci, hpwdt_devices);
4284
4285 -#ifdef CONFIG_HPWDT_NMI_DECODING
4286 -#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */
4287 -#define CRU_BIOS_SIGNATURE_VALUE 0x55524324
4288 -#define PCI_BIOS32_PARAGRAPH_LEN 16
4289 -#define PCI_ROM_BASE1 0x000F0000
4290 -#define ROM_SIZE 0x10000
4291 -
4292 -struct bios32_service_dir {
4293 - u32 signature;
4294 - u32 entry_point;
4295 - u8 revision;
4296 - u8 length;
4297 - u8 checksum;
4298 - u8 reserved[5];
4299 -};
4300 -
4301 -/* type 212 */
4302 -struct smbios_cru64_info {
4303 - u8 type;
4304 - u8 byte_length;
4305 - u16 handle;
4306 - u32 signature;
4307 - u64 physical_address;
4308 - u32 double_length;
4309 - u32 double_offset;
4310 -};
4311 -#define SMBIOS_CRU64_INFORMATION 212
4312 -
4313 -/* type 219 */
4314 -struct smbios_proliant_info {
4315 - u8 type;
4316 - u8 byte_length;
4317 - u16 handle;
4318 - u32 power_features;
4319 - u32 omega_features;
4320 - u32 reserved;
4321 - u32 misc_features;
4322 -};
4323 -#define SMBIOS_ICRU_INFORMATION 219
4324 -
4325 -
4326 -struct cmn_registers {
4327 - union {
4328 - struct {
4329 - u8 ral;
4330 - u8 rah;
4331 - u16 rea2;
4332 - };
4333 - u32 reax;
4334 - } u1;
4335 - union {
4336 - struct {
4337 - u8 rbl;
4338 - u8 rbh;
4339 - u8 reb2l;
4340 - u8 reb2h;
4341 - };
4342 - u32 rebx;
4343 - } u2;
4344 - union {
4345 - struct {
4346 - u8 rcl;
4347 - u8 rch;
4348 - u16 rec2;
4349 - };
4350 - u32 recx;
4351 - } u3;
4352 - union {
4353 - struct {
4354 - u8 rdl;
4355 - u8 rdh;
4356 - u16 red2;
4357 - };
4358 - u32 redx;
4359 - } u4;
4360 -
4361 - u32 resi;
4362 - u32 redi;
4363 - u16 rds;
4364 - u16 res;
4365 - u32 reflags;
4366 -} __attribute__((packed));
4367 -
4368 -static unsigned int hpwdt_nmi_decoding;
4369 -static unsigned int allow_kdump = 1;
4370 -static unsigned int is_icru;
4371 -static unsigned int is_uefi;
4372 -static DEFINE_SPINLOCK(rom_lock);
4373 -static void *cru_rom_addr;
4374 -static struct cmn_registers cmn_regs;
4375 -
4376 -extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
4377 - unsigned long *pRomEntry);
4378 -
4379 -#ifdef CONFIG_X86_32
4380 -/* --32 Bit Bios------------------------------------------------------------ */
4381 -
4382 -#define HPWDT_ARCH 32
4383 -
4384 -asm(".text \n\t"
4385 - ".align 4 \n\t"
4386 - ".globl asminline_call \n"
4387 - "asminline_call: \n\t"
4388 - "pushl %ebp \n\t"
4389 - "movl %esp, %ebp \n\t"
4390 - "pusha \n\t"
4391 - "pushf \n\t"
4392 - "push %es \n\t"
4393 - "push %ds \n\t"
4394 - "pop %es \n\t"
4395 - "movl 8(%ebp),%eax \n\t"
4396 - "movl 4(%eax),%ebx \n\t"
4397 - "movl 8(%eax),%ecx \n\t"
4398 - "movl 12(%eax),%edx \n\t"
4399 - "movl 16(%eax),%esi \n\t"
4400 - "movl 20(%eax),%edi \n\t"
4401 - "movl (%eax),%eax \n\t"
4402 - "push %cs \n\t"
4403 - "call *12(%ebp) \n\t"
4404 - "pushf \n\t"
4405 - "pushl %eax \n\t"
4406 - "movl 8(%ebp),%eax \n\t"
4407 - "movl %ebx,4(%eax) \n\t"
4408 - "movl %ecx,8(%eax) \n\t"
4409 - "movl %edx,12(%eax) \n\t"
4410 - "movl %esi,16(%eax) \n\t"
4411 - "movl %edi,20(%eax) \n\t"
4412 - "movw %ds,24(%eax) \n\t"
4413 - "movw %es,26(%eax) \n\t"
4414 - "popl %ebx \n\t"
4415 - "movl %ebx,(%eax) \n\t"
4416 - "popl %ebx \n\t"
4417 - "movl %ebx,28(%eax) \n\t"
4418 - "pop %es \n\t"
4419 - "popf \n\t"
4420 - "popa \n\t"
4421 - "leave \n\t"
4422 - "ret \n\t"
4423 - ".previous");
4424 -
4425 -
4426 -/*
4427 - * cru_detect
4428 - *
4429 - * Routine Description:
4430 - * This function uses the 32-bit BIOS Service Directory record to
4431 - * search for a $CRU record.
4432 - *
4433 - * Return Value:
4434 - * 0 : SUCCESS
4435 - * <0 : FAILURE
4436 - */
4437 -static int cru_detect(unsigned long map_entry,
4438 - unsigned long map_offset)
4439 -{
4440 - void *bios32_map;
4441 - unsigned long *bios32_entrypoint;
4442 - unsigned long cru_physical_address;
4443 - unsigned long cru_length;
4444 - unsigned long physical_bios_base = 0;
4445 - unsigned long physical_bios_offset = 0;
4446 - int retval = -ENODEV;
4447 -
4448 - bios32_map = ioremap(map_entry, (2 * PAGE_SIZE));
4449 -
4450 - if (bios32_map == NULL)
4451 - return -ENODEV;
4452 -
4453 - bios32_entrypoint = bios32_map + map_offset;
4454 -
4455 - cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
4456 -
4457 - set_memory_x((unsigned long)bios32_map, 2);
4458 - asminline_call(&cmn_regs, bios32_entrypoint);
4459 -
4460 - if (cmn_regs.u1.ral != 0) {
4461 - pr_warn("Call succeeded but with an error: 0x%x\n",
4462 - cmn_regs.u1.ral);
4463 - } else {
4464 - physical_bios_base = cmn_regs.u2.rebx;
4465 - physical_bios_offset = cmn_regs.u4.redx;
4466 - cru_length = cmn_regs.u3.recx;
4467 - cru_physical_address =
4468 - physical_bios_base + physical_bios_offset;
4469 -
4470 - /* If the values look OK, then map it in. */
4471 - if ((physical_bios_base + physical_bios_offset)) {
4472 - cru_rom_addr =
4473 - ioremap(cru_physical_address, cru_length);
4474 - if (cru_rom_addr) {
4475 - set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
4476 - (cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT);
4477 - retval = 0;
4478 - }
4479 - }
4480 -
4481 - pr_debug("CRU Base Address: 0x%lx\n", physical_bios_base);
4482 - pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset);
4483 - pr_debug("CRU Length: 0x%lx\n", cru_length);
4484 - pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr);
4485 - }
4486 - iounmap(bios32_map);
4487 - return retval;
4488 -}
4489 -
4490 -/*
4491 - * bios_checksum
4492 - */
4493 -static int bios_checksum(const char __iomem *ptr, int len)
4494 -{
4495 - char sum = 0;
4496 - int i;
4497 -
4498 - /*
4499 - * calculate checksum of size bytes. This should add up
4500 - * to zero if we have a valid header.
4501 - */
4502 - for (i = 0; i < len; i++)
4503 - sum += ptr[i];
4504 -
4505 - return ((sum == 0) && (len > 0));
4506 -}
4507 -
4508 -/*
4509 - * bios32_present
4510 - *
4511 - * Routine Description:
4512 - * This function finds the 32-bit BIOS Service Directory
4513 - *
4514 - * Return Value:
4515 - * 0 : SUCCESS
4516 - * <0 : FAILURE
4517 - */
4518 -static int bios32_present(const char __iomem *p)
4519 -{
4520 - struct bios32_service_dir *bios_32_ptr;
4521 - int length;
4522 - unsigned long map_entry, map_offset;
4523 -
4524 - bios_32_ptr = (struct bios32_service_dir *) p;
4525 -
4526 - /*
4527 - * Search for signature by checking equal to the swizzled value
4528 - * instead of calling another routine to perform a strcmp.
4529 - */
4530 - if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) {
4531 - length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN;
4532 - if (bios_checksum(p, length)) {
4533 - /*
4534 - * According to the spec, we're looking for the
4535 - * first 4KB-aligned address below the entrypoint
4536 - * listed in the header. The Service Directory code
4537 - * is guaranteed to occupy no more than 2 4KB pages.
4538 - */
4539 - map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1);
4540 - map_offset = bios_32_ptr->entry_point - map_entry;
4541 -
4542 - return cru_detect(map_entry, map_offset);
4543 - }
4544 - }
4545 - return -ENODEV;
4546 -}
4547 -
4548 -static int detect_cru_service(void)
4549 -{
4550 - char __iomem *p, *q;
4551 - int rc = -1;
4552 -
4553 - /*
4554 - * Search from 0x0f0000 through 0x0fffff, inclusive.
4555 - */
4556 - p = ioremap(PCI_ROM_BASE1, ROM_SIZE);
4557 - if (p == NULL)
4558 - return -ENOMEM;
4559 -
4560 - for (q = p; q < p + ROM_SIZE; q += 16) {
4561 - rc = bios32_present(q);
4562 - if (!rc)
4563 - break;
4564 - }
4565 - iounmap(p);
4566 - return rc;
4567 -}
4568 -/* ------------------------------------------------------------------------- */
4569 -#endif /* CONFIG_X86_32 */
4570 -#ifdef CONFIG_X86_64
4571 -/* --64 Bit Bios------------------------------------------------------------ */
4572 -
4573 -#define HPWDT_ARCH 64
4574 -
4575 -asm(".text \n\t"
4576 - ".align 4 \n\t"
4577 - ".globl asminline_call \n\t"
4578 - ".type asminline_call, @function \n\t"
4579 - "asminline_call: \n\t"
4580 - FRAME_BEGIN
4581 - "pushq %rax \n\t"
4582 - "pushq %rbx \n\t"
4583 - "pushq %rdx \n\t"
4584 - "pushq %r12 \n\t"
4585 - "pushq %r9 \n\t"
4586 - "movq %rsi, %r12 \n\t"
4587 - "movq %rdi, %r9 \n\t"
4588 - "movl 4(%r9),%ebx \n\t"
4589 - "movl 8(%r9),%ecx \n\t"
4590 - "movl 12(%r9),%edx \n\t"
4591 - "movl 16(%r9),%esi \n\t"
4592 - "movl 20(%r9),%edi \n\t"
4593 - "movl (%r9),%eax \n\t"
4594 - "call *%r12 \n\t"
4595 - "pushfq \n\t"
4596 - "popq %r12 \n\t"
4597 - "movl %eax, (%r9) \n\t"
4598 - "movl %ebx, 4(%r9) \n\t"
4599 - "movl %ecx, 8(%r9) \n\t"
4600 - "movl %edx, 12(%r9) \n\t"
4601 - "movl %esi, 16(%r9) \n\t"
4602 - "movl %edi, 20(%r9) \n\t"
4603 - "movq %r12, %rax \n\t"
4604 - "movl %eax, 28(%r9) \n\t"
4605 - "popq %r9 \n\t"
4606 - "popq %r12 \n\t"
4607 - "popq %rdx \n\t"
4608 - "popq %rbx \n\t"
4609 - "popq %rax \n\t"
4610 - FRAME_END
4611 - "ret \n\t"
4612 - ".previous");
4613 -
4614 -/*
4615 - * dmi_find_cru
4616 - *
4617 - * Routine Description:
4618 - * This function checks whether or not a SMBIOS/DMI record is
4619 - * the 64bit CRU info or not
4620 - */
4621 -static void dmi_find_cru(const struct dmi_header *dm, void *dummy)
4622 -{
4623 - struct smbios_cru64_info *smbios_cru64_ptr;
4624 - unsigned long cru_physical_address;
4625 -
4626 - if (dm->type == SMBIOS_CRU64_INFORMATION) {
4627 - smbios_cru64_ptr = (struct smbios_cru64_info *) dm;
4628 - if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) {
4629 - cru_physical_address =
4630 - smbios_cru64_ptr->physical_address +
4631 - smbios_cru64_ptr->double_offset;
4632 - cru_rom_addr = ioremap(cru_physical_address,
4633 - smbios_cru64_ptr->double_length);
4634 - set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
4635 - smbios_cru64_ptr->double_length >> PAGE_SHIFT);
4636 - }
4637 - }
4638 -}
4639 -
4640 -static int detect_cru_service(void)
4641 -{
4642 - cru_rom_addr = NULL;
4643 -
4644 - dmi_walk(dmi_find_cru, NULL);
4645 -
4646 - /* if cru_rom_addr has been set then we found a CRU service */
4647 - return ((cru_rom_addr != NULL) ? 0 : -ENODEV);
4648 -}
4649 -/* ------------------------------------------------------------------------- */
4650 -#endif /* CONFIG_X86_64 */
4651 -#endif /* CONFIG_HPWDT_NMI_DECODING */
4652
4653 /*
4654 * Watchdog operations
4655 @@ -475,32 +103,22 @@ static int hpwdt_time_left(void)
4656 }
4657
4658 #ifdef CONFIG_HPWDT_NMI_DECODING
4659 +static int hpwdt_my_nmi(void)
4660 +{
4661 + return ioread8(hpwdt_nmistat) & 0x6;
4662 +}
4663 +
4664 /*
4665 * NMI Handler
4666 */
4667 static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
4668 {
4669 - unsigned long rom_pl;
4670 - static int die_nmi_called;
4671 -
4672 - if (!hpwdt_nmi_decoding)
4673 + if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
4674 return NMI_DONE;
4675
4676 - spin_lock_irqsave(&rom_lock, rom_pl);
4677 - if (!die_nmi_called && !is_icru && !is_uefi)
4678 - asminline_call(&cmn_regs, cru_rom_addr);
4679 - die_nmi_called = 1;
4680 - spin_unlock_irqrestore(&rom_lock, rom_pl);
4681 -
4682 if (allow_kdump)
4683 hpwdt_stop();
4684
4685 - if (!is_icru && !is_uefi) {
4686 - if (cmn_regs.u1.ral == 0) {
4687 - nmi_panic(regs, "An NMI occurred, but unable to determine source.\n");
4688 - return NMI_HANDLED;
4689 - }
4690 - }
4691 nmi_panic(regs, "An NMI occurred. Depending on your system the reason "
4692 "for the NMI is logged in any one of the following "
4693 "resources:\n"
4694 @@ -666,84 +284,11 @@ static struct miscdevice hpwdt_miscdev = {
4695 * Init & Exit
4696 */
4697
4698 -#ifdef CONFIG_HPWDT_NMI_DECODING
4699 -#ifdef CONFIG_X86_LOCAL_APIC
4700 -static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
4701 -{
4702 - /*
4703 - * If nmi_watchdog is turned off then we can turn on
4704 - * our nmi decoding capability.
4705 - */
4706 - hpwdt_nmi_decoding = 1;
4707 -}
4708 -#else
4709 -static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
4710 -{
4711 - dev_warn(&dev->dev, "NMI decoding is disabled. "
4712 - "Your kernel does not support a NMI Watchdog.\n");
4713 -}
4714 -#endif /* CONFIG_X86_LOCAL_APIC */
4715 -
4716 -/*
4717 - * dmi_find_icru
4718 - *
4719 - * Routine Description:
4720 - * This function checks whether or not we are on an iCRU-based server.
4721 - * This check is independent of architecture and needs to be made for
4722 - * any ProLiant system.
4723 - */
4724 -static void dmi_find_icru(const struct dmi_header *dm, void *dummy)
4725 -{
4726 - struct smbios_proliant_info *smbios_proliant_ptr;
4727 -
4728 - if (dm->type == SMBIOS_ICRU_INFORMATION) {
4729 - smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
4730 - if (smbios_proliant_ptr->misc_features & 0x01)
4731 - is_icru = 1;
4732 - if (smbios_proliant_ptr->misc_features & 0x408)
4733 - is_uefi = 1;
4734 - }
4735 -}
4736
4737 static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
4738 {
4739 +#ifdef CONFIG_HPWDT_NMI_DECODING
4740 int retval;
4741 -
4742 - /*
4743 - * On typical CRU-based systems we need to map that service in
4744 - * the BIOS. For 32 bit Operating Systems we need to go through
4745 - * the 32 Bit BIOS Service Directory. For 64 bit Operating
4746 - * Systems we get that service through SMBIOS.
4747 - *
4748 - * On systems that support the new iCRU service all we need to
4749 - * do is call dmi_walk to get the supported flag value and skip
4750 - * the old cru detect code.
4751 - */
4752 - dmi_walk(dmi_find_icru, NULL);
4753 - if (!is_icru && !is_uefi) {
4754 -
4755 - /*
4756 - * We need to map the ROM to get the CRU service.
4757 - * For 32 bit Operating Systems we need to go through the 32 Bit
4758 - * BIOS Service Directory
4759 - * For 64 bit Operating Systems we get that service through SMBIOS.
4760 - */
4761 - retval = detect_cru_service();
4762 - if (retval < 0) {
4763 - dev_warn(&dev->dev,
4764 - "Unable to detect the %d Bit CRU Service.\n",
4765 - HPWDT_ARCH);
4766 - return retval;
4767 - }
4768 -
4769 - /*
4770 - * We know this is the only CRU call we need to make so lets keep as
4771 - * few instructions as possible once the NMI comes in.
4772 - */
4773 - cmn_regs.u1.rah = 0x0D;
4774 - cmn_regs.u1.ral = 0x02;
4775 - }
4776 -
4777 /*
4778 * Only one function can register for NMI_UNKNOWN
4779 */
4780 @@ -771,44 +316,25 @@ static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
4781 dev_warn(&dev->dev,
4782 "Unable to register a die notifier (err=%d).\n",
4783 retval);
4784 - if (cru_rom_addr)
4785 - iounmap(cru_rom_addr);
4786 return retval;
4787 +#endif /* CONFIG_HPWDT_NMI_DECODING */
4788 + return 0;
4789 }
4790
4791 static void hpwdt_exit_nmi_decoding(void)
4792 {
4793 +#ifdef CONFIG_HPWDT_NMI_DECODING
4794 unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
4795 unregister_nmi_handler(NMI_SERR, "hpwdt");
4796 unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
4797 - if (cru_rom_addr)
4798 - iounmap(cru_rom_addr);
4799 -}
4800 -#else /* !CONFIG_HPWDT_NMI_DECODING */
4801 -static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
4802 -{
4803 -}
4804 -
4805 -static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
4806 -{
4807 - return 0;
4808 +#endif
4809 }
4810
4811 -static void hpwdt_exit_nmi_decoding(void)
4812 -{
4813 -}
4814 -#endif /* CONFIG_HPWDT_NMI_DECODING */
4815 -
4816 static int hpwdt_init_one(struct pci_dev *dev,
4817 const struct pci_device_id *ent)
4818 {
4819 int retval;
4820
4821 - /*
4822 - * Check if we can do NMI decoding or not
4823 - */
4824 - hpwdt_check_nmi_decoding(dev);
4825 -
4826 /*
4827 * First let's find out if we are on an iLO2+ server. We will
4828 * not run on a legacy ASM box.
4829 @@ -842,6 +368,7 @@ static int hpwdt_init_one(struct pci_dev *dev,
4830 retval = -ENOMEM;
4831 goto error_pci_iomap;
4832 }
4833 + hpwdt_nmistat = pci_mem_addr + 0x6e;
4834 hpwdt_timer_reg = pci_mem_addr + 0x70;
4835 hpwdt_timer_con = pci_mem_addr + 0x72;
4836
4837 @@ -912,6 +439,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
4838 #ifdef CONFIG_HPWDT_NMI_DECODING
4839 module_param(allow_kdump, int, 0);
4840 MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
4841 -#endif /* !CONFIG_HPWDT_NMI_DECODING */
4842 +#endif /* CONFIG_HPWDT_NMI_DECODING */
4843
4844 module_pci_driver(hpwdt_driver);
4845 diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
4846 index 8c10b0562e75..621c517b325c 100644
4847 --- a/fs/nfs/direct.c
4848 +++ b/fs/nfs/direct.c
4849 @@ -86,10 +86,10 @@ struct nfs_direct_req {
4850 struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
4851 int mirror_count;
4852
4853 + loff_t io_start; /* Start offset for I/O */
4854 ssize_t count, /* bytes actually processed */
4855 max_count, /* max expected count */
4856 bytes_left, /* bytes left to be sent */
4857 - io_start, /* start of IO */
4858 error; /* any reported error */
4859 struct completion completion; /* wait for i/o completion */
4860
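Why this hunk moves io_start instead of just renaming it: inside the ssize_t declarator list the field was only 32 bits wide on 32-bit builds, while loff_t is 64 bits everywhere, so direct I/O starting beyond 2 GiB had its start offset truncated. A minimal illustration of the size difference (standalone userspace sketch, not from the patch):

        #define _GNU_SOURCE             /* exposes loff_t in glibc */
        #include <stdio.h>
        #include <sys/types.h>

        int main(void)
        {
                /* On a 32-bit build this prints "4 8": a ssize_t io_start
                 * wraps at 2 GiB, while a loff_t holds the full offset.
                 */
                printf("%zu %zu\n", sizeof(ssize_t), sizeof(loff_t));
                return 0;
        }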
4861 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
4862 index 5f2f852ef506..7b34534210ce 100644
4863 --- a/fs/nfs/pnfs.c
4864 +++ b/fs/nfs/pnfs.c
4865 @@ -292,8 +292,11 @@ pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
4866 void
4867 pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
4868 {
4869 - struct inode *inode = lo->plh_inode;
4870 + struct inode *inode;
4871
4872 + if (!lo)
4873 + return;
4874 + inode = lo->plh_inode;
4875 pnfs_layoutreturn_before_put_layout_hdr(lo);
4876
4877 if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
4878 @@ -1223,10 +1226,12 @@ bool pnfs_roc(struct inode *ino,
4879 spin_lock(&ino->i_lock);
4880 lo = nfsi->layout;
4881 if (!lo || !pnfs_layout_is_valid(lo) ||
4882 - test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
4883 + test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
4884 + lo = NULL;
4885 goto out_noroc;
4886 + }
4887 + pnfs_get_layout_hdr(lo);
4888 if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
4889 - pnfs_get_layout_hdr(lo);
4890 spin_unlock(&ino->i_lock);
4891 wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
4892 TASK_UNINTERRUPTIBLE);
4893 @@ -1294,10 +1299,12 @@ bool pnfs_roc(struct inode *ino,
4894 struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
4895 if (ld->prepare_layoutreturn)
4896 ld->prepare_layoutreturn(args);
4897 + pnfs_put_layout_hdr(lo);
4898 return true;
4899 }
4900 if (layoutreturn)
4901 pnfs_send_layoutreturn(lo, &stateid, iomode, true);
4902 + pnfs_put_layout_hdr(lo);
4903 return false;
4904 }
4905
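The three pnfs.c hunks implement one pattern: pnfs_roc() now pins the layout header before the unlocked waits and releases it on every return path, and because the early bail-out clears lo first, pnfs_put_layout_hdr() must tolerate NULL. Condensed flow (a comment sketch assembled from the hunks above, not complete code):

        /*
         * lo = nfsi->layout;
         * if (!lo || !pnfs_layout_is_valid(lo) || bulk recall pending) {
         *         lo = NULL;                    no reference was taken
         *         goto out_noroc;
         * }
         * pnfs_get_layout_hdr(lo);              pin across unlocked waits
         * ...
         * on all exits:
         * pnfs_put_layout_hdr(lo);              NULL-safe as of this patch
         */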
4906 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
4907 index 76da415be39a..19e6ea89ad26 100644
4908 --- a/fs/nfs/write.c
4909 +++ b/fs/nfs/write.c
4910 @@ -1877,40 +1877,43 @@ int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
4911 return status;
4912 }
4913
4914 -int nfs_commit_inode(struct inode *inode, int how)
4915 +static int __nfs_commit_inode(struct inode *inode, int how,
4916 + struct writeback_control *wbc)
4917 {
4918 LIST_HEAD(head);
4919 struct nfs_commit_info cinfo;
4920 int may_wait = how & FLUSH_SYNC;
4921 - int error = 0;
4922 - int res;
4923 + int ret, nscan;
4924
4925 nfs_init_cinfo_from_inode(&cinfo, inode);
4926 nfs_commit_begin(cinfo.mds);
4927 - res = nfs_scan_commit(inode, &head, &cinfo);
4928 - if (res)
4929 - error = nfs_generic_commit_list(inode, &head, how, &cinfo);
4930 + for (;;) {
4931 + ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
4932 + if (ret <= 0)
4933 + break;
4934 + ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
4935 + if (ret < 0)
4936 + break;
4937 + ret = 0;
4938 + if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
4939 + if (nscan < wbc->nr_to_write)
4940 + wbc->nr_to_write -= nscan;
4941 + else
4942 + wbc->nr_to_write = 0;
4943 + }
4944 + if (nscan < INT_MAX)
4945 + break;
4946 + cond_resched();
4947 + }
4948 nfs_commit_end(cinfo.mds);
4949 - if (res == 0)
4950 - return res;
4951 - if (error < 0)
4952 - goto out_error;
4953 - if (!may_wait)
4954 - goto out_mark_dirty;
4955 - error = wait_on_commit(cinfo.mds);
4956 - if (error < 0)
4957 - return error;
4958 - return res;
4959 -out_error:
4960 - res = error;
4961 - /* Note: If we exit without ensuring that the commit is complete,
4962 - * we must mark the inode as dirty. Otherwise, future calls to
4963 - * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
4964 - * that the data is on the disk.
4965 - */
4966 -out_mark_dirty:
4967 - __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
4968 - return res;
4969 + if (ret || !may_wait)
4970 + return ret;
4971 + return wait_on_commit(cinfo.mds);
4972 +}
4973 +
4974 +int nfs_commit_inode(struct inode *inode, int how)
4975 +{
4976 + return __nfs_commit_inode(inode, how, NULL);
4977 }
4978 EXPORT_SYMBOL_GPL(nfs_commit_inode);
4979
4980 @@ -1920,11 +1923,11 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4981 int flags = FLUSH_SYNC;
4982 int ret = 0;
4983
4984 - /* no commits means nothing needs to be done */
4985 - if (!atomic_long_read(&nfsi->commit_info.ncommit))
4986 - return ret;
4987 -
4988 if (wbc->sync_mode == WB_SYNC_NONE) {
4989 + /* no commits means nothing needs to be done */
4990 + if (!atomic_long_read(&nfsi->commit_info.ncommit))
4991 + goto check_requests_outstanding;
4992 +
4993 /* Don't commit yet if this is a non-blocking flush and there
4994 * are a lot of outstanding writes for this mapping.
4995 */
4996 @@ -1935,16 +1938,16 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4997 flags = 0;
4998 }
4999
5000 - ret = nfs_commit_inode(inode, flags);
5001 - if (ret >= 0) {
5002 - if (wbc->sync_mode == WB_SYNC_NONE) {
5003 - if (ret < wbc->nr_to_write)
5004 - wbc->nr_to_write -= ret;
5005 - else
5006 - wbc->nr_to_write = 0;
5007 - }
5008 - return 0;
5009 - }
5010 + ret = __nfs_commit_inode(inode, flags, wbc);
5011 + if (!ret) {
5012 + if (flags & FLUSH_SYNC)
5013 + return 0;
5014 + } else if (atomic_long_read(&nfsi->commit_info.ncommit))
5015 + goto out_mark_dirty;
5016 +
5017 +check_requests_outstanding:
5018 + if (!atomic_read(&nfsi->commit_info.rpcs_out))
5019 + return ret;
5020 out_mark_dirty:
5021 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
5022 return ret;
5023 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
5024 index 76e237bd989b..6914633037a5 100644
5025 --- a/include/drm/drm_crtc_helper.h
5026 +++ b/include/drm/drm_crtc_helper.h
5027 @@ -77,5 +77,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev);
5028
5029 void drm_kms_helper_poll_disable(struct drm_device *dev);
5030 void drm_kms_helper_poll_enable(struct drm_device *dev);
5031 +bool drm_kms_helper_is_poll_worker(void);
5032
5033 #endif
5034 diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
5035 index 71bbaaec836d..305304965b89 100644
5036 --- a/include/drm/drm_drv.h
5037 +++ b/include/drm/drm_drv.h
5038 @@ -55,6 +55,7 @@ struct drm_mode_create_dumb;
5039 #define DRIVER_ATOMIC 0x10000
5040 #define DRIVER_KMS_LEGACY_CONTEXT 0x20000
5041 #define DRIVER_SYNCOBJ 0x40000
5042 +#define DRIVER_PREFER_XBGR_30BPP 0x80000
5043
5044 /**
5045 * struct drm_driver - DRM driver structure
5046 diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
5047 index 3b609edffa8f..be3aef6839f6 100644
5048 --- a/include/linux/compiler-clang.h
5049 +++ b/include/linux/compiler-clang.h
5050 @@ -19,3 +19,8 @@
5051
5052 #define randomized_struct_fields_start struct {
5053 #define randomized_struct_fields_end };
5054 +
5055 +/* Clang doesn't have a way to turn it off per-function, yet. */
5056 +#ifdef __noretpoline
5057 +#undef __noretpoline
5058 +#endif
5059 diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
5060 index bf09213895f7..a1ffbf25873f 100644
5061 --- a/include/linux/compiler-gcc.h
5062 +++ b/include/linux/compiler-gcc.h
5063 @@ -93,6 +93,10 @@
5064 #define __weak __attribute__((weak))
5065 #define __alias(symbol) __attribute__((alias(#symbol)))
5066
5067 +#ifdef RETPOLINE
5068 +#define __noretpoline __attribute__((indirect_branch("keep")))
5069 +#endif
5070 +
5071 /*
5072 * it doesn't make sense on ARM (currently the only user of __naked)
5073 * to trace naked functions because then mcount is called without
5074 diff --git a/include/linux/init.h b/include/linux/init.h
5075 index 943139a563e3..07cab8a053af 100644
5076 --- a/include/linux/init.h
5077 +++ b/include/linux/init.h
5078 @@ -6,10 +6,10 @@
5079 #include <linux/types.h>
5080
5081 /* Built-in __init functions needn't be compiled with retpoline */
5082 -#if defined(RETPOLINE) && !defined(MODULE)
5083 -#define __noretpoline __attribute__((indirect_branch("keep")))
5084 +#if defined(__noretpoline) && !defined(MODULE)
5085 +#define __noinitretpoline __noretpoline
5086 #else
5087 -#define __noretpoline
5088 +#define __noinitretpoline
5089 #endif
5090
5091 /* These macros are used to mark some functions or
5092 @@ -47,7 +47,7 @@
5093
5094 /* These are for everybody (although not all archs will actually
5095 discard it in modules) */
5096 -#define __init __section(.init.text) __cold __inittrace __latent_entropy __noretpoline
5097 +#define __init __section(.init.text) __cold __inittrace __latent_entropy __noinitretpoline
5098 #define __initdata __section(.init.data)
5099 #define __initconst __section(.init.rodata)
5100 #define __exitdata __section(.exit.data)
5101 diff --git a/include/linux/nospec.h b/include/linux/nospec.h
5102 index 132e3f5a2e0d..e791ebc65c9c 100644
5103 --- a/include/linux/nospec.h
5104 +++ b/include/linux/nospec.h
5105 @@ -5,6 +5,7 @@
5106
5107 #ifndef _LINUX_NOSPEC_H
5108 #define _LINUX_NOSPEC_H
5109 +#include <asm/barrier.h>
5110
5111 /**
5112 * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
5113 @@ -29,26 +30,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
5114 }
5115 #endif
5116
5117 -/*
5118 - * Warn developers about inappropriate array_index_nospec() usage.
5119 - *
5120 - * Even if the CPU speculates past the WARN_ONCE branch, the
5121 - * sign bit of @index is taken into account when generating the
5122 - * mask.
5123 - *
5124 - * This warning is compiled out when the compiler can infer that
5125 - * @index and @size are less than LONG_MAX.
5126 - */
5127 -#define array_index_mask_nospec_check(index, size) \
5128 -({ \
5129 - if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
5130 - "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
5131 - _mask = 0; \
5132 - else \
5133 - _mask = array_index_mask_nospec(index, size); \
5134 - _mask; \
5135 -})
5136 -
5137 /*
5138 * array_index_nospec - sanitize an array index after a bounds check
5139 *
5140 @@ -67,7 +48,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
5141 ({ \
5142 typeof(index) _i = (index); \
5143 typeof(size) _s = (size); \
5144 - unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
5145 + unsigned long _mask = array_index_mask_nospec(_i, _s); \
5146 \
5147 BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
5148 BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
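With the checking wrapper removed, array_index_nospec() reduces to the plain mask-and-index form. A minimal usage sketch of the intended bounds-check-then-clamp pattern (the function, table and NR_ENTRIES are hypothetical):

        /* Guard an attacker-influenced index against speculative
         * out-of-bounds reads; all names here are hypothetical.
         */
        static u32 table_lookup(unsigned long idx)
        {
                if (idx >= NR_ENTRIES)
                        return 0;
                idx = array_index_nospec(idx, NR_ENTRIES);
                return table[idx];
        }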
5149 diff --git a/include/linux/tpm.h b/include/linux/tpm.h
5150 index 5a090f5ab335..881312d85574 100644
5151 --- a/include/linux/tpm.h
5152 +++ b/include/linux/tpm.h
5153 @@ -50,6 +50,7 @@ struct tpm_class_ops {
5154 unsigned long *timeout_cap);
5155 int (*request_locality)(struct tpm_chip *chip, int loc);
5156 void (*relinquish_locality)(struct tpm_chip *chip, int loc);
5157 + void (*clk_enable)(struct tpm_chip *chip, bool value);
5158 };
5159
5160 #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
5161 diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
5162 index 0eae11fc7a23..1c527abb1ae5 100644
5163 --- a/include/linux/workqueue.h
5164 +++ b/include/linux/workqueue.h
5165 @@ -467,6 +467,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
5166
5167 extern void workqueue_set_max_active(struct workqueue_struct *wq,
5168 int max_active);
5169 +extern struct work_struct *current_work(void);
5170 extern bool current_is_workqueue_rescuer(void);
5171 extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
5172 extern unsigned int work_busy(struct work_struct *work);
5173 diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
5174 index 7fb57e905526..7bc752fc98de 100644
5175 --- a/include/scsi/scsi_cmnd.h
5176 +++ b/include/scsi/scsi_cmnd.h
5177 @@ -69,6 +69,9 @@ struct scsi_cmnd {
5178 struct list_head list; /* scsi_cmnd participates in queue lists */
5179 struct list_head eh_entry; /* entry for the host eh_cmd_q */
5180 struct delayed_work abort_work;
5181 +
5182 + struct rcu_head rcu;
5183 +
5184 int eh_eflags; /* Used by error handler */
5185
5186 /*
5187 diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
5188 index 1a1df0d21ee3..a8b7bf879ced 100644
5189 --- a/include/scsi/scsi_host.h
5190 +++ b/include/scsi/scsi_host.h
5191 @@ -571,8 +571,6 @@ struct Scsi_Host {
5192 struct blk_mq_tag_set tag_set;
5193 };
5194
5195 - struct rcu_head rcu;
5196 -
5197 atomic_t host_busy; /* commands actually active on low-level */
5198 atomic_t host_blocked;
5199
5200 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
5201 index 8365a52a74c5..d0c6b50792c8 100644
5202 --- a/kernel/workqueue.c
5203 +++ b/kernel/workqueue.c
5204 @@ -4184,6 +4184,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
5205 }
5206 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
5207
5208 +/**
5209 + * current_work - retrieve %current task's work struct
5210 + *
5211 + * Determine if %current task is a workqueue worker and what it's working on.
5212 + * Useful to find out the context that the %current task is running in.
5213 + *
5214 + * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
5215 + */
5216 +struct work_struct *current_work(void)
5217 +{
5218 + struct worker *worker = current_wq_worker();
5219 +
5220 + return worker ? worker->current_work : NULL;
5221 +}
5222 +EXPORT_SYMBOL(current_work);
5223 +
5224 /**
5225 * current_is_workqueue_rescuer - is %current workqueue rescuer?
5226 *
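A usage sketch for the newly exported helper: a driver can test whether it is currently running from one of its own work items, which is the sort of check the drm_kms_helper_is_poll_worker() declaration added earlier in this patch exists to answer (everything below except current_work() is hypothetical):

        static struct delayed_work poll_work;   /* hypothetical work item */

        static bool running_from_poll_worker(void)
        {
                struct work_struct *work = current_work();

                /* current_work() is non-NULL only inside a workqueue
                 * worker; compare against our own item to detect it.
                 */
                return work == &poll_work.work;
        }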
5227 diff --git a/lib/bug.c b/lib/bug.c
5228 index 1e094408c893..d2c9a099561a 100644
5229 --- a/lib/bug.c
5230 +++ b/lib/bug.c
5231 @@ -150,6 +150,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
5232 return BUG_TRAP_TYPE_NONE;
5233
5234 bug = find_bug(bugaddr);
5235 + if (!bug)
5236 + return BUG_TRAP_TYPE_NONE;
5237
5238 file = NULL;
5239 line = 0;
5240 diff --git a/mm/memblock.c b/mm/memblock.c
5241 index 91205780e6b1..6dd303717a4d 100644
5242 --- a/mm/memblock.c
5243 +++ b/mm/memblock.c
5244 @@ -1107,7 +1107,7 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
5245 struct memblock_type *type = &memblock.memory;
5246 unsigned int right = type->cnt;
5247 unsigned int mid, left = 0;
5248 - phys_addr_t addr = PFN_PHYS(pfn + 1);
5249 + phys_addr_t addr = PFN_PHYS(++pfn);
5250
5251 do {
5252 mid = (right + left) / 2;
5253 @@ -1118,15 +1118,15 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
5254 type->regions[mid].size))
5255 left = mid + 1;
5256 else {
5257 - /* addr is within the region, so pfn + 1 is valid */
5258 - return min(pfn + 1, max_pfn);
5259 + /* addr is within the region, so pfn is valid */
5260 + return pfn;
5261 }
5262 } while (left < right);
5263
5264 if (right == type->cnt)
5265 - return max_pfn;
5266 + return -1UL;
5267 else
5268 - return min(PHYS_PFN(type->regions[right].base), max_pfn);
5269 + return PHYS_PFN(type->regions[right].base);
5270 }
5271
5272 /**
5273 diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
5274 index 279527f8b1fe..59baaecd3e54 100644
5275 --- a/net/bridge/netfilter/ebt_among.c
5276 +++ b/net/bridge/netfilter/ebt_among.c
5277 @@ -172,18 +172,35 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
5278 return true;
5279 }
5280
5281 +static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
5282 +{
5283 + return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
5284 +}
5285 +
5286 static int ebt_among_mt_check(const struct xt_mtchk_param *par)
5287 {
5288 const struct ebt_among_info *info = par->matchinfo;
5289 const struct ebt_entry_match *em =
5290 container_of(par->matchinfo, const struct ebt_entry_match, data);
5291 - int expected_length = sizeof(struct ebt_among_info);
5292 + unsigned int expected_length = sizeof(struct ebt_among_info);
5293 const struct ebt_mac_wormhash *wh_dst, *wh_src;
5294 int err;
5295
5296 + if (expected_length > em->match_size)
5297 + return -EINVAL;
5298 +
5299 wh_dst = ebt_among_wh_dst(info);
5300 - wh_src = ebt_among_wh_src(info);
5301 + if (poolsize_invalid(wh_dst))
5302 + return -EINVAL;
5303 +
5304 expected_length += ebt_mac_wormhash_size(wh_dst);
5305 + if (expected_length > em->match_size)
5306 + return -EINVAL;
5307 +
5308 + wh_src = ebt_among_wh_src(info);
5309 + if (poolsize_invalid(wh_src))
5310 + return -EINVAL;
5311 +
5312 expected_length += ebt_mac_wormhash_size(wh_src);
5313
5314 if (em->match_size != EBT_ALIGN(expected_length)) {
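The ordering of the new checks is the point: each claimed length is validated against em->match_size before the corresponding wormhash pointer is dereferenced, and poolsize_invalid() rejects pool sizes whose multiplication by the tuple size would overflow the running total. The same bounded-parse idiom in isolation (struct and field names hypothetical):

        /* Parsing an untrusted buffer of buf_len bytes. */
        if (buf_len < sizeof(struct hdr))
                return -EINVAL;         /* the header itself must fit */
        if (h->count >= INT_MAX / sizeof(struct elem))
                return -EINVAL;         /* count * elem_size would overflow */
        if (buf_len - sizeof(struct hdr) < h->count * sizeof(struct elem))
                return -EINVAL;         /* the claimed payload must fit too */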
5315 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
5316 index 3b3dcf719e07..16eb99458df4 100644
5317 --- a/net/bridge/netfilter/ebtables.c
5318 +++ b/net/bridge/netfilter/ebtables.c
5319 @@ -2053,7 +2053,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
5320 if (match_kern)
5321 match_kern->match_size = ret;
5322
5323 - WARN_ON(type == EBT_COMPAT_TARGET && size_left);
5324 + if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
5325 + return -EINVAL;
5326 +
5327 match32 = (struct compat_ebt_entry_mwt *) buf;
5328 }
5329
5330 @@ -2109,6 +2111,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
5331 *
5332 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
5333 */
5334 + for (i = 0; i < 4 ; ++i) {
5335 + if (offsets[i] >= *total)
5336 + return -EINVAL;
5337 + if (i == 0)
5338 + continue;
5339 + if (offsets[i-1] > offsets[i])
5340 + return -EINVAL;
5341 + }
5342 +
5343 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
5344 struct compat_ebt_entry_mwt *match32;
5345 unsigned int size;
5346 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
5347 index 9e2770fd00be..aa4c3b7f7da4 100644
5348 --- a/net/ipv4/netfilter/arp_tables.c
5349 +++ b/net/ipv4/netfilter/arp_tables.c
5350 @@ -257,6 +257,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
5351 }
5352 if (table_base + v
5353 != arpt_next_entry(e)) {
5354 + if (unlikely(stackidx >= private->stacksize)) {
5355 + verdict = NF_DROP;
5356 + break;
5357 + }
5358 jumpstack[stackidx++] = e;
5359 }
5360
5361 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
5362 index 39286e543ee6..cadb82a906b8 100644
5363 --- a/net/ipv4/netfilter/ip_tables.c
5364 +++ b/net/ipv4/netfilter/ip_tables.c
5365 @@ -335,8 +335,13 @@ ipt_do_table(struct sk_buff *skb,
5366 continue;
5367 }
5368 if (table_base + v != ipt_next_entry(e) &&
5369 - !(e->ip.flags & IPT_F_GOTO))
5370 + !(e->ip.flags & IPT_F_GOTO)) {
5371 + if (unlikely(stackidx >= private->stacksize)) {
5372 + verdict = NF_DROP;
5373 + break;
5374 + }
5375 jumpstack[stackidx++] = e;
5376 + }
5377
5378 e = get_entry(table_base, v);
5379 continue;
5380 diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
5381 index 24a8c2e63e3d..c07e9db95ccc 100644
5382 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
5383 +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
5384 @@ -107,12 +107,6 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
5385
5386 local_bh_disable();
5387 if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
5388 - list_del_rcu(&c->list);
5389 - spin_unlock(&cn->lock);
5390 - local_bh_enable();
5391 -
5392 - unregister_netdevice_notifier(&c->notifier);
5393 -
5394 /* In case anyone still accesses the file, the open/close
5395 * functions are also incrementing the refcount on their own,
5396 * so it's safe to remove the entry even if it's in use. */
5397 @@ -120,6 +114,12 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
5398 if (cn->procdir)
5399 proc_remove(c->pde);
5400 #endif
5401 + list_del_rcu(&c->list);
5402 + spin_unlock(&cn->lock);
5403 + local_bh_enable();
5404 +
5405 + unregister_netdevice_notifier(&c->notifier);
5406 +
5407 return;
5408 }
5409 local_bh_enable();
5410 diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
5411 index 39970e212ad5..9bf260459f83 100644
5412 --- a/net/ipv6/netfilter.c
5413 +++ b/net/ipv6/netfilter.c
5414 @@ -21,18 +21,19 @@
5415 int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
5416 {
5417 const struct ipv6hdr *iph = ipv6_hdr(skb);
5418 + struct sock *sk = sk_to_full_sk(skb->sk);
5419 unsigned int hh_len;
5420 struct dst_entry *dst;
5421 struct flowi6 fl6 = {
5422 - .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
5423 + .flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
5424 .flowi6_mark = skb->mark,
5425 - .flowi6_uid = sock_net_uid(net, skb->sk),
5426 + .flowi6_uid = sock_net_uid(net, sk),
5427 .daddr = iph->daddr,
5428 .saddr = iph->saddr,
5429 };
5430 int err;
5431
5432 - dst = ip6_route_output(net, skb->sk, &fl6);
5433 + dst = ip6_route_output(net, sk, &fl6);
5434 err = dst->error;
5435 if (err) {
5436 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
5437 @@ -50,7 +51,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
5438 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
5439 xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
5440 skb_dst_set(skb, NULL);
5441 - dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
5442 + dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
5443 if (IS_ERR(dst))
5444 return PTR_ERR(dst);
5445 skb_dst_set(skb, dst);
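skb->sk can point at a request socket that lacks full-socket fields such as sk_bound_dev_if, so every lookup in this function now goes through the normalized pointer; sk_to_full_sk() maps a request socket to its listener and passes NULL through unchanged. The pattern in isolation:

        /* Normalize before reading full-socket fields. */
        struct sock *sk = sk_to_full_sk(skb->sk);
        int oif = sk ? sk->sk_bound_dev_if : 0;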
5446 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
5447 index 01bd3ee5ebc6..a0a31972fc75 100644
5448 --- a/net/ipv6/netfilter/ip6_tables.c
5449 +++ b/net/ipv6/netfilter/ip6_tables.c
5450 @@ -357,6 +357,10 @@ ip6t_do_table(struct sk_buff *skb,
5451 }
5452 if (table_base + v != ip6t_next_entry(e) &&
5453 !(e->ipv6.flags & IP6T_F_GOTO)) {
5454 + if (unlikely(stackidx >= private->stacksize)) {
5455 + verdict = NF_DROP;
5456 + break;
5457 + }
5458 jumpstack[stackidx++] = e;
5459 }
5460
5461 diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
5462 index 46d6dba50698..c5053dbfc391 100644
5463 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
5464 +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
5465 @@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
5466 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
5467 target, maniptype))
5468 return false;
5469 +
5470 + /* must reload, offset might have changed */
5471 + ipv6h = (void *)skb->data + iphdroff;
5472 +
5473 manip_addr:
5474 if (maniptype == NF_NAT_MANIP_SRC)
5475 ipv6h->saddr = target->src.u3.in6;
5476 diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
5477 index fbce552a796e..7d7466dbf663 100644
5478 --- a/net/netfilter/nf_nat_proto_common.c
5479 +++ b/net/netfilter/nf_nat_proto_common.c
5480 @@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
5481 const struct nf_conn *ct,
5482 u16 *rover)
5483 {
5484 - unsigned int range_size, min, i;
5485 + unsigned int range_size, min, max, i;
5486 __be16 *portptr;
5487 u_int16_t off;
5488
5489 @@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
5490 }
5491 } else {
5492 min = ntohs(range->min_proto.all);
5493 - range_size = ntohs(range->max_proto.all) - min + 1;
5494 + max = ntohs(range->max_proto.all);
5495 + if (unlikely(max < min))
5496 + swap(max, min);
5497 + range_size = max - min + 1;
5498 }
5499
5500 if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
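The swap() guards against a range specified backwards: min and max are unsigned, so without it the subtraction wraps. Worked numbers:

        /* min = 2000, max = 1000 (both unsigned int):
         *   before: range_size = 1000 - 2000 + 1 = 4294966297 (wrapped)
         *   after:  swap(max, min); range_size = 2000 - 1000 + 1 = 1001
         */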
5501 diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
5502 index daf45da448fa..bb5d6a058fb7 100644
5503 --- a/net/netfilter/xt_IDLETIMER.c
5504 +++ b/net/netfilter/xt_IDLETIMER.c
5505 @@ -147,11 +147,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
5506 (unsigned long) info->timer);
5507 info->timer->refcnt = 1;
5508
5509 + INIT_WORK(&info->timer->work, idletimer_tg_work);
5510 +
5511 mod_timer(&info->timer->timer,
5512 msecs_to_jiffies(info->timeout * 1000) + jiffies);
5513
5514 - INIT_WORK(&info->timer->work, idletimer_tg_work);
5515 -
5516 return 0;
5517
5518 out_free_attr:
5519 @@ -192,7 +192,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
5520 pr_debug("timeout value is zero\n");
5521 return -EINVAL;
5522 }
5523 -
5524 + if (info->timeout >= INT_MAX / 1000) {
5525 + pr_debug("timeout value is too big\n");
5526 + return -EINVAL;
5527 + }
5528 if (info->label[0] == '\0' ||
5529 strnlen(info->label,
5530 MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
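The new upper bound exists because info->timeout is in seconds and is multiplied by 1000 before msecs_to_jiffies(); the product is computed in 32-bit arithmetic, so values at or above INT_MAX / 1000 would wrap. Spelled out:

        /* INT_MAX == 2147483647, so INT_MAX / 1000 == 2147483.
         * A timeout of 2147484 s or more makes timeout * 1000 overflow,
         * which the new check rejects before mod_timer() ever sees it.
         */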
5531 diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
5532 index 3ba31c194cce..0858fe17e14a 100644
5533 --- a/net/netfilter/xt_LED.c
5534 +++ b/net/netfilter/xt_LED.c
5535 @@ -141,10 +141,11 @@ static int led_tg_check(const struct xt_tgchk_param *par)
5536 goto exit_alloc;
5537 }
5538
5539 - /* See if we need to set up a timer */
5540 - if (ledinfo->delay > 0)
5541 - setup_timer(&ledinternal->timer, led_timeout_callback,
5542 - (unsigned long)ledinternal);
5543 + /* Since the ledinternal timer can be shared between multiple targets,
5544 + * always set it up, even if the current target does not need it
5545 + */
5546 + setup_timer(&ledinternal->timer, led_timeout_callback,
5547 + (unsigned long)ledinternal);
5548
5549 list_add_tail(&ledinternal->list, &xt_led_triggers);
5550
5551 @@ -181,8 +182,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
5552
5553 list_del(&ledinternal->list);
5554
5555 - if (ledinfo->delay > 0)
5556 - del_timer_sync(&ledinternal->timer);
5557 + del_timer_sync(&ledinternal->timer);
5558
5559 led_trigger_unregister(&ledinternal->netfilter_led_trigger);
5560
5561 diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
5562 index 5da8746f7b88..b8a3e740ffd4 100644
5563 --- a/net/netfilter/xt_hashlimit.c
5564 +++ b/net/netfilter/xt_hashlimit.c
5565 @@ -774,7 +774,7 @@ hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par,
5566 if (!dh->rateinfo.prev_window &&
5567 (dh->rateinfo.current_rate <= dh->rateinfo.burst)) {
5568 spin_unlock(&dh->lock);
5569 - rcu_read_unlock_bh();
5570 + local_bh_enable();
5571 return !(cfg->mode & XT_HASHLIMIT_INVERT);
5572 } else {
5573 goto overlimit;
5574 diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
5575 index 745f145d4c4d..a6d604fd9695 100644
5576 --- a/net/smc/af_smc.c
5577 +++ b/net/smc/af_smc.c
5578 @@ -1351,8 +1351,10 @@ static int smc_create(struct net *net, struct socket *sock, int protocol,
5579 smc->use_fallback = false; /* assume rdma capability first */
5580 rc = sock_create_kern(net, PF_INET, SOCK_STREAM,
5581 IPPROTO_TCP, &smc->clcsock);
5582 - if (rc)
5583 + if (rc) {
5584 sk_common_release(sk);
5585 + goto out;
5586 + }
5587 smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
5588 smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
5589
5590 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
5591 index 6bed45dc2cb1..7143da06d702 100644
5592 --- a/scripts/Makefile.build
5593 +++ b/scripts/Makefile.build
5594 @@ -261,6 +261,8 @@ __objtool_obj := $(objtree)/tools/objtool/objtool
5595
5596 objtool_args = $(if $(CONFIG_UNWINDER_ORC),orc generate,check)
5597
5598 +objtool_args += $(if $(part-of-module), --module,)
5599 +
5600 ifndef CONFIG_FRAME_POINTER
5601 objtool_args += --no-fp
5602 endif
5603 @@ -269,6 +271,12 @@ objtool_args += --no-unreachable
5604 else
5605 objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
5606 endif
5607 +ifdef CONFIG_RETPOLINE
5608 +ifneq ($(RETPOLINE_CFLAGS),)
5609 + objtool_args += --retpoline
5610 +endif
5611 +endif
5612 +
5613
5614 ifdef CONFIG_MODVERSIONS
5615 objtool_o = $(@D)/.tmp_$(@F)
5616 diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
5617 index 0b46136a91a8..aac94d962ed6 100644
5618 --- a/scripts/Makefile.lib
5619 +++ b/scripts/Makefile.lib
5620 @@ -294,11 +294,11 @@ cmd_dt_S_dtb= \
5621 echo '\#include <asm-generic/vmlinux.lds.h>'; \
5622 echo '.section .dtb.init.rodata,"a"'; \
5623 echo '.balign STRUCT_ALIGNMENT'; \
5624 - echo '.global __dtb_$(*F)_begin'; \
5625 - echo '__dtb_$(*F)_begin:'; \
5626 + echo '.global __dtb_$(subst -,_,$(*F))_begin'; \
5627 + echo '__dtb_$(subst -,_,$(*F))_begin:'; \
5628 echo '.incbin "$<" '; \
5629 - echo '__dtb_$(*F)_end:'; \
5630 - echo '.global __dtb_$(*F)_end'; \
5631 + echo '__dtb_$(subst -,_,$(*F))_end:'; \
5632 + echo '.global __dtb_$(subst -,_,$(*F))_end'; \
5633 echo '.balign STRUCT_ALIGNMENT'; \
5634 ) > $@
5635
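The $(subst -,_,...) calls matter because the file stem is pasted into assembler labels, and '-' is not a valid character in a symbol name. For a blob built from a hypothetical foo-bar.dts, C code would now reference:

        /* Labels emitted by the rule above for foo-bar.dtb: */
        extern char __dtb_foo_bar_begin[];      /* was __dtb_foo-bar_begin,
                                                   which is not a valid symbol */
        extern char __dtb_foo_bar_end[];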
5636 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
5637 index dea11d1babf5..1f3aa466ac9b 100644
5638 --- a/sound/core/seq/seq_clientmgr.c
5639 +++ b/sound/core/seq/seq_clientmgr.c
5640 @@ -906,7 +906,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
5641 static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
5642 struct snd_seq_event *event,
5643 struct file *file, int blocking,
5644 - int atomic, int hop)
5645 + int atomic, int hop,
5646 + struct mutex *mutexp)
5647 {
5648 struct snd_seq_event_cell *cell;
5649 int err;
5650 @@ -944,7 +945,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
5651 return -ENXIO; /* queue is not allocated */
5652
5653 /* allocate an event cell */
5654 - err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
5655 + err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
5656 + file, mutexp);
5657 if (err < 0)
5658 return err;
5659
5660 @@ -1013,12 +1015,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
5661 return -ENXIO;
5662
5663 /* allocate the pool now if the pool is not allocated yet */
5664 + mutex_lock(&client->ioctl_mutex);
5665 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
5666 - mutex_lock(&client->ioctl_mutex);
5667 err = snd_seq_pool_init(client->pool);
5668 - mutex_unlock(&client->ioctl_mutex);
5669 if (err < 0)
5670 - return -ENOMEM;
5671 + goto out;
5672 }
5673
5674 /* only process whole events */
5675 @@ -1069,7 +1070,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
5676 /* ok, enqueue it */
5677 err = snd_seq_client_enqueue_event(client, &event, file,
5678 !(file->f_flags & O_NONBLOCK),
5679 - 0, 0);
5680 + 0, 0, &client->ioctl_mutex);
5681 if (err < 0)
5682 break;
5683
5684 @@ -1080,6 +1081,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
5685 written += len;
5686 }
5687
5688 + out:
5689 + mutex_unlock(&client->ioctl_mutex);
5690 return written ? written : err;
5691 }
5692
5693 @@ -1834,6 +1837,9 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
5694 (! snd_seq_write_pool_allocated(client) ||
5695 info->output_pool != client->pool->size)) {
5696 if (snd_seq_write_pool_allocated(client)) {
5697 + /* is the pool in use? */
5698 + if (atomic_read(&client->pool->counter))
5699 + return -EBUSY;
5700 /* remove all existing cells */
5701 snd_seq_pool_mark_closing(client->pool);
5702 snd_seq_queue_client_leave_cells(client->number);
5703 @@ -2256,7 +2262,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
5704 if (! cptr->accept_output)
5705 result = -EPERM;
5706 else /* send it */
5707 - result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
5708 + result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
5709 + atomic, hop, NULL);
5710
5711 snd_seq_client_unlock(cptr);
5712 return result;
5713 diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
5714 index a8c2822e0198..72c0302a55d2 100644
5715 --- a/sound/core/seq/seq_fifo.c
5716 +++ b/sound/core/seq/seq_fifo.c
5717 @@ -125,7 +125,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
5718 return -EINVAL;
5719
5720 snd_use_lock_use(&f->use_lock);
5721 - err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
5722 + err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
5723 if (err < 0) {
5724 if ((err == -ENOMEM) || (err == -EAGAIN))
5725 atomic_inc(&f->overflow);
5726 diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
5727 index f763682584a8..ab1112e90f88 100644
5728 --- a/sound/core/seq/seq_memory.c
5729 +++ b/sound/core/seq/seq_memory.c
5730 @@ -220,7 +220,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
5731 */
5732 static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
5733 struct snd_seq_event_cell **cellp,
5734 - int nonblock, struct file *file)
5735 + int nonblock, struct file *file,
5736 + struct mutex *mutexp)
5737 {
5738 struct snd_seq_event_cell *cell;
5739 unsigned long flags;
5740 @@ -244,7 +245,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
5741 set_current_state(TASK_INTERRUPTIBLE);
5742 add_wait_queue(&pool->output_sleep, &wait);
5743 spin_unlock_irq(&pool->lock);
5744 + if (mutexp)
5745 + mutex_unlock(mutexp);
5746 schedule();
5747 + if (mutexp)
5748 + mutex_lock(mutexp);
5749 spin_lock_irq(&pool->lock);
5750 remove_wait_queue(&pool->output_sleep, &wait);
5751 /* interrupted? */
5752 @@ -287,7 +292,7 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
5753 */
5754 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
5755 struct snd_seq_event_cell **cellp, int nonblock,
5756 - struct file *file)
5757 + struct file *file, struct mutex *mutexp)
5758 {
5759 int ncells, err;
5760 unsigned int extlen;
5761 @@ -304,7 +309,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
5762 if (ncells >= pool->total_elements)
5763 return -ENOMEM;
5764
5765 - err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
5766 + err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
5767 if (err < 0)
5768 return err;
5769
5770 @@ -330,7 +335,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
5771 int size = sizeof(struct snd_seq_event);
5772 if (len < size)
5773 size = len;
5774 - err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
5775 + err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
5776 + mutexp);
5777 if (err < 0)
5778 goto __error;
5779 if (cell->event.data.ext.ptr == NULL)
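Threading the caller's ioctl_mutex down to the allocator lets the blocking path drop it across schedule(): a writer sleeping for a free cell no longer holds the mutex that other ioctls on the same client need in order to make progress, including the paths that would free cells. The core of the pattern, isolated from the hunk:

        /* Sleep without holding the caller's mutex. */
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&pool->output_sleep, &wait);
        spin_unlock_irq(&pool->lock);
        if (mutexp)
                mutex_unlock(mutexp);   /* let other ioctl paths run */
        schedule();
        if (mutexp)
                mutex_lock(mutexp);     /* re-take before rechecking state */
        spin_lock_irq(&pool->lock);
        remove_wait_queue(&pool->output_sleep, &wait);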
5780 diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
5781 index 32f959c17786..3abe306c394a 100644
5782 --- a/sound/core/seq/seq_memory.h
5783 +++ b/sound/core/seq/seq_memory.h
5784 @@ -66,7 +66,8 @@ struct snd_seq_pool {
5785 void snd_seq_cell_free(struct snd_seq_event_cell *cell);
5786
5787 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
5788 - struct snd_seq_event_cell **cellp, int nonblock, struct file *file);
5789 + struct snd_seq_event_cell **cellp, int nonblock,
5790 + struct file *file, struct mutex *mutexp);
5791
5792 /* return number of unused (free) cells */
5793 static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
5794 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
5795 index 37e1cf8218ff..5b4dbcec6de8 100644
5796 --- a/sound/pci/hda/patch_conexant.c
5797 +++ b/sound/pci/hda/patch_conexant.c
5798 @@ -957,6 +957,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
5799 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
5800 SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
5801 SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
5802 + SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
5803 + SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
5804 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
5805 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
5806 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
5807 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5808 index 454476b47b79..3d19efd2783a 100644
5809 --- a/sound/pci/hda/patch_realtek.c
5810 +++ b/sound/pci/hda/patch_realtek.c
5811 @@ -5129,6 +5129,16 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec,
5812 }
5813 }
5814
5815 +/* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */
5816 +static void alc295_fixup_disable_dac3(struct hda_codec *codec,
5817 + const struct hda_fixup *fix, int action)
5818 +{
5819 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
5820 + hda_nid_t conn[2] = { 0x02, 0x03 };
5821 + snd_hda_override_conn_list(codec, 0x17, 2, conn);
5822 + }
5823 +}
5824 +
5825 /* Hook to update amp GPIO4 for automute */
5826 static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
5827 struct hda_jack_callback *jack)
5828 @@ -5321,6 +5331,7 @@ enum {
5829 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
5830 ALC255_FIXUP_DELL_SPK_NOISE,
5831 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5832 + ALC295_FIXUP_DISABLE_DAC3,
5833 ALC280_FIXUP_HP_HEADSET_MIC,
5834 ALC221_FIXUP_HP_FRONT_MIC,
5835 ALC292_FIXUP_TPT460,
5836 @@ -5335,10 +5346,12 @@ enum {
5837 ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
5838 ALC233_FIXUP_LENOVO_MULTI_CODECS,
5839 ALC294_FIXUP_LENOVO_MIC_LOCATION,
5840 + ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
5841 ALC700_FIXUP_INTEL_REFERENCE,
5842 ALC274_FIXUP_DELL_BIND_DACS,
5843 ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
5844 ALC298_FIXUP_TPT470_DOCK,
5845 + ALC255_FIXUP_DUMMY_LINEOUT_VERB,
5846 };
5847
5848 static const struct hda_fixup alc269_fixups[] = {
5849 @@ -6053,6 +6066,10 @@ static const struct hda_fixup alc269_fixups[] = {
5850 .chained = true,
5851 .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
5852 },
5853 + [ALC295_FIXUP_DISABLE_DAC3] = {
5854 + .type = HDA_FIXUP_FUNC,
5855 + .v.func = alc295_fixup_disable_dac3,
5856 + },
5857 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
5858 .type = HDA_FIXUP_PINS,
5859 .v.pins = (const struct hda_pintbl[]) {
5860 @@ -6138,6 +6155,18 @@ static const struct hda_fixup alc269_fixups[] = {
5861 { }
5862 },
5863 },
5864 + [ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE] = {
5865 + .type = HDA_FIXUP_PINS,
5866 + .v.pins = (const struct hda_pintbl[]) {
5867 + { 0x16, 0x0101102f }, /* Rear Headset HP */
5868 + { 0x19, 0x02a1913c }, /* use as Front headset mic, without its own jack detect */
5869 + { 0x1a, 0x01a19030 }, /* Rear Headset MIC */
5870 + { 0x1b, 0x02011020 },
5871 + { }
5872 + },
5873 + .chained = true,
5874 + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
5875 + },
5876 [ALC700_FIXUP_INTEL_REFERENCE] = {
5877 .type = HDA_FIXUP_VERBS,
5878 .v.verbs = (const struct hda_verb[]) {
5879 @@ -6174,6 +6203,15 @@ static const struct hda_fixup alc269_fixups[] = {
5880 .chained = true,
5881 .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
5882 },
5883 + [ALC255_FIXUP_DUMMY_LINEOUT_VERB] = {
5884 + .type = HDA_FIXUP_PINS,
5885 + .v.pins = (const struct hda_pintbl[]) {
5886 + { 0x14, 0x0201101f },
5887 + { }
5888 + },
5889 + .chained = true,
5890 + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
5891 + },
5892 };
5893
5894 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5895 @@ -6222,10 +6260,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5896 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
5897 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
5898 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
5899 + SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
5900 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
5901 + SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
5902 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
5903 SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
5904 SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
5905 + SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
5906 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5907 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5908 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
5909 @@ -6363,9 +6404,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5910 SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
5911 SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
5912 SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
5913 + SND_PCI_QUIRK(0x17aa, 0x2249, "Thinkpad", ALC292_FIXUP_TPT460),
5914 SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
5915 SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
5916 SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
5917 + SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5918 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
5919 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
5920 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
5921 @@ -6722,7 +6765,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5922 {0x12, 0x90a60120},
5923 {0x14, 0x90170110},
5924 {0x21, 0x0321101f}),
5925 - SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5926 + SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
5927 {0x12, 0xb7a60130},
5928 {0x14, 0x90170110},
5929 {0x21, 0x04211020}),
5930 diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
5931 index 88ff54220007..69ab55956492 100644
5932 --- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
5933 +++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
5934 @@ -604,6 +604,8 @@ static int kabylake_card_late_probe(struct snd_soc_card *card)
5935
5936 list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
5937 codec = pcm->codec_dai->codec;
5938 + snprintf(jack_name, sizeof(jack_name),
5939 + "HDMI/DP,pcm=%d Jack", pcm->device);
5940 err = snd_soc_card_jack_new(card, jack_name,
5941 SND_JACK_AVOUT, &ctx->kabylake_hdmi[i],
5942 NULL, 0);
5943 diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
5944 index 57254f5b2779..694abc628e9b 100644
5945 --- a/tools/objtool/builtin-check.c
5946 +++ b/tools/objtool/builtin-check.c
5947 @@ -29,7 +29,7 @@
5948 #include "builtin.h"
5949 #include "check.h"
5950
5951 -bool no_fp, no_unreachable;
5952 +bool no_fp, no_unreachable, retpoline, module;
5953
5954 static const char * const check_usage[] = {
5955 "objtool check [<options>] file.o",
5956 @@ -39,6 +39,8 @@ static const char * const check_usage[] = {
5957 const struct option check_options[] = {
5958 OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
5959 OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
5960 + OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
5961 + OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
5962 OPT_END(),
5963 };
5964
5965 @@ -53,5 +55,5 @@ int cmd_check(int argc, const char **argv)
5966
5967 objname = argv[0];
5968
5969 - return check(objname, no_fp, no_unreachable, false);
5970 + return check(objname, false);
5971 }
5972 diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
5973 index 91e8e19ff5e0..77ea2b97117d 100644
5974 --- a/tools/objtool/builtin-orc.c
5975 +++ b/tools/objtool/builtin-orc.c
5976 @@ -25,7 +25,6 @@
5977 */
5978
5979 #include <string.h>
5980 -#include <subcmd/parse-options.h>
5981 #include "builtin.h"
5982 #include "check.h"
5983
5984 @@ -36,9 +35,6 @@ static const char *orc_usage[] = {
5985 NULL,
5986 };
5987
5988 -extern const struct option check_options[];
5989 -extern bool no_fp, no_unreachable;
5990 -
5991 int cmd_orc(int argc, const char **argv)
5992 {
5993 const char *objname;
5994 @@ -54,7 +50,7 @@ int cmd_orc(int argc, const char **argv)
5995
5996 objname = argv[0];
5997
5998 - return check(objname, no_fp, no_unreachable, true);
5999 + return check(objname, true);
6000 }
6001
6002 if (!strcmp(argv[0], "dump")) {
6003 diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
6004 index dd526067fed5..28ff40e19a14 100644
6005 --- a/tools/objtool/builtin.h
6006 +++ b/tools/objtool/builtin.h
6007 @@ -17,6 +17,11 @@
6008 #ifndef _BUILTIN_H
6009 #define _BUILTIN_H
6010
6011 +#include <subcmd/parse-options.h>
6012 +
6013 +extern const struct option check_options[];
6014 +extern bool no_fp, no_unreachable, retpoline, module;
6015 +
6016 extern int cmd_check(int argc, const char **argv);
6017 extern int cmd_orc(int argc, const char **argv);
6018
6019 diff --git a/tools/objtool/check.c b/tools/objtool/check.c
6020 index c7fb5c2392ee..9d01d0b1084e 100644
6021 --- a/tools/objtool/check.c
6022 +++ b/tools/objtool/check.c
6023 @@ -18,6 +18,7 @@
6024 #include <string.h>
6025 #include <stdlib.h>
6026
6027 +#include "builtin.h"
6028 #include "check.h"
6029 #include "elf.h"
6030 #include "special.h"
6031 @@ -33,7 +34,6 @@ struct alternative {
6032 };
6033
6034 const char *objname;
6035 -static bool no_fp;
6036 struct cfi_state initial_func_cfi;
6037
6038 struct instruction *find_insn(struct objtool_file *file,
6039 @@ -496,6 +496,7 @@ static int add_jump_destinations(struct objtool_file *file)
6040 * disguise, so convert them accordingly.
6041 */
6042 insn->type = INSN_JUMP_DYNAMIC;
6043 + insn->retpoline_safe = true;
6044 continue;
6045 } else {
6046 /* sibling call */
6047 @@ -547,7 +548,8 @@ static int add_call_destinations(struct objtool_file *file)
6048 if (!insn->call_dest && !insn->ignore) {
6049 WARN_FUNC("unsupported intra-function call",
6050 insn->sec, insn->offset);
6051 - WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
6052 + if (retpoline)
6053 + WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
6054 return -1;
6055 }
6056
6057 @@ -922,7 +924,11 @@ static struct rela *find_switch_table(struct objtool_file *file,
6058 if (find_symbol_containing(file->rodata, text_rela->addend))
6059 continue;
6060
6061 - return find_rela_by_dest(file->rodata, text_rela->addend);
6062 + rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend);
6063 + if (!rodata_rela)
6064 + continue;
6065 +
6066 + return rodata_rela;
6067 }
6068
6069 return NULL;
6070 @@ -1107,6 +1113,41 @@ static int read_unwind_hints(struct objtool_file *file)
6071 return 0;
6072 }
6073
6074 +static int read_retpoline_hints(struct objtool_file *file)
6075 +{
6076 + struct section *sec;
6077 + struct instruction *insn;
6078 + struct rela *rela;
6079 +
6080 + sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
6081 + if (!sec)
6082 + return 0;
6083 +
6084 + list_for_each_entry(rela, &sec->rela_list, list) {
6085 + if (rela->sym->type != STT_SECTION) {
6086 + WARN("unexpected relocation symbol type in %s", sec->name);
6087 + return -1;
6088 + }
6089 +
6090 + insn = find_insn(file, rela->sym->sec, rela->addend);
6091 + if (!insn) {
6092 + WARN("bad .discard.retpoline_safe entry");
6093 + return -1;
6094 + }
6095 +
6096 + if (insn->type != INSN_JUMP_DYNAMIC &&
6097 + insn->type != INSN_CALL_DYNAMIC) {
6098 + WARN_FUNC("retpoline_safe hint not an indirect jump/call",
6099 + insn->sec, insn->offset);
6100 + return -1;
6101 + }
6102 +
6103 + insn->retpoline_safe = true;
6104 + }
6105 +
6106 + return 0;
6107 +}
6108 +
6109 static int decode_sections(struct objtool_file *file)
6110 {
6111 int ret;
6112 @@ -1145,6 +1186,10 @@ static int decode_sections(struct objtool_file *file)
6113 if (ret)
6114 return ret;
6115
6116 + ret = read_retpoline_hints(file);
6117 + if (ret)
6118 + return ret;
6119 +
6120 return 0;
6121 }
6122
6123 @@ -1890,6 +1935,38 @@ static int validate_unwind_hints(struct objtool_file *file)
6124 return warnings;
6125 }
6126
6127 +static int validate_retpoline(struct objtool_file *file)
6128 +{
6129 + struct instruction *insn;
6130 + int warnings = 0;
6131 +
6132 + for_each_insn(file, insn) {
6133 + if (insn->type != INSN_JUMP_DYNAMIC &&
6134 + insn->type != INSN_CALL_DYNAMIC)
6135 + continue;
6136 +
6137 + if (insn->retpoline_safe)
6138 + continue;
6139 +
6140 + /*
6141 + * .init.text code is run before userspace and thus doesn't
6142 + * strictly need retpolines, except for modules: they are
6143 + * loaded late, so they very much do need retpolines in their
6144 + * .init.text
6145 + */
6146 + if (!strcmp(insn->sec->name, ".init.text") && !module)
6147 + continue;
6148 +
6149 + WARN_FUNC("indirect %s found in RETPOLINE build",
6150 + insn->sec, insn->offset,
6151 + insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
6152 +
6153 + warnings++;
6154 + }
6155 +
6156 + return warnings;
6157 +}
6158 +
6159 static bool is_kasan_insn(struct instruction *insn)
6160 {
6161 return (insn->type == INSN_CALL &&
6162 @@ -2021,13 +2098,12 @@ static void cleanup(struct objtool_file *file)
6163 elf_close(file->elf);
6164 }
6165
6166 -int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc)
6167 +int check(const char *_objname, bool orc)
6168 {
6169 struct objtool_file file;
6170 int ret, warnings = 0;
6171
6172 objname = _objname;
6173 - no_fp = _no_fp;
6174
6175 file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY);
6176 if (!file.elf)
6177 @@ -2051,6 +2127,13 @@ int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc)
6178 if (list_empty(&file.insn_list))
6179 goto out;
6180
6181 + if (retpoline) {
6182 + ret = validate_retpoline(&file);
6183 + if (ret < 0)
6184 + return ret;
6185 + warnings += ret;
6186 + }
6187 +
6188 ret = validate_functions(&file);
6189 if (ret < 0)
6190 goto out;
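validate_retpoline() warns on any indirect jump or call that is not flagged retpoline_safe; code that must keep an indirect branch opts out via the .discard.retpoline_safe section that read_retpoline_hints() walks. In the kernel this is wrapped by an annotation macro along these lines (a sketch of the mechanism, not the exact upstream definition):

        /* Record the branch's address in the section objtool scans;
         * read_retpoline_hints() resolves it back to the instruction.
         */
        #define ANNOTATE_RETPOLINE_SAFE                         \
                "999:\n\t"                                      \
                ".pushsection .discard.retpoline_safe\n\t"      \
                ".quad 999b\n\t"        /* assumes a 64-bit build */ \
                ".popsection\n\t"

The --module switch added to scripts/Makefile.build above feeds the 'module' flag used by the .init.text exemption: module init code runs after userspace is up, so it still needs retpolines.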
6191 diff --git a/tools/objtool/check.h b/tools/objtool/check.h
6192 index 23a1d065cae1..c6b68fcb926f 100644
6193 --- a/tools/objtool/check.h
6194 +++ b/tools/objtool/check.h
6195 @@ -45,6 +45,7 @@ struct instruction {
6196 unsigned char type;
6197 unsigned long immediate;
6198 bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
6199 + bool retpoline_safe;
6200 struct symbol *call_dest;
6201 struct instruction *jump_dest;
6202 struct instruction *first_jump_src;
6203 @@ -63,7 +64,7 @@ struct objtool_file {
6204 bool ignore_unreachables, c_file, hints;
6205 };
6206
6207 -int check(const char *objname, bool no_fp, bool no_unreachable, bool orc);
6208 +int check(const char *objname, bool orc);
6209
6210 struct instruction *find_insn(struct objtool_file *file,
6211 struct section *sec, unsigned long offset);
6212 diff --git a/tools/perf/util/trigger.h b/tools/perf/util/trigger.h
6213 index 370138e7e35c..88223bc7c82b 100644
6214 --- a/tools/perf/util/trigger.h
6215 +++ b/tools/perf/util/trigger.h
6216 @@ -12,7 +12,7 @@
6217 * States and transits:
6218 *
6219 *
6220 - * OFF--(on)--> READY --(hit)--> HIT
6221 + * OFF--> ON --> READY --(hit)--> HIT
6222 * ^ |
6223 * | (ready)
6224 * | |
6225 @@ -27,8 +27,9 @@ struct trigger {
6226 volatile enum {
6227 TRIGGER_ERROR = -2,
6228 TRIGGER_OFF = -1,
6229 - TRIGGER_READY = 0,
6230 - TRIGGER_HIT = 1,
6231 + TRIGGER_ON = 0,
6232 + TRIGGER_READY = 1,
6233 + TRIGGER_HIT = 2,
6234 } state;
6235 const char *name;
6236 };
6237 @@ -50,7 +51,7 @@ static inline bool trigger_is_error(struct trigger *t)
6238 static inline void trigger_on(struct trigger *t)
6239 {
6240 TRIGGER_WARN_ONCE(t, TRIGGER_OFF);
6241 - t->state = TRIGGER_READY;
6242 + t->state = TRIGGER_ON;
6243 }
6244
6245 static inline void trigger_ready(struct trigger *t)