Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0132-5.4.33-all-fixes.patch



Revision 3513
Mon May 11 14:36:39 2020 UTC by niro
File size: 329482 bytes
-linux-5.4.33
1 diff --git a/Documentation/sound/hd-audio/index.rst b/Documentation/sound/hd-audio/index.rst
2 index f8a72ffffe66..6e12de9fc34e 100644
3 --- a/Documentation/sound/hd-audio/index.rst
4 +++ b/Documentation/sound/hd-audio/index.rst
5 @@ -8,3 +8,4 @@ HD-Audio
6 models
7 controls
8 dp-mst
9 + realtek-pc-beep
10 diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
11 index 11298f0ce44d..0ea967d34583 100644
12 --- a/Documentation/sound/hd-audio/models.rst
13 +++ b/Documentation/sound/hd-audio/models.rst
14 @@ -216,8 +216,6 @@ alc298-dell-aio
15 ALC298 fixups on Dell AIO machines
16 alc275-dell-xps
17 ALC275 fixups on Dell XPS models
18 -alc256-dell-xps13
19 - ALC256 fixups on Dell XPS13
20 lenovo-spk-noise
21 Workaround for speaker noise on Lenovo machines
22 lenovo-hotkey
23 diff --git a/Documentation/sound/hd-audio/realtek-pc-beep.rst b/Documentation/sound/hd-audio/realtek-pc-beep.rst
24 new file mode 100644
25 index 000000000000..be47c6f76a6e
26 --- /dev/null
27 +++ b/Documentation/sound/hd-audio/realtek-pc-beep.rst
28 @@ -0,0 +1,129 @@
29 +===============================
30 +Realtek PC Beep Hidden Register
31 +===============================
32 +
33 +This file documents the "PC Beep Hidden Register", which is present in certain
34 +Realtek HDA codecs and controls a muxer and pair of passthrough mixers that can
35 +route audio between pins but aren't themselves exposed as HDA widgets. As far
36 +as I can tell, these hidden routes are designed to allow flexible PC Beep output
37 +for codecs that don't have mixer widgets in their output paths. Why it's easier
38 +to hide a mixer behind an undocumented vendor register than to just expose it
39 +as a widget, I have no idea.
40 +
41 +Register Description
42 +====================
43 +
44 +The register is accessed via processing coefficient 0x36 on NID 20h. Bits not
45 +identified below have no discernible effect on my machine, a Dell XPS 13 9350::
46 +
47 +    MSB                           LSB
48 +    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
49 +    | |h|S|L|         | B |R|       |  Known bits
50 +    +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
51 +    |0|0|1|1|  0x7  |0|0x0|1|  0x7  |  Reset value
52 +    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
53 +
54 +1Ah input select (B): 2 bits
55 + When zero, expose the PC Beep line (from the internal beep generator, when
56 + enabled with the Set Beep Generation verb on NID 01h, or else from the
57 + external PCBEEP pin) on the 1Ah pin node. When nonzero, expose the headphone
58 + jack (or possibly Line In on some machines) input instead. If PC Beep is
59 + selected, the 1Ah boost control has no effect.
60 +
61 +Amplify 1Ah loopback, left (L): 1 bit
62 + Amplify the left channel of 1Ah before mixing it into outputs as specified
63 + by h and S bits. Does not affect the level of 1Ah exposed to other widgets.
64 +
65 +Amplify 1Ah loopback, right (R): 1 bit
66 + Amplify the right channel of 1Ah before mixing it into outputs as specified
67 + by h and S bits. Does not affect the level of 1Ah exposed to other widgets.
68 +
69 +Loopback 1Ah to 21h [active low] (h): 1 bit
70 + When zero, mix 1Ah (possibly with amplification, depending on L and R bits)
71 + into 21h (headphone jack on my machine). Mixed signal respects the mute
72 + setting on 21h.
73 +
74 +Loopback 1Ah to 14h (S): 1 bit
75 + When one, mix 1Ah (possibly with amplification, depending on L and R bits)
76 + into 14h (internal speaker on my machine). Mixed signal **ignores** the mute
77 + setting on 14h and is present whenever 14h is configured as an output.
78 +
79 +Path diagrams
80 +=============
81 +
82 +1Ah input selection (DIV is the PC Beep divider set on NID 01h)::
83 +
84 + <Beep generator>   <PCBEEP pin>    <Headphone jack>
85 +         |                |                |
86 +         +--DIV--+--!DIV--+       {1Ah boost control}
87 +                 |                         |
88 +                 +--(b == 0)--+--(b != 0)--+
89 +                              |
90 +             >1Ah (Beep/Headphone Mic/Line In)<
91 +
92 +Loopback of 1Ah to 21h/14h::
93 +
94 +       <1Ah (Beep/Headphone Mic/Line In)>
95 +                       |
96 +                {amplify if L/R}
97 +                       |
98 +          +-----!h-----+-----S-----+
99 +          |                        |
100 +  {21h mute control}              |
101 +          |                        |
102 +  >21h (Headphone)<    >14h (Internal Speaker)<
103 +
104 +Background
105 +==========
106 +
107 +All Realtek HDA codecs have a vendor-defined widget with node ID 20h which
108 +provides access to a bank of registers that control various codec functions.
109 +Registers are read and written via the standard HDA processing coefficient
110 +verbs (Set/Get Coefficient Index, Set/Get Processing Coefficient). The node is
111 +named "Realtek Vendor Registers" in public datasheets' verb listings and,
112 +apart from that, is entirely undocumented.
113 +
114 +This particular register, exposed at coefficient 0x36 and named in commits from
115 +Realtek, is of note: unlike most registers, which seem to control detailed
116 +amplifier parameters not in scope of the HDA specification, it controls audio
117 +routing which could just as easily have been defined using standard HDA mixer
118 +and selector widgets.
119 +
120 +Specifically, it selects between two sources for the input pin widget with Node
121 +ID (NID) 1Ah: the widget's signal can come either from an audio jack (on my
122 +laptop, a Dell XPS 13 9350, it's the headphone jack, but comments in Realtek
123 +commits indicate that it might be a Line In on some machines) or from the PC
124 +Beep line (which is itself multiplexed between the codec's internal beep
125 +generator and external PCBEEP pin, depending on if the beep generator is
126 +enabled via verbs on NID 01h). Additionally, it can mix (with optional
127 +amplification) that signal onto the 21h and/or 14h output pins.
128 +
129 +The register's reset value is 0x3717, corresponding to PC Beep on 1Ah that is
130 +then amplified and mixed into both the headphones and the speakers. Not only
131 +does this violate the HDA specification, which says that "[a vendor defined
132 +beep input pin] connection may be maintained *only* while the Link reset
133 +(**RST#**) is asserted", it means that we cannot ignore the register if we care
134 +about the input that 1Ah would otherwise expose or if the PCBEEP trace is
135 +poorly shielded and picks up chassis noise (both of which are the case on my
136 +machine).
137 +
138 +Unfortunately, there are lots of ways to get this register configuration wrong.
139 +Linux, it seems, has gone through most of them. For one, the register resets
140 +after S3 suspend: judging by existing code, this isn't the case for all vendor
141 +registers, and it's led to some fixes that improve behavior on cold boot but
142 +don't last after suspend. Other fixes have successfully switched the 1Ah input
143 +away from PC Beep but have failed to disable both loopback paths. On my
144 +machine, this means that the headphone input is amplified and looped back to
145 +the headphone output, which uses the exact same pins! As you might expect, this
146 +causes terrible headphone noise, the character of which is controlled by the
147 +1Ah boost control. (If you've seen instructions online to fix XPS 13 headphone
148 +noise by changing "Headphone Mic Boost" in ALSA, now you know why.)
149 +
150 +The information here has been obtained through black-box reverse engineering of
151 +the ALC256 codec's behavior and is not guaranteed to be correct. It likely
152 +also applies for the ALC255, ALC257, ALC235, and ALC236, since those codecs
153 +seem to be close relatives of the ALC256. (They all share one initialization
154 +function.) Additionally, other codecs like the ALC225 and ALC285 also have this
155 +register, judging by existing fixups in ``patch_realtek.c``, but specific
156 +data (e.g. node IDs, bit positions, pin mappings) for those codecs may differ
157 +from what I've described here.
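
Access to this register goes through the standard coefficient verbs the document names. A minimal kernel-C sketch, assuming the bit positions read off the diagram above (h = bit 14, S = bit 13, L = bit 12, B = bits 6:5, R = bit 4) and the generic hda_codec helpers; patch_realtek.c wraps the same verb sequence in its own alc_read_coef_idx()/alc_update_coef_idx() helpers:

    #include <linux/bits.h>
    #include <sound/hda_codec.h>

    /* Select coefficient 0x36 on the vendor widget (NID 0x20), then
     * read or write the processing coefficient. */
    static unsigned int pc_beep_reg_read(struct hda_codec *codec)
    {
        snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x36);
        return snd_hda_codec_read(codec, 0x20, 0, AC_VERB_GET_PROC_COEF, 0);
    }

    static void pc_beep_reg_write(struct hda_codec *codec, unsigned int val)
    {
        snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x36);
        snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, val);
    }

    /* Example: route the jack to 1Ah and disable both loopback paths,
     * leaving the unidentified bits alone. */
    static void pc_beep_reg_quiesce(struct hda_codec *codec)
    {
        unsigned int val = pc_beep_reg_read(codec);

        val |= BIT(14);                     /* h = 1: no loopback to 21h */
        val &= ~BIT(13);                    /* S = 0: no loopback to 14h */
        val = (val & ~(0x3 << 5)) | BIT(5); /* B != 0: jack, not PC Beep */
        pc_beep_reg_write(codec, val);
    }
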
158 diff --git a/Makefile b/Makefile
159 index c2d5975844d9..c09d5a4d2e7a 100644
160 --- a/Makefile
161 +++ b/Makefile
162 @@ -1,7 +1,7 @@
163 # SPDX-License-Identifier: GPL-2.0
164 VERSION = 5
165 PATCHLEVEL = 4
166 -SUBLEVEL = 32
167 +SUBLEVEL = 33
168 EXTRAVERSION =
169 NAME = Kleptomaniac Octopus
170
171 diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts
172 index 3931fb068ff0..91d1018ab75f 100644
173 --- a/arch/arm/boot/dts/dm8148-evm.dts
174 +++ b/arch/arm/boot/dts/dm8148-evm.dts
175 @@ -24,12 +24,12 @@
176
177 &cpsw_emac0 {
178 phy-handle = <&ethphy0>;
179 - phy-mode = "rgmii";
180 + phy-mode = "rgmii-id";
181 };
182
183 &cpsw_emac1 {
184 phy-handle = <&ethphy1>;
185 - phy-mode = "rgmii";
186 + phy-mode = "rgmii-id";
187 };
188
189 &davinci_mdio {
190 diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts
191 index 9e43d5ec0bb2..79ccdd4470f4 100644
192 --- a/arch/arm/boot/dts/dm8148-t410.dts
193 +++ b/arch/arm/boot/dts/dm8148-t410.dts
194 @@ -33,12 +33,12 @@
195
196 &cpsw_emac0 {
197 phy-handle = <&ethphy0>;
198 - phy-mode = "rgmii";
199 + phy-mode = "rgmii-id";
200 };
201
202 &cpsw_emac1 {
203 phy-handle = <&ethphy1>;
204 - phy-mode = "rgmii";
205 + phy-mode = "rgmii-id";
206 };
207
208 &davinci_mdio {
209 diff --git a/arch/arm/boot/dts/dra62x-j5eco-evm.dts b/arch/arm/boot/dts/dra62x-j5eco-evm.dts
210 index 861ab90a3f3a..c16e183822be 100644
211 --- a/arch/arm/boot/dts/dra62x-j5eco-evm.dts
212 +++ b/arch/arm/boot/dts/dra62x-j5eco-evm.dts
213 @@ -24,12 +24,12 @@
214
215 &cpsw_emac0 {
216 phy-handle = <&ethphy0>;
217 - phy-mode = "rgmii";
218 + phy-mode = "rgmii-id";
219 };
220
221 &cpsw_emac1 {
222 phy-handle = <&ethphy1>;
223 - phy-mode = "rgmii";
224 + phy-mode = "rgmii-id";
225 };
226
227 &davinci_mdio {
228 diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
229 index 09d3d54d09ff..1b5578381d78 100644
230 --- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
231 +++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
232 @@ -115,7 +115,7 @@
233 gpio-sck = <&gpy3 1 GPIO_ACTIVE_HIGH>;
234 gpio-mosi = <&gpy3 3 GPIO_ACTIVE_HIGH>;
235 num-chipselects = <1>;
236 - cs-gpios = <&gpy4 3 GPIO_ACTIVE_HIGH>;
237 + cs-gpios = <&gpy4 3 GPIO_ACTIVE_LOW>;
238
239 lcd@0 {
240 compatible = "samsung,ld9040";
241 @@ -124,8 +124,6 @@
242 vci-supply = <&ldo17_reg>;
243 reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>;
244 spi-max-frequency = <1200000>;
245 - spi-cpol;
246 - spi-cpha;
247 power-on-delay = <10>;
248 reset-delay = <10>;
249 panel-width-mm = <90>;
250 diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
251 index 397140454132..6bf93e5ed681 100644
252 --- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
253 +++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
254 @@ -358,8 +358,8 @@
255 };
256
257 &reg_dldo3 {
258 - regulator-min-microvolt = <2800000>;
259 - regulator-max-microvolt = <2800000>;
260 + regulator-min-microvolt = <1800000>;
261 + regulator-max-microvolt = <1800000>;
262 regulator-name = "vdd-csi";
263 };
264
265 diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
266 index 2c0238ce0551..d65aef47ece3 100644
267 --- a/arch/arm64/Makefile
268 +++ b/arch/arm64/Makefile
269 @@ -72,6 +72,10 @@ stack_protector_prepare: prepare0
270 include/generated/asm-offsets.h))
271 endif
272
273 +# Ensure that if the compiler supports branch protection we default it
274 +# off.
275 +KBUILD_CFLAGS += $(call cc-option,-mbranch-protection=none)
276 +
277 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
278 KBUILD_CPPFLAGS += -mbig-endian
279 CHECKFLAGS += -D__AARCH64EB__
280 diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
281 index 1d34e3eefda3..eaf8f83794fd 100644
282 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
283 +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
284 @@ -77,8 +77,7 @@
285 };
286
287 pmu {
288 - compatible = "arm,cortex-a53-pmu",
289 - "arm,armv8-pmuv3";
290 + compatible = "arm,cortex-a53-pmu";
291 interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
292 <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
293 <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
294 diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
295 index d19253891672..ab081efd5971 100644
296 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
297 +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
298 @@ -71,8 +71,7 @@
299 };
300
301 pmu {
302 - compatible = "arm,cortex-a53-pmu",
303 - "arm,armv8-pmuv3";
304 + compatible = "arm,cortex-a53-pmu";
305 interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
306 <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
307 <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
308 diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
309 index 799c75fa7981..34a30842c47a 100644
310 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
311 +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
312 @@ -307,6 +307,7 @@
313 interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
314 dma-coherent;
315 power-domains = <&k3_pds 151 TI_SCI_PD_EXCLUSIVE>;
316 + clocks = <&k3_clks 151 2>, <&k3_clks 151 7>;
317 assigned-clocks = <&k3_clks 151 2>, <&k3_clks 151 7>;
318 assigned-clock-parents = <&k3_clks 151 4>, /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */
319 <&k3_clks 151 9>; /* set PIPE3_TXB_CLK to CLK_12M_RC/256 (for HS only) */
320 @@ -346,6 +347,7 @@
321 interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
322 dma-coherent;
323 power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>;
324 + clocks = <&k3_clks 152 2>;
325 assigned-clocks = <&k3_clks 152 2>;
326 assigned-clock-parents = <&k3_clks 152 4>; /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */
327
328 diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
329 index ca158be21f83..bcb14d11232f 100644
330 --- a/arch/arm64/kernel/armv8_deprecated.c
331 +++ b/arch/arm64/kernel/armv8_deprecated.c
332 @@ -601,7 +601,7 @@ static struct undef_hook setend_hooks[] = {
333 },
334 {
335 /* Thumb mode */
336 - .instr_mask = 0x0000fff7,
337 + .instr_mask = 0xfffffff7,
338 .instr_val = 0x0000b650,
339 .pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK),
340 .pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR),
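
The widened mask matters because arm64's call_undef_hook() matches Thumb code against a 32-bit value: a 16-bit instruction arrives as 0x0000b65x, while a 32-bit Thumb-2 instruction fills all 32 bits (first halfword high, second halfword low). A mask that ignores the upper halfword therefore also fires on any 32-bit instruction whose trailing halfword happens to look like SETEND. A small illustration (the Thumb-2 word is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified form of the undef-hook match. */
    static bool hook_matches(uint32_t instr, uint32_t mask, uint32_t val)
    {
        return (instr & mask) == val;
    }

    int main(void)
    {
        uint32_t t32 = 0xf3afb650; /* 32-bit word ending in 0xb650 */

        printf("%d\n", hook_matches(t32, 0x0000fff7, 0x0000b650)); /* 1: bug   */
        printf("%d\n", hook_matches(t32, 0xfffffff7, 0x0000b650)); /* 0: fixed */
        return 0;
    }
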
341 diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
342 index f97be32bf699..3ad1f76c063a 100644
343 --- a/arch/mips/cavium-octeon/octeon-irq.c
344 +++ b/arch/mips/cavium-octeon/octeon-irq.c
345 @@ -2199,6 +2199,9 @@ static int octeon_irq_cib_map(struct irq_domain *d,
346 }
347
348 cd = kzalloc(sizeof(*cd), GFP_KERNEL);
349 + if (!cd)
350 + return -ENOMEM;
351 +
352 cd->host_data = host_data;
353 cd->bit = hw;
354
355 diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
356 index 41bb91f05688..547d813ead48 100644
357 --- a/arch/mips/mm/tlbex.c
358 +++ b/arch/mips/mm/tlbex.c
359 @@ -1480,6 +1480,7 @@ static void build_r4000_tlb_refill_handler(void)
360
361 static void setup_pw(void)
362 {
363 + unsigned int pwctl;
364 unsigned long pgd_i, pgd_w;
365 #ifndef __PAGETABLE_PMD_FOLDED
366 unsigned long pmd_i, pmd_w;
367 @@ -1506,6 +1507,7 @@ static void setup_pw(void)
368
369 pte_i = ilog2(_PAGE_GLOBAL);
370 pte_w = 0;
371 + pwctl = 1 << 30; /* Set PWDirExt */
372
373 #ifndef __PAGETABLE_PMD_FOLDED
374 write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
375 @@ -1516,8 +1518,9 @@ static void setup_pw(void)
376 #endif
377
378 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
379 - write_c0_pwctl(1 << 6 | psn);
380 + pwctl |= (1 << 6 | psn);
381 #endif
382 + write_c0_pwctl(pwctl);
383 write_c0_kpgd((long)swapper_pg_dir);
384 kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
385 }
386 diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
387 index 8fd8599c9395..3f9ae3585ab9 100644
388 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
389 +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
390 @@ -156,6 +156,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
391 extern int hash__has_transparent_hugepage(void);
392 #endif
393
394 +static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
395 +{
396 + BUG();
397 + return pmd;
398 +}
399 +
400 #endif /* !__ASSEMBLY__ */
401
402 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
403 diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
404 index d1d9177d9ebd..0729c034e56f 100644
405 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
406 +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
407 @@ -246,7 +246,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
408 */
409 static inline int hash__pmd_trans_huge(pmd_t pmd)
410 {
411 - return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
412 + return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
413 (_PAGE_PTE | H_PAGE_THP_HUGE));
414 }
415
416 @@ -272,6 +272,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
417 unsigned long addr, pmd_t *pmdp);
418 extern int hash__has_transparent_hugepage(void);
419 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
420 +
421 +static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
422 +{
423 + return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
424 +}
425 +
426 #endif /* __ASSEMBLY__ */
427
428 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
429 diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
430 index b01624e5c467..a143d394ff46 100644
431 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
432 +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
433 @@ -1303,7 +1303,9 @@ extern void serialize_against_pte_lookup(struct mm_struct *mm);
434
435 static inline pmd_t pmd_mkdevmap(pmd_t pmd)
436 {
437 - return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
438 + if (radix_enabled())
439 + return radix__pmd_mkdevmap(pmd);
440 + return hash__pmd_mkdevmap(pmd);
441 }
442
443 static inline int pmd_devmap(pmd_t pmd)
444 diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
445 index d97db3ad9aae..a1c60d5b50af 100644
446 --- a/arch/powerpc/include/asm/book3s/64/radix.h
447 +++ b/arch/powerpc/include/asm/book3s/64/radix.h
448 @@ -263,6 +263,11 @@ static inline int radix__has_transparent_hugepage(void)
449 }
450 #endif
451
452 +static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
453 +{
454 + return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
455 +}
456 +
457 extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
458 unsigned long page_size,
459 unsigned long phys);
460 diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
461 index 3d76e1c388c2..28c3d936fdf3 100644
462 --- a/arch/powerpc/include/asm/drmem.h
463 +++ b/arch/powerpc/include/asm/drmem.h
464 @@ -27,12 +27,12 @@ struct drmem_lmb_info {
465 extern struct drmem_lmb_info *drmem_info;
466
467 #define for_each_drmem_lmb_in_range(lmb, start, end) \
468 - for ((lmb) = (start); (lmb) <= (end); (lmb)++)
469 + for ((lmb) = (start); (lmb) < (end); (lmb)++)
470
471 #define for_each_drmem_lmb(lmb) \
472 for_each_drmem_lmb_in_range((lmb), \
473 &drmem_info->lmbs[0], \
474 - &drmem_info->lmbs[drmem_info->n_lmbs - 1])
475 + &drmem_info->lmbs[drmem_info->n_lmbs])
476
477 /*
478 * The of_drconf_cell_v1 struct defines the layout of the LMB data
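
This converts the walker from an inclusive end bound to the conventional exclusive one: callers now pass &lmbs[n_lmbs] (one past the last element) instead of &lmbs[n_lmbs - 1], which was already an awkward expression when n_lmbs could be 0. The exclusive-end idiom, as get_lmb_range() is reworked to use it later in this patch, in a minimal sketch:

    struct drmem_lmb { unsigned long base_addr; }; /* stub for illustration */

    /* 'end' points one past the last LMB, so an empty range
     * (start == end) runs the body zero times. */
    static void walk_lmbs(struct drmem_lmb *start, struct drmem_lmb *end)
    {
        struct drmem_lmb *lmb;

        for (lmb = start; lmb < end; lmb++)
            ; /* process *lmb */
    }
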
479 diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h
480 index e9f81bb3f83b..f798e80e4106 100644
481 --- a/arch/powerpc/include/asm/setjmp.h
482 +++ b/arch/powerpc/include/asm/setjmp.h
483 @@ -7,7 +7,9 @@
484
485 #define JMP_BUF_LEN 23
486
487 -extern long setjmp(long *) __attribute__((returns_twice));
488 -extern void longjmp(long *, long) __attribute__((noreturn));
489 +typedef long jmp_buf[JMP_BUF_LEN];
490 +
491 +extern int setjmp(jmp_buf env) __attribute__((returns_twice));
492 +extern void longjmp(jmp_buf env, int val) __attribute__((noreturn));
493
494 #endif /* _ASM_POWERPC_SETJMP_H */
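
With the jmp_buf typedef these declarations now line up with the libc-style signatures that clang's setjmp/longjmp builtins expect, which is what lets the -ffreestanding workarounds be dropped from the Makefiles later in this patch. The classic pattern, loosely modeled on xmon's bus-error recovery (names are illustrative):

    #include <asm/setjmp.h>

    static jmp_buf bus_error_jmp;

    static int try_risky_read(unsigned long addr, unsigned long *val)
    {
        if (setjmp(bus_error_jmp) == 0) {
            *val = *(unsigned long *)addr; /* may fault */
            return 0;
        }
        /* a longjmp(bus_error_jmp, 1) from the fault path lands here */
        return -1;
    }
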
495 diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
496 index 3c02445cf086..dc0780f930d5 100644
497 --- a/arch/powerpc/kernel/Makefile
498 +++ b/arch/powerpc/kernel/Makefile
499 @@ -5,9 +5,6 @@
500
501 CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
502
503 -# Avoid clang warnings around longjmp/setjmp declarations
504 -CFLAGS_crash.o += -ffreestanding
505 -
506 ifdef CONFIG_PPC64
507 CFLAGS_prom_init.o += $(NO_MINIMAL_TOC)
508 endif
509 diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
510 index 180b3a5d1001..05606025a131 100644
511 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c
512 +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
513 @@ -139,7 +139,6 @@ static void __init cpufeatures_setup_cpu(void)
514 /* Initialize the base environment -- clear FSCR/HFSCR. */
515 hv_mode = !!(mfmsr() & MSR_HV);
516 if (hv_mode) {
517 - /* CPU_FTR_HVMODE is used early in PACA setup */
518 cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
519 mtspr(SPRN_HFSCR, 0);
520 }
521 diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
522 index 2d27ec4feee4..9b340af02c38 100644
523 --- a/arch/powerpc/kernel/kprobes.c
524 +++ b/arch/powerpc/kernel/kprobes.c
525 @@ -264,6 +264,9 @@ int kprobe_handler(struct pt_regs *regs)
526 if (user_mode(regs))
527 return 0;
528
529 + if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
530 + return 0;
531 +
532 /*
533 * We don't want to be preempted for the entire
534 * duration of kprobe processing
535 diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
536 index 949eceb254d8..3f91ccaa9c74 100644
537 --- a/arch/powerpc/kernel/paca.c
538 +++ b/arch/powerpc/kernel/paca.c
539 @@ -176,7 +176,7 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
540 struct paca_struct **paca_ptrs __read_mostly;
541 EXPORT_SYMBOL(paca_ptrs);
542
543 -void __init initialise_paca(struct paca_struct *new_paca, int cpu)
544 +void __init __nostackprotector initialise_paca(struct paca_struct *new_paca, int cpu)
545 {
546 #ifdef CONFIG_PPC_PSERIES
547 new_paca->lppaca_ptr = NULL;
548 @@ -205,7 +205,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
549 }
550
551 /* Put the paca pointer into r13 and SPRG_PACA */
552 -void setup_paca(struct paca_struct *new_paca)
553 +void __nostackprotector setup_paca(struct paca_struct *new_paca)
554 {
555 /* Setup r13 */
556 local_paca = new_paca;
557 @@ -214,11 +214,15 @@ void setup_paca(struct paca_struct *new_paca)
558 /* On Book3E, initialize the TLB miss exception frames */
559 mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
560 #else
561 - /* In HV mode, we setup both HPACA and PACA to avoid problems
562 + /*
563 + * In HV mode, we setup both HPACA and PACA to avoid problems
564 * if we do a GET_PACA() before the feature fixups have been
565 - * applied
566 + * applied.
567 + *
568 + * Normally you should test against CPU_FTR_HVMODE, but CPU features
569 + * are not yet set up when we first reach here.
570 */
571 - if (early_cpu_has_feature(CPU_FTR_HVMODE))
572 + if (mfmsr() & MSR_HV)
573 mtspr(SPRN_SPRG_HPACA, local_paca);
574 #endif
575 mtspr(SPRN_SPRG_PACA, local_paca);
576 diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
577 index c82577c4b15d..1b02d338a5f5 100644
578 --- a/arch/powerpc/kernel/setup.h
579 +++ b/arch/powerpc/kernel/setup.h
580 @@ -8,6 +8,12 @@
581 #ifndef __ARCH_POWERPC_KERNEL_SETUP_H
582 #define __ARCH_POWERPC_KERNEL_SETUP_H
583
584 +#ifdef CONFIG_CC_IS_CLANG
585 +#define __nostackprotector
586 +#else
587 +#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
588 +#endif
589 +
590 void initialize_cache_info(void);
591 void irqstack_early_init(void);
592
593 diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
594 index 44b4c432a273..d7ff21316cfa 100644
595 --- a/arch/powerpc/kernel/setup_64.c
596 +++ b/arch/powerpc/kernel/setup_64.c
597 @@ -284,24 +284,42 @@ void __init record_spr_defaults(void)
598 * device-tree is not accessible via normal means at this point.
599 */
600
601 -void __init early_setup(unsigned long dt_ptr)
602 +void __init __nostackprotector early_setup(unsigned long dt_ptr)
603 {
604 static __initdata struct paca_struct boot_paca;
605
606 /* -------- printk is _NOT_ safe to use here ! ------- */
607
608 - /* Try new device tree based feature discovery ... */
609 - if (!dt_cpu_ftrs_init(__va(dt_ptr)))
610 - /* Otherwise use the old style CPU table */
611 - identify_cpu(0, mfspr(SPRN_PVR));
612 -
613 - /* Assume we're on cpu 0 for now. Don't write to the paca yet! */
614 + /*
615 + * Assume we're on cpu 0 for now.
616 + *
617 + * We need to load a PACA very early for a few reasons.
618 + *
619 + * The stack protector canary is stored in the paca, so as soon as we
620 + * call any stack protected code we need r13 pointing somewhere valid.
621 + *
622 + * If we are using kcov it will call in_task() in its instrumentation,
623 + * which relies on the current task from the PACA.
624 + *
625 + * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
626 + * printk(), which can trigger both stack protector and kcov.
627 + *
628 + * percpu variables and spin locks also use the paca.
629 + *
630 + * So set up a temporary paca. It will be replaced below once we know
631 + * what CPU we are on.
632 + */
633 initialise_paca(&boot_paca, 0);
634 setup_paca(&boot_paca);
635 fixup_boot_paca();
636
637 /* -------- printk is now safe to use ------- */
638
639 + /* Try new device tree based feature discovery ... */
640 + if (!dt_cpu_ftrs_init(__va(dt_ptr)))
641 + /* Otherwise use the old style CPU table */
642 + identify_cpu(0, mfspr(SPRN_PVR));
643 +
644 /* Enable early debugging if any specified (see udbg.h) */
645 udbg_early_init();
646
647 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
648 index 84ed2e77ef9c..adfde59cf4ba 100644
649 --- a/arch/powerpc/kernel/signal_64.c
650 +++ b/arch/powerpc/kernel/signal_64.c
651 @@ -473,8 +473,10 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
652 err |= __get_user(tsk->thread.ckpt_regs.ccr,
653 &sc->gp_regs[PT_CCR]);
654
655 + /* Don't allow userspace to set the trap value */
656 + regs->trap = 0;
657 +
658 /* These regs are not checkpointed; they can go in 'regs'. */
659 - err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
660 err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
661 err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
662 err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
663 diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
664 index 0e6ed4413eea..1cfe57b51d7e 100644
665 --- a/arch/powerpc/mm/kasan/kasan_init_32.c
666 +++ b/arch/powerpc/mm/kasan/kasan_init_32.c
667 @@ -117,7 +117,7 @@ static void __init kasan_remap_early_shadow_ro(void)
668
669 kasan_populate_pte(kasan_early_shadow_pte, prot);
670
671 - for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
672 + for (k_cur = k_start & PAGE_MASK; k_cur != k_end; k_cur += PAGE_SIZE) {
673 pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
674 pte_t *ptep = pte_offset_kernel(pmd, k_cur);
675
676 diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
677 index 2ca407cedbe7..eaeee402f96e 100644
678 --- a/arch/powerpc/mm/nohash/tlb_low.S
679 +++ b/arch/powerpc/mm/nohash/tlb_low.S
680 @@ -397,7 +397,7 @@ _GLOBAL(set_context)
681 * extern void loadcam_entry(unsigned int index)
682 *
683 * Load TLBCAM[index] entry in to the L2 CAM MMU
684 - * Must preserve r7, r8, r9, and r10
685 + * Must preserve r7, r8, r9, r10 and r11
686 */
687 _GLOBAL(loadcam_entry)
688 mflr r5
689 @@ -433,6 +433,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
690 */
691 _GLOBAL(loadcam_multi)
692 mflr r8
693 + /* Don't switch to AS=1 if already there */
694 + mfmsr r11
695 + andi. r11,r11,MSR_IS
696 + bne 10f
697
698 /*
699 * Set up temporary TLB entry that is the same as what we're
700 @@ -458,6 +462,7 @@ _GLOBAL(loadcam_multi)
701 mtmsr r6
702 isync
703
704 +10:
705 mr r9,r3
706 add r10,r3,r4
707 2: bl loadcam_entry
708 @@ -466,6 +471,10 @@ _GLOBAL(loadcam_multi)
709 mr r3,r9
710 blt 2b
711
712 + /* Don't return to AS=0 if we were in AS=1 at function start */
713 + andi. r11,r11,MSR_IS
714 + bne 3f
715 +
716 /* Return to AS=0 and clear the temporary entry */
717 mfmsr r6
718 rlwinm. r6,r6,0,~(MSR_IS|MSR_DS)
719 @@ -481,6 +490,7 @@ _GLOBAL(loadcam_multi)
720 tlbwe
721 isync
722
723 +3:
724 mtlr r8
725 blr
726 #endif
727 diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
728 index 4c3af2e9eb8e..f1888352b4e0 100644
729 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c
730 +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
731 @@ -223,7 +223,7 @@ static int get_lmb_range(u32 drc_index, int n_lmbs,
732 struct drmem_lmb **end_lmb)
733 {
734 struct drmem_lmb *lmb, *start, *end;
735 - struct drmem_lmb *last_lmb;
736 + struct drmem_lmb *limit;
737
738 start = NULL;
739 for_each_drmem_lmb(lmb) {
740 @@ -236,10 +236,10 @@ static int get_lmb_range(u32 drc_index, int n_lmbs,
741 if (!start)
742 return -EINVAL;
743
744 - end = &start[n_lmbs - 1];
745 + end = &start[n_lmbs];
746
747 - last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
748 - if (end > last_lmb)
749 + limit = &drmem_info->lmbs[drmem_info->n_lmbs];
750 + if (end > limit)
751 return -EINVAL;
752
753 *start_lmb = start;
754 diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
755 index f87a5c64e24d..c93b9a3bf237 100644
756 --- a/arch/powerpc/platforms/pseries/lpar.c
757 +++ b/arch/powerpc/platforms/pseries/lpar.c
758 @@ -1992,7 +1992,7 @@ static int __init vpa_debugfs_init(void)
759 {
760 char name[16];
761 long i;
762 - static struct dentry *vpa_dir;
763 + struct dentry *vpa_dir;
764
765 if (!firmware_has_feature(FW_FEATURE_SPLPAR))
766 return 0;
767 diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
768 index 9651ca061828..fe8d396e2301 100644
769 --- a/arch/powerpc/sysdev/xive/common.c
770 +++ b/arch/powerpc/sysdev/xive/common.c
771 @@ -68,13 +68,6 @@ static u32 xive_ipi_irq;
772 /* Xive state for each CPU */
773 static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
774
775 -/*
776 - * A "disabled" interrupt should never fire, to catch problems
777 - * we set its logical number to this
778 - */
779 -#define XIVE_BAD_IRQ 0x7fffffff
780 -#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
781 -
782 /* An invalid CPU target */
783 #define XIVE_INVALID_TARGET (-1)
784
785 @@ -265,11 +258,15 @@ notrace void xmon_xive_do_dump(int cpu)
786
787 int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
788 {
789 + struct irq_chip *chip = irq_data_get_irq_chip(d);
790 int rc;
791 u32 target;
792 u8 prio;
793 u32 lirq;
794
795 + if (!is_xive_irq(chip))
796 + return -EINVAL;
797 +
798 rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
799 if (rc) {
800 xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
801 @@ -1150,7 +1147,7 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
802 xc = per_cpu(xive_cpu, cpu);
803
804 /* Check if we are already setup */
805 - if (xc->hw_ipi != 0)
806 + if (xc->hw_ipi != XIVE_BAD_IRQ)
807 return 0;
808
809 /* Grab an IPI from the backend, this will populate xc->hw_ipi */
810 @@ -1187,7 +1184,7 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
811 /* Disable the IPI and free the IRQ data */
812
813 /* Already cleaned up ? */
814 - if (xc->hw_ipi == 0)
815 + if (xc->hw_ipi == XIVE_BAD_IRQ)
816 return;
817
818 /* Mask the IPI */
819 @@ -1343,6 +1340,7 @@ static int xive_prepare_cpu(unsigned int cpu)
820 if (np)
821 xc->chip_id = of_get_ibm_chip_id(np);
822 of_node_put(np);
823 + xc->hw_ipi = XIVE_BAD_IRQ;
824
825 per_cpu(xive_cpu, cpu) = xc;
826 }
827 diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
828 index 0ff6b739052c..50e1a8e02497 100644
829 --- a/arch/powerpc/sysdev/xive/native.c
830 +++ b/arch/powerpc/sysdev/xive/native.c
831 @@ -312,7 +312,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
832 s64 rc;
833
834 /* Free the IPI */
835 - if (!xc->hw_ipi)
836 + if (xc->hw_ipi == XIVE_BAD_IRQ)
837 return;
838 for (;;) {
839 rc = opal_xive_free_irq(xc->hw_ipi);
840 @@ -320,7 +320,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
841 msleep(OPAL_BUSY_DELAY_MS);
842 continue;
843 }
844 - xc->hw_ipi = 0;
845 + xc->hw_ipi = XIVE_BAD_IRQ;
846 break;
847 }
848 }
849 diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
850 index 55dc61cb4867..3f15615712b5 100644
851 --- a/arch/powerpc/sysdev/xive/spapr.c
852 +++ b/arch/powerpc/sysdev/xive/spapr.c
853 @@ -560,11 +560,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
854
855 static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
856 {
857 - if (!xc->hw_ipi)
858 + if (xc->hw_ipi == XIVE_BAD_IRQ)
859 return;
860
861 xive_irq_bitmap_free(xc->hw_ipi);
862 - xc->hw_ipi = 0;
863 + xc->hw_ipi = XIVE_BAD_IRQ;
864 }
865 #endif /* CONFIG_SMP */
866
867 diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
868 index 59cd366e7933..382980f4de2d 100644
869 --- a/arch/powerpc/sysdev/xive/xive-internal.h
870 +++ b/arch/powerpc/sysdev/xive/xive-internal.h
871 @@ -5,6 +5,13 @@
872 #ifndef __XIVE_INTERNAL_H
873 #define __XIVE_INTERNAL_H
874
875 +/*
876 + * A "disabled" interrupt should never fire, to catch problems
877 + * we set its logical number to this
878 + */
879 +#define XIVE_BAD_IRQ 0x7fffffff
880 +#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
881 +
882 /* Each CPU carry one of these with various per-CPU state */
883 struct xive_cpu {
884 #ifdef CONFIG_SMP
885 diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
886 index c3842dbeb1b7..6f9cccea54f3 100644
887 --- a/arch/powerpc/xmon/Makefile
888 +++ b/arch/powerpc/xmon/Makefile
889 @@ -1,9 +1,6 @@
890 # SPDX-License-Identifier: GPL-2.0
891 # Makefile for xmon
892
893 -# Avoid clang warnings around longjmp/setjmp declarations
894 -subdir-ccflags-y := -ffreestanding
895 -
896 GCOV_PROFILE := n
897 KCOV_INSTRUMENT := n
898 UBSAN_SANITIZE := n
899 diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
900 index e9dac9a24d3f..61f2b0412345 100644
901 --- a/arch/s390/kernel/diag.c
902 +++ b/arch/s390/kernel/diag.c
903 @@ -84,7 +84,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
904
905 static void *show_diag_stat_start(struct seq_file *m, loff_t *pos)
906 {
907 - return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
908 + return *pos <= NR_DIAG_STAT ? (void *)((unsigned long) *pos + 1) : NULL;
909 }
910
911 static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos)
912 diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
913 index 076090f9e666..4f6c22d72072 100644
914 --- a/arch/s390/kvm/vsie.c
915 +++ b/arch/s390/kvm/vsie.c
916 @@ -1202,6 +1202,7 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
917 scb_s->iprcc = PGM_ADDRESSING;
918 scb_s->pgmilc = 4;
919 scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
920 + rc = 1;
921 }
922 return rc;
923 }
924 diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
925 index edcdca97e85e..9d9ab77d02dd 100644
926 --- a/arch/s390/mm/gmap.c
927 +++ b/arch/s390/mm/gmap.c
928 @@ -787,14 +787,18 @@ static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
929 static inline unsigned long *gmap_table_walk(struct gmap *gmap,
930 unsigned long gaddr, int level)
931 {
932 + const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
933 unsigned long *table;
934
935 if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
936 return NULL;
937 if (gmap_is_shadow(gmap) && gmap->removed)
938 return NULL;
939 - if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
940 +
941 + if (asce_type != _ASCE_TYPE_REGION1 &&
942 + gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
943 return NULL;
944 +
945 table = gmap->table;
946 switch (gmap->asce & _ASCE_TYPE_MASK) {
947 case _ASCE_TYPE_REGION1:
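
The REGION1 exclusion is what prevents an undefined shift here: the shift count is 31 + (asce_type >> 2) * 11, which is fine for segment, region-third and region-second tables but evaluates to 64 for a region-first ASCE, and shifting a 64-bit value by 64 is undefined behavior rather than the intended "no upper bound". A region-first table covers the whole address space, so no range check is needed there at all:

    /* shift used by the out-of-range check, per ASCE type nibble */
    static unsigned int range_shift(unsigned int asce_type)
    {
        return 31 + (asce_type >> 2) * 11;
    }
    /* 0x00 segment -> 31, 0x04 region3 -> 42, 0x08 region2 -> 53,
     * 0x0c region1 -> 64: '-1UL << 64' is undefined, hence the
     * explicit REGION1 test above. */
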
948 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
949 index 5e30eaaf8576..70ffce98c568 100644
950 --- a/arch/x86/boot/compressed/head_32.S
951 +++ b/arch/x86/boot/compressed/head_32.S
952 @@ -106,7 +106,7 @@ ENTRY(startup_32)
953 notl %eax
954 andl %eax, %ebx
955 cmpl $LOAD_PHYSICAL_ADDR, %ebx
956 - jge 1f
957 + jae 1f
958 #endif
959 movl $LOAD_PHYSICAL_ADDR, %ebx
960 1:
961 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
962 index e9a7f7cadb12..07d2002da642 100644
963 --- a/arch/x86/boot/compressed/head_64.S
964 +++ b/arch/x86/boot/compressed/head_64.S
965 @@ -106,7 +106,7 @@ ENTRY(startup_32)
966 notl %eax
967 andl %eax, %ebx
968 cmpl $LOAD_PHYSICAL_ADDR, %ebx
969 - jge 1f
970 + jae 1f
971 #endif
972 movl $LOAD_PHYSICAL_ADDR, %ebx
973 1:
974 @@ -297,7 +297,7 @@ ENTRY(startup_64)
975 notq %rax
976 andq %rax, %rbp
977 cmpq $LOAD_PHYSICAL_ADDR, %rbp
978 - jge 1f
979 + jae 1f
980 #endif
981 movq $LOAD_PHYSICAL_ADDR, %rbp
982 1:
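
In all three hunks the operands are physical addresses, so the comparison has to be unsigned: jge tests the signed condition, and a load address at or above 2 GiB reads as negative, making the old code fall through and needlessly relocate the kernel to LOAD_PHYSICAL_ADDR. jae branches on the unsigned "above or equal" condition instead. The same comparison in C, with hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    #define LOAD_PHYSICAL_ADDR 0x1000000u /* typical 16 MiB default */

    int main(void)
    {
        uint32_t ebx = 0x90000000u; /* load address above 2 GiB */

        /* jge (signed): 0x90000000 reads as negative -> 0 (old bug) */
        printf("%d\n", (int32_t)ebx >= (int32_t)LOAD_PHYSICAL_ADDR);

        /* jae (unsigned): 0x90000000 >= 0x1000000 -> 1 (fixed) */
        printf("%d\n", ebx >= LOAD_PHYSICAL_ADDR);
        return 0;
    }
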
983 diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
984 index f07baf0388bc..390edb763826 100644
985 --- a/arch/x86/entry/entry_32.S
986 +++ b/arch/x86/entry/entry_32.S
987 @@ -1647,6 +1647,7 @@ ENTRY(int3)
988 END(int3)
989
990 ENTRY(general_protection)
991 + ASM_CLAC
992 pushl $do_general_protection
993 jmp common_exception
994 END(general_protection)
995 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
996 index 734a3334e0f0..380cee9bc175 100644
997 --- a/arch/x86/include/asm/kvm_host.h
998 +++ b/arch/x86/include/asm/kvm_host.h
999 @@ -1130,7 +1130,7 @@ struct kvm_x86_ops {
1000 bool (*pt_supported)(void);
1001 bool (*pku_supported)(void);
1002
1003 - int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
1004 + int (*check_nested_events)(struct kvm_vcpu *vcpu);
1005 void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
1006
1007 void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
1008 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
1009 index 0bc530c4eb13..1e6bb4c25334 100644
1010 --- a/arch/x86/include/asm/pgtable.h
1011 +++ b/arch/x86/include/asm/pgtable.h
1012 @@ -624,12 +624,15 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1013 return __pmd(val);
1014 }
1015
1016 -/* mprotect needs to preserve PAT bits when updating vm_page_prot */
1017 +/*
1018 + * mprotect needs to preserve PAT and encryption bits when updating
1019 + * vm_page_prot
1020 + */
1021 #define pgprot_modify pgprot_modify
1022 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
1023 {
1024 pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
1025 - pgprotval_t addbits = pgprot_val(newprot);
1026 + pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
1027 return __pgprot(preservebits | addbits);
1028 }
1029
1030 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
1031 index b5e49e6bac63..8267dd426b15 100644
1032 --- a/arch/x86/include/asm/pgtable_types.h
1033 +++ b/arch/x86/include/asm/pgtable_types.h
1034 @@ -123,7 +123,7 @@
1035 */
1036 #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
1037 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
1038 - _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
1039 + _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC)
1040 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
1041
1042 /*
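
These two hunks work together: _PAGE_ENC joins the set of bits that mprotect() must carry over from the old protection, and pgprot_modify() now also masks the preserved bits out of the new protection so stale defaults in vm_page_prot cannot clobber them. The effect, sketched with a couple of the real mask members:

    /* Sketch of pgprot_modify()'s split, assuming an SEV-encrypted
     * mapping whose protection is being changed to read-write. */
    pgprotval_t old = _PAGE_PRESENT | _PAGE_ENC; /* current, encrypted */
    pgprotval_t new = _PAGE_PRESENT | _PAGE_RW;  /* from mprotect()    */

    pgprotval_t preserve = old & _PAGE_CHG_MASK;  /* keeps _PAGE_ENC   */
    pgprotval_t add      = new & ~_PAGE_CHG_MASK; /* PRESENT | RW only */
    pgprotval_t result   = preserve | add;        /* still encrypted   */
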
1043 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
1044 index 04205ce127a1..f9e84a0e2fa2 100644
1045 --- a/arch/x86/kernel/acpi/boot.c
1046 +++ b/arch/x86/kernel/acpi/boot.c
1047 @@ -1740,7 +1740,7 @@ int __acpi_acquire_global_lock(unsigned int *lock)
1048 new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
1049 val = cmpxchg(lock, old, new);
1050 } while (unlikely (val != old));
1051 - return (new < 3) ? -1 : 0;
1052 + return ((new & 0x3) < 3) ? -1 : 0;
1053 }
1054
1055 int __acpi_release_global_lock(unsigned int *lock)
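
For reference, the Global Lock keeps its protocol in the low two bits of the word (bit 0 = pending, bit 1 = owned) while the surrounding FACS dword may carry other data, which is why comparing the whole value was wrong. A standalone walk-through of the update step:

    #include <stdio.h>

    /* Mirror of __acpi_acquire_global_lock()'s update: set owned, and
     * set pending only if it was already owned. Returns -1 (truthy,
     * i.e. acquired) or 0 (must wait for the release notification). */
    static int try_acquire(unsigned int old)
    {
        unsigned int new = ((old & ~0x3) + 2) + ((old >> 1) & 0x1);
        return ((new & 0x3) < 3) ? -1 : 0;
    }

    int main(void)
    {
        printf("%d\n", try_acquire(0x00)); /* free: new = 2, acquired */
        printf("%d\n", try_acquire(0x02)); /* owned: new = 3, wait    */
        printf("%d\n", try_acquire(0x10)); /* free + unrelated upper bit:
                                            * new = 0x12; 'new < 3' wrongly
                                            * said wait, '(new & 3) < 3'
                                            * correctly acquires */
        return 0;
    }
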
1056 diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
1057 index e0cbe4f2af49..c65adaf81384 100644
1058 --- a/arch/x86/kernel/tsc_msr.c
1059 +++ b/arch/x86/kernel/tsc_msr.c
1060 @@ -15,18 +15,46 @@
1061 #include <asm/param.h>
1062 #include <asm/tsc.h>
1063
1064 -#define MAX_NUM_FREQS 9
1065 +#define MAX_NUM_FREQS 16 /* 4 bits to select the frequency */
1066 +
1067 +/*
1068 + * The frequency numbers in the SDM are e.g. 83.3 MHz, which does not contain a
1069 + * lot of accuracy which leads to clock drift. As far as we know Bay Trail SoCs
1070 + * use a 25 MHz crystal and Cherry Trail uses a 19.2 MHz crystal, the crystal
1071 + * is the source clk for a root PLL which outputs 1600 and 100 MHz. It is
1072 + * unclear if the root PLL outputs are used directly by the CPU clock PLL or
1073 + * if there is another PLL in between.
1074 + * This does not matter though, we can model the chain of PLLs as a single PLL
1075 + * with a quotient equal to the quotients of all PLLs in the chain multiplied.
1076 + * So we can create a simplified model of the CPU clock setup using a reference
1077 + * clock of 100 MHz plus a quotient which gets us as close to the frequency
1078 + * from the SDM as possible.
1079 + * For the 83.3 MHz example from above this would give us 100 MHz * 5 / 6 =
1080 + * 83 and 1/3 MHz, which matches exactly what has been measured on actual hw.
1081 + */
1082 +#define TSC_REFERENCE_KHZ 100000
1083 +
1084 +struct muldiv {
1085 + u32 multiplier;
1086 + u32 divider;
1087 +};
1088
1089 /*
1090 * If MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
1091 * read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40].
1092 * Unfortunately some Intel Atom SoCs aren't quite compliant to this,
1093 * so we need manually differentiate SoC families. This is what the
1094 - * field msr_plat does.
1095 + * field use_msr_plat does.
1096 */
1097 struct freq_desc {
1098 - u8 msr_plat; /* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */
1099 + bool use_msr_plat;
1100 + struct muldiv muldiv[MAX_NUM_FREQS];
1101 + /*
1102 + * Some CPU frequencies in the SDM do not map to known PLL freqs, in
1103 + * that case the muldiv array is empty and the freqs array is used.
1104 + */
1105 u32 freqs[MAX_NUM_FREQS];
1106 + u32 mask;
1107 };
1108
1109 /*
1110 @@ -35,31 +63,81 @@ struct freq_desc {
1111 * by MSR based on SDM.
1112 */
1113 static const struct freq_desc freq_desc_pnw = {
1114 - 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 }
1115 + .use_msr_plat = false,
1116 + .freqs = { 0, 0, 0, 0, 0, 99840, 0, 83200 },
1117 + .mask = 0x07,
1118 };
1119
1120 static const struct freq_desc freq_desc_clv = {
1121 - 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 }
1122 + .use_msr_plat = false,
1123 + .freqs = { 0, 133200, 0, 0, 0, 99840, 0, 83200 },
1124 + .mask = 0x07,
1125 };
1126
1127 +/*
1128 + * Bay Trail SDM MSR_FSB_FREQ frequencies simplified PLL model:
1129 + * 000: 100 * 5 / 6 = 83.3333 MHz
1130 + * 001: 100 * 1 / 1 = 100.0000 MHz
1131 + * 010: 100 * 4 / 3 = 133.3333 MHz
1132 + * 011: 100 * 7 / 6 = 116.6667 MHz
1133 + * 100: 100 * 4 / 5 = 80.0000 MHz
1134 + */
1135 static const struct freq_desc freq_desc_byt = {
1136 - 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 }
1137 + .use_msr_plat = true,
1138 + .muldiv = { { 5, 6 }, { 1, 1 }, { 4, 3 }, { 7, 6 },
1139 + { 4, 5 } },
1140 + .mask = 0x07,
1141 };
1142
1143 +/*
1144 + * Cherry Trail SDM MSR_FSB_FREQ frequencies simplified PLL model:
1145 + * 0000: 100 * 5 / 6 = 83.3333 MHz
1146 + * 0001: 100 * 1 / 1 = 100.0000 MHz
1147 + * 0010: 100 * 4 / 3 = 133.3333 MHz
1148 + * 0011: 100 * 7 / 6 = 116.6667 MHz
1149 + * 0100: 100 * 4 / 5 = 80.0000 MHz
1150 + * 0101: 100 * 14 / 15 = 93.3333 MHz
1151 + * 0110: 100 * 9 / 10 = 90.0000 MHz
1152 + * 0111: 100 * 8 / 9 = 88.8889 MHz
1153 + * 1000: 100 * 7 / 8 = 87.5000 MHz
1154 + */
1155 static const struct freq_desc freq_desc_cht = {
1156 - 1, { 83300, 100000, 133300, 116700, 80000, 93300, 90000, 88900, 87500 }
1157 + .use_msr_plat = true,
1158 + .muldiv = { { 5, 6 }, { 1, 1 }, { 4, 3 }, { 7, 6 },
1159 + { 4, 5 }, { 14, 15 }, { 9, 10 }, { 8, 9 },
1160 + { 7, 8 } },
1161 + .mask = 0x0f,
1162 };
1163
1164 +/*
1165 + * Merriefield SDM MSR_FSB_FREQ frequencies simplified PLL model:
1166 + * 0001: 100 * 1 / 1 = 100.0000 MHz
1167 + * 0010: 100 * 4 / 3 = 133.3333 MHz
1168 + */
1169 static const struct freq_desc freq_desc_tng = {
1170 - 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 }
1171 + .use_msr_plat = true,
1172 + .muldiv = { { 0, 0 }, { 1, 1 }, { 4, 3 } },
1173 + .mask = 0x07,
1174 };
1175
1176 +/*
1177 + * Moorefield SDM MSR_FSB_FREQ frequencies simplified PLL model:
1178 + * 0000: 100 * 5 / 6 = 83.3333 MHz
1179 + * 0001: 100 * 1 / 1 = 100.0000 MHz
1180 + * 0010: 100 * 4 / 3 = 133.3333 MHz
1181 + * 0011: 100 * 1 / 1 = 100.0000 MHz
1182 + */
1183 static const struct freq_desc freq_desc_ann = {
1184 - 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 }
1185 + .use_msr_plat = true,
1186 + .muldiv = { { 5, 6 }, { 1, 1 }, { 4, 3 }, { 1, 1 } },
1187 + .mask = 0x0f,
1188 };
1189
1190 +/* 24 MHz crystal? : 24 * 13 / 4 = 78 MHz */
1191 static const struct freq_desc freq_desc_lgm = {
1192 - 1, { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 }
1193 + .use_msr_plat = true,
1194 + .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 },
1195 + .mask = 0x0f,
1196 };
1197
1198 static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
1199 @@ -81,17 +159,19 @@ static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
1200 */
1201 unsigned long cpu_khz_from_msr(void)
1202 {
1203 - u32 lo, hi, ratio, freq;
1204 + u32 lo, hi, ratio, freq, tscref;
1205 const struct freq_desc *freq_desc;
1206 const struct x86_cpu_id *id;
1207 + const struct muldiv *md;
1208 unsigned long res;
1209 + int index;
1210
1211 id = x86_match_cpu(tsc_msr_cpu_ids);
1212 if (!id)
1213 return 0;
1214
1215 freq_desc = (struct freq_desc *)id->driver_data;
1216 - if (freq_desc->msr_plat) {
1217 + if (freq_desc->use_msr_plat) {
1218 rdmsr(MSR_PLATFORM_INFO, lo, hi);
1219 ratio = (lo >> 8) & 0xff;
1220 } else {
1221 @@ -101,12 +181,28 @@ unsigned long cpu_khz_from_msr(void)
1222
1223 /* Get FSB FREQ ID */
1224 rdmsr(MSR_FSB_FREQ, lo, hi);
1225 + index = lo & freq_desc->mask;
1226 + md = &freq_desc->muldiv[index];
1227
1228 - /* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */
1229 - freq = freq_desc->freqs[lo & 0x7];
1230 + /*
1231 + * Note this also catches cases where the index points to an unpopulated
1232 + * part of muldiv, in that case the else will set freq and res to 0.
1233 + */
1234 + if (md->divider) {
1235 + tscref = TSC_REFERENCE_KHZ * md->multiplier;
1236 + freq = DIV_ROUND_CLOSEST(tscref, md->divider);
1237 + /*
1238 + * Multiplying by ratio before the division has better
1239 + * accuracy than just calculating freq * ratio.
1240 + */
1241 + res = DIV_ROUND_CLOSEST(tscref * ratio, md->divider);
1242 + } else {
1243 + freq = freq_desc->freqs[index];
1244 + res = freq * ratio;
1245 + }
1246
1247 - /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
1248 - res = freq * ratio;
1249 + if (freq == 0)
1250 + pr_err("Error MSR_FSB_FREQ index %d is unknown\n", index);
1251
1252 #ifdef CONFIG_X86_LOCAL_APIC
1253 lapic_timer_period = (freq * 1000) / HZ;
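
The payoff of the muldiv form is precision: the old tables stored pre-rounded kHz values, while the PLL model keeps the exact ratio and, per the comment above, multiplies by the bus ratio before dividing. Worked numbers for the Bay Trail 000 entry (5/6) with a hypothetical bus ratio of 16:

    #include <stdio.h>

    /* simplified unsigned variant of the kernel macro */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))
    #define TSC_REFERENCE_KHZ 100000UL

    int main(void)
    {
        unsigned long mult = 5, div = 6, ratio = 16, tscref;

        tscref = TSC_REFERENCE_KHZ * mult;

        /* bus clock: 500000 / 6 -> 83333 kHz (old table said 83300) */
        printf("%lu\n", DIV_ROUND_CLOSEST(tscref, div));

        /* TSC: 500000 * 16 / 6 -> 1333333 kHz; the old 83300 * 16
         * gave 1332800 kHz, roughly 0.04% of drift */
        printf("%lu\n", DIV_ROUND_CLOSEST(tscref * ratio, div));
        return 0;
    }
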
1254 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1255 index 07459120a222..51ff6b342279 100644
1256 --- a/arch/x86/kvm/svm.c
1257 +++ b/arch/x86/kvm/svm.c
1258 @@ -1926,6 +1926,10 @@ static struct kvm *svm_vm_alloc(void)
1259 struct kvm_svm *kvm_svm = __vmalloc(sizeof(struct kvm_svm),
1260 GFP_KERNEL_ACCOUNT | __GFP_ZERO,
1261 PAGE_KERNEL);
1262 +
1263 + if (!kvm_svm)
1264 + return NULL;
1265 +
1266 return &kvm_svm->kvm;
1267 }
1268
1269 diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
1270 index 2b44554baf28..4a09f40b24dc 100644
1271 --- a/arch/x86/kvm/vmx/nested.c
1272 +++ b/arch/x86/kvm/vmx/nested.c
1273 @@ -3460,7 +3460,7 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
1274 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
1275 }
1276
1277 -static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
1278 +static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
1279 {
1280 struct vcpu_vmx *vmx = to_vmx(vcpu);
1281 unsigned long exit_qual;
1282 @@ -3507,8 +3507,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
1283 return 0;
1284 }
1285
1286 - if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
1287 - nested_exit_on_intr(vcpu)) {
1288 + if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) {
1289 if (block_nested_events)
1290 return -EBUSY;
1291 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
1292 @@ -4158,17 +4157,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1293 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
1294
1295 if (likely(!vmx->fail)) {
1296 - /*
1297 - * TODO: SDM says that with acknowledge interrupt on
1298 - * exit, bit 31 of the VM-exit interrupt information
1299 - * (valid interrupt) is always set to 1 on
1300 - * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
1301 - * need kvm_cpu_has_interrupt(). See the commit
1302 - * message for details.
1303 - */
1304 - if (nested_exit_intr_ack_set(vcpu) &&
1305 - exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
1306 - kvm_cpu_has_interrupt(vcpu)) {
1307 + if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
1308 + nested_exit_intr_ack_set(vcpu)) {
1309 int irq = kvm_cpu_get_interrupt(vcpu);
1310 WARN_ON(irq < 0);
1311 vmcs12->vm_exit_intr_info = irq |
1312 diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h
1313 index 45eaedee2ac0..09b0937d56b1 100644
1314 --- a/arch/x86/kvm/vmx/ops.h
1315 +++ b/arch/x86/kvm/vmx/ops.h
1316 @@ -12,7 +12,8 @@
1317
1318 #define __ex(x) __kvm_handle_fault_on_reboot(x)
1319
1320 -asmlinkage void vmread_error(unsigned long field, bool fault);
1321 +__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
1322 + bool fault);
1323 void vmwrite_error(unsigned long field, unsigned long value);
1324 void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
1325 void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
1326 @@ -70,15 +71,28 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
1327 asm volatile("1: vmread %2, %1\n\t"
1328 ".byte 0x3e\n\t" /* branch taken hint */
1329 "ja 3f\n\t"
1330 - "mov %2, %%" _ASM_ARG1 "\n\t"
1331 - "xor %%" _ASM_ARG2 ", %%" _ASM_ARG2 "\n\t"
1332 - "2: call vmread_error\n\t"
1333 - "xor %k1, %k1\n\t"
1334 +
1335 + /*
1336 + * VMREAD failed. Push '0' for @fault, push the failing
1337 + * @field, and bounce through the trampoline to preserve
1338 + * volatile registers.
1339 + */
1340 + "push $0\n\t"
1341 + "push %2\n\t"
1342 + "2:call vmread_error_trampoline\n\t"
1343 +
1344 + /*
1345 + * Unwind the stack. Note, the trampoline zeros out the
1346 + * memory for @fault so that the result is '0' on error.
1347 + */
1348 + "pop %2\n\t"
1349 + "pop %1\n\t"
1350 "3:\n\t"
1351
1352 + /* VMREAD faulted. As above, except push '1' for @fault. */
1353 ".pushsection .fixup, \"ax\"\n\t"
1354 - "4: mov %2, %%" _ASM_ARG1 "\n\t"
1355 - "mov $1, %%" _ASM_ARG2 "\n\t"
1356 + "4: push $1\n\t"
1357 + "push %2\n\t"
1358 "jmp 2b\n\t"
1359 ".popsection\n\t"
1360 _ASM_EXTABLE(1b, 4b)
1361 diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
1362 index 751a384c2eb0..c7012f6c971c 100644
1363 --- a/arch/x86/kvm/vmx/vmenter.S
1364 +++ b/arch/x86/kvm/vmx/vmenter.S
1365 @@ -234,3 +234,61 @@ ENTRY(__vmx_vcpu_run)
1366 2: mov $1, %eax
1367 jmp 1b
1368 ENDPROC(__vmx_vcpu_run)
1369 +
1370 +/**
1371 + * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
1372 + * @field: VMCS field encoding that failed
1373 + * @fault: %true if the VMREAD faulted, %false if it failed
1374 +
1375 + * Save and restore volatile registers across a call to vmread_error(). Note,
1376 + * all parameters are passed on the stack.
1377 + */
1378 +ENTRY(vmread_error_trampoline)
1379 + push %_ASM_BP
1380 + mov %_ASM_SP, %_ASM_BP
1381 +
1382 + push %_ASM_AX
1383 + push %_ASM_CX
1384 + push %_ASM_DX
1385 +#ifdef CONFIG_X86_64
1386 + push %rdi
1387 + push %rsi
1388 + push %r8
1389 + push %r9
1390 + push %r10
1391 + push %r11
1392 +#endif
1393 +#ifdef CONFIG_X86_64
1394 + /* Load @field and @fault to arg1 and arg2 respectively. */
1395 + mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
1396 + mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
1397 +#else
1398 + /* Parameters are passed on the stack for 32-bit (see asmlinkage). */
1399 + push 3*WORD_SIZE(%ebp)
1400 + push 2*WORD_SIZE(%ebp)
1401 +#endif
1402 +
1403 + call vmread_error
1404 +
1405 +#ifndef CONFIG_X86_64
1406 + add $8, %esp
1407 +#endif
1408 +
1409 + /* Zero out @fault, which will be popped into the result register. */
1410 + _ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)
1411 +
1412 +#ifdef CONFIG_X86_64
1413 + pop %r11
1414 + pop %r10
1415 + pop %r9
1416 + pop %r8
1417 + pop %rsi
1418 + pop %rdi
1419 +#endif
1420 + pop %_ASM_DX
1421 + pop %_ASM_CX
1422 + pop %_ASM_AX
1423 + pop %_ASM_BP
1424 +
1425 + ret
1426 +ENDPROC(vmread_error_trampoline)
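
The 2*WORD_SIZE/3*WORD_SIZE offsets follow from what sits on the stack once the frame pointer is set up: __vmcs_readl() pushes @fault, then @field, CALL pushes the return address, and the trampoline pushes the old frame pointer. A comment-style map of the resulting frame (WORD_SIZE is 8 on x86-64):

    /*
     * %rbp + 3*WORD_SIZE -> @fault  (pushed first, "push $0" / "push $1")
     * %rbp + 2*WORD_SIZE -> @field  (pushed second, "push %2")
     * %rbp + 1*WORD_SIZE -> return address (pushed by CALL)
     * %rbp + 0           -> saved %rbp
     *
     * Zeroing 3*WORD_SIZE(%rbp) rewrites @fault in place, so the
     * caller's "pop %2; pop %1" restores @field and pops 0 into the
     * result register on the error path.
     */
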
1427 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
1428 index 8129b6b27c93..39a116d43993 100644
1429 --- a/arch/x86/kvm/vmx/vmx.c
1430 +++ b/arch/x86/kvm/vmx/vmx.c
1431 @@ -648,43 +648,15 @@ void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1432 }
1433
1434 #ifdef CONFIG_KEXEC_CORE
1435 -/*
1436 - * This bitmap is used to indicate whether the vmclear
1437 - * operation is enabled on all cpus. All disabled by
1438 - * default.
1439 - */
1440 -static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1441 -
1442 -static inline void crash_enable_local_vmclear(int cpu)
1443 -{
1444 - cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1445 -}
1446 -
1447 -static inline void crash_disable_local_vmclear(int cpu)
1448 -{
1449 - cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1450 -}
1451 -
1452 -static inline int crash_local_vmclear_enabled(int cpu)
1453 -{
1454 - return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1455 -}
1456 -
1457 static void crash_vmclear_local_loaded_vmcss(void)
1458 {
1459 int cpu = raw_smp_processor_id();
1460 struct loaded_vmcs *v;
1461
1462 - if (!crash_local_vmclear_enabled(cpu))
1463 - return;
1464 -
1465 list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1466 loaded_vmcss_on_cpu_link)
1467 vmcs_clear(v->vmcs);
1468 }
1469 -#else
1470 -static inline void crash_enable_local_vmclear(int cpu) { }
1471 -static inline void crash_disable_local_vmclear(int cpu) { }
1472 #endif /* CONFIG_KEXEC_CORE */
1473
1474 static void __loaded_vmcs_clear(void *arg)
1475 @@ -696,19 +668,24 @@ static void __loaded_vmcs_clear(void *arg)
1476 return; /* vcpu migration can race with cpu offline */
1477 if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1478 per_cpu(current_vmcs, cpu) = NULL;
1479 - crash_disable_local_vmclear(cpu);
1480 +
1481 + vmcs_clear(loaded_vmcs->vmcs);
1482 + if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
1483 + vmcs_clear(loaded_vmcs->shadow_vmcs);
1484 +
1485 list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1486
1487 /*
1488 - * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link
1489 - * is before setting loaded_vmcs->vcpu to -1 which is done in
1490 - * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist
1491 - * then adds the vmcs into percpu list before it is deleted.
1492 + * Ensure all writes to loaded_vmcs, including deleting it from its
1493 + * current percpu list, complete before setting loaded_vmcs->vcpu to
1494 + * -1, otherwise a different cpu can see vcpu == -1 first and add
1495 + * loaded_vmcs to its percpu list before it's deleted from this cpu's
1496 + * list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
1497 */
1498 smp_wmb();
1499
1500 - loaded_vmcs_init(loaded_vmcs);
1501 - crash_enable_local_vmclear(cpu);
1502 + loaded_vmcs->cpu = -1;
1503 + loaded_vmcs->launched = 0;
1504 }
1505
1506 void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1507 @@ -1317,18 +1294,17 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
1508 if (!already_loaded) {
1509 loaded_vmcs_clear(vmx->loaded_vmcs);
1510 local_irq_disable();
1511 - crash_disable_local_vmclear(cpu);
1512
1513 /*
1514 - * Read loaded_vmcs->cpu should be before fetching
1515 - * loaded_vmcs->loaded_vmcss_on_cpu_link.
1516 - * See the comments in __loaded_vmcs_clear().
1517 + * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
1518 + * this cpu's percpu list, otherwise it may not yet be deleted
1519 + * from its previous cpu's percpu list. Pairs with the
1520 + * smp_wmb() in __loaded_vmcs_clear().
1521 */
1522 smp_rmb();
1523
1524 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1525 &per_cpu(loaded_vmcss_on_cpu, cpu));
1526 - crash_enable_local_vmclear(cpu);
1527 local_irq_enable();
1528 }
1529
1530 @@ -2252,21 +2228,6 @@ static int hardware_enable(void)
1531 !hv_get_vp_assist_page(cpu))
1532 return -EFAULT;
1533
1534 - INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
1535 - INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
1536 - spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
1537 -
1538 - /*
1539 - * Now we can enable the vmclear operation in kdump
1540 - * since the loaded_vmcss_on_cpu list on this cpu
1541 - * has been initialized.
1542 - *
1543 - * Though the cpu is not in VMX operation now, there
1544 - * is no problem to enable the vmclear operation
1545 - * for the loaded_vmcss_on_cpu list is empty!
1546 - */
1547 - crash_enable_local_vmclear(cpu);
1548 -
1549 rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
1550
1551 test_bits = FEATURE_CONTROL_LOCKED;
1552 @@ -4505,8 +4466,13 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
1553
1554 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
1555 {
1556 - return (!to_vmx(vcpu)->nested.nested_run_pending &&
1557 - vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
1558 + if (to_vmx(vcpu)->nested.nested_run_pending)
1559 + return false;
1560 +
1561 + if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
1562 + return true;
1563 +
1564 + return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
1565 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
1566 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
1567 }
1568 @@ -6684,6 +6650,10 @@ static struct kvm *vmx_vm_alloc(void)
1569 struct kvm_vmx *kvm_vmx = __vmalloc(sizeof(struct kvm_vmx),
1570 GFP_KERNEL_ACCOUNT | __GFP_ZERO,
1571 PAGE_KERNEL);
1572 +
1573 + if (!kvm_vmx)
1574 + return NULL;
1575 +
1576 return &kvm_vmx->kvm;
1577 }
1578
1579 @@ -8022,7 +7992,7 @@ module_exit(vmx_exit);
1580
1581 static int __init vmx_init(void)
1582 {
1583 - int r;
1584 + int r, cpu;
1585
1586 #if IS_ENABLED(CONFIG_HYPERV)
1587 /*
1588 @@ -8076,6 +8046,12 @@ static int __init vmx_init(void)
1589 return r;
1590 }
1591
1592 + for_each_possible_cpu(cpu) {
1593 + INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
1594 + INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
1595 + spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
1596 + }
1597 +
1598 #ifdef CONFIG_KEXEC_CORE
1599 rcu_assign_pointer(crash_vmclear_loaded_vmcss,
1600 crash_vmclear_local_loaded_vmcss);
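
The vmx.c rework above replaces the crash_*_vmclear bookkeeping with an ordering rule: loaded_vmcs->cpu = -1 is published only after the VMCS is cleared and removed from the percpu list, and the smp_wmb()/smp_rmb() pair guarantees that a CPU observing cpu == -1 also observes the list removal. A minimal userspace sketch of that publish/observe pairing, using C11 atomics in place of the kernel barriers (toy names, not the KVM API):

    #include <stdatomic.h>
    #include <stdio.h>

    struct record {
        int on_list;              /* stands in for the percpu-list state */
        atomic_int owner;         /* stands in for loaded_vmcs->cpu      */
    };

    /* Writer side (__loaded_vmcs_clear analogue): finish every update,
     * then publish "no owner" with release semantics (smp_wmb()). */
    static void release_record(struct record *r)
    {
        r->on_list = 0;                                /* list_del()     */
        atomic_store_explicit(&r->owner, -1, memory_order_release);
    }

    /* Reader side (vmx_vcpu_load_vmcs analogue): the acquire load pairs
     * with the release store (smp_rmb()), so once owner == -1 is seen,
     * the list removal is guaranteed to be visible as well. */
    static int safe_to_reuse(struct record *r)
    {
        if (atomic_load_explicit(&r->owner, memory_order_acquire) != -1)
            return 0;
        return !r->on_list;
    }

    int main(void)
    {
        struct record r = { .on_list = 1 };
        atomic_init(&r.owner, 0);
        release_record(&r);
        printf("safe: %d\n", safe_to_reuse(&r));       /* prints 1 */
        return 0;
    }
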
1601 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1602 index c5e15eba8052..56a0f9c18892 100644
1603 --- a/arch/x86/kvm/x86.c
1604 +++ b/arch/x86/kvm/x86.c
1605 @@ -7555,7 +7555,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
1606 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
1607 }
1608
1609 -static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
1610 +static int inject_pending_event(struct kvm_vcpu *vcpu)
1611 {
1612 int r;
1613
1614 @@ -7591,7 +7591,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
1615 * from L2 to L1.
1616 */
1617 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
1618 - r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
1619 + r = kvm_x86_ops->check_nested_events(vcpu);
1620 if (r != 0)
1621 return r;
1622 }
1623 @@ -7653,7 +7653,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
1624 * KVM_REQ_EVENT only on certain events and not unconditionally?
1625 */
1626 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
1627 - r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
1628 + r = kvm_x86_ops->check_nested_events(vcpu);
1629 if (r != 0)
1630 return r;
1631 }
1632 @@ -8130,7 +8130,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1633 goto out;
1634 }
1635
1636 - if (inject_pending_event(vcpu, req_int_win) != 0)
1637 + if (inject_pending_event(vcpu) != 0)
1638 req_immediate_exit = true;
1639 else {
1640 /* Enable SMI/NMI/IRQ window open exits if needed.
1641 @@ -8360,7 +8360,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
1642 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
1643 {
1644 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
1645 - kvm_x86_ops->check_nested_events(vcpu, false);
1646 + kvm_x86_ops->check_nested_events(vcpu);
1647
1648 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
1649 !vcpu->arch.apf.halted);
1650 @@ -9726,6 +9726,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1651 {
1652 int i;
1653
1654 + /*
1655 + * Clear out the previous array pointers for the KVM_MR_MOVE case. The
1656 + * old arrays will be freed by __kvm_set_memory_region() if installing
1657 + * the new memslot is successful.
1658 + */
1659 + memset(&slot->arch, 0, sizeof(slot->arch));
1660 +
1661 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
1662 struct kvm_lpage_info *linfo;
1663 unsigned long ugfn;
1664 @@ -9807,6 +9814,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1665 const struct kvm_userspace_memory_region *mem,
1666 enum kvm_mr_change change)
1667 {
1668 + if (change == KVM_MR_MOVE)
1669 + return kvm_arch_create_memslot(kvm, memslot,
1670 + mem->memory_size >> PAGE_SHIFT);
1671 +
1672 return 0;
1673 }
1674
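
The kvm_arch_create_memslot() hunk relies on a subtlety of KVM_MR_MOVE: the new slot is created as a copy of the old one, so slot->arch still aliases the old slot's metadata arrays. Zeroing it first lets fresh arrays be allocated while __kvm_set_memory_region() frees the old ones only if the move succeeds. A toy copy-then-recreate sketch (invented names):

    #include <stdlib.h>
    #include <string.h>

    /* Toy stand-in for the arch-specific part of a memslot. */
    struct slot_arch { int *lpage_info; };

    /* The "moved" slot arrives as a copy of the old slot, so its
     * pointers alias arrays still owned by the old slot.  Zero them
     * before allocating, like the memset(&slot->arch, ...) in the
     * hunk, or the old arrays would be leaked or freed twice later. */
    static int recreate_arch(struct slot_arch *arch, size_t npages)
    {
        memset(arch, 0, sizeof(*arch));
        arch->lpage_info = calloc(npages, sizeof(*arch->lpage_info));
        return arch->lpage_info ? 0 : -1;
    }
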
1675 diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
1676 index 01d7ca492741..ae1c5baf27cd 100644
1677 --- a/arch/x86/platform/efi/efi.c
1678 +++ b/arch/x86/platform/efi/efi.c
1679 @@ -85,6 +85,8 @@ static const unsigned long * const efi_tables[] = {
1680 #ifdef CONFIG_EFI_RCI2_TABLE
1681 &rci2_table_phys,
1682 #endif
1683 + &efi.tpm_log,
1684 + &efi.tpm_final_log,
1685 };
1686
1687 u64 efi_setup; /* efi setup_data physical address */
1688 diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
1689 index fe0e647411da..e39c930cfbd1 100644
1690 --- a/arch/x86/platform/efi/efi_64.c
1691 +++ b/arch/x86/platform/efi/efi_64.c
1692 @@ -834,7 +834,7 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
1693 phys_vendor = virt_to_phys_or_null(vnd);
1694 phys_data = virt_to_phys_or_null_size(data, data_size);
1695
1696 - if (!phys_name || !phys_data)
1697 + if (!phys_name || (data && !phys_data))
1698 status = EFI_INVALID_PARAMETER;
1699 else
1700 status = efi_thunk(set_variable, phys_name, phys_vendor,
1701 @@ -865,7 +865,7 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
1702 phys_vendor = virt_to_phys_or_null(vnd);
1703 phys_data = virt_to_phys_or_null_size(data, data_size);
1704
1705 - if (!phys_name || !phys_data)
1706 + if (!phys_name || (data && !phys_data))
1707 status = EFI_INVALID_PARAMETER;
1708 else
1709 status = efi_thunk(set_variable, phys_name, phys_vendor,
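
Both efi_64.c hunks encode the same UEFI rule: SetVariable() may be called with data == NULL and a zero size to delete a variable, so a NULL translated data address is an error only when the caller actually supplied a buffer. The guard reduces to something like this (errno-style return assumed for the sketch):

    #include <errno.h>

    /* Reject only genuine translation failures: a missing name is
     * always fatal, but a NULL data address matters only when the
     * caller passed a non-NULL data buffer in the first place. */
    static int validate_translation(const void *phys_name,
                                    const void *data,
                                    const void *phys_data)
    {
        if (!phys_name || (data && !phys_data))
            return -EINVAL;
        return 0;
    }
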
1710 diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
1711 index 86cd718e0380..5611769e1569 100644
1712 --- a/block/bfq-cgroup.c
1713 +++ b/block/bfq-cgroup.c
1714 @@ -625,6 +625,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1715 {
1716 struct bfq_entity *entity = &bfqq->entity;
1717
1718 + /*
1719 + * Get extra reference to prevent bfqq from being freed in
1720 + * next possible expire or deactivate.
1721 + */
1722 + bfqq->ref++;
1723 +
1724 /* If bfqq is empty, then bfq_bfqq_expire also invokes
1725 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
1726 * from data structures related to current group. Otherwise we
1727 @@ -635,12 +641,6 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1728 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
1729 false, BFQQE_PREEMPTED);
1730
1731 - /*
1732 - * get extra reference to prevent bfqq from being freed in
1733 - * next possible deactivate
1734 - */
1735 - bfqq->ref++;
1736 -
1737 if (bfq_bfqq_busy(bfqq))
1738 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
1739 else if (entity->on_st)
1740 @@ -660,7 +660,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1741
1742 if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
1743 bfq_schedule_dispatch(bfqd);
1744 - /* release extra ref taken above */
1745 + /* release extra ref taken above, bfqq may happen to be freed now */
1746 bfq_put_queue(bfqq);
1747 }
1748
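
The bfq-cgroup.c reorder matters because bfq_bfqq_expire() can itself drop what would otherwise be the last reference to the queue, so the pin must be taken before that call rather than after. A standalone sketch of the pin-early refcount pattern (toy types, not the bfq API):

    #include <stdlib.h>

    struct queue { int ref; };

    static void put_queue(struct queue *q)
    {
        if (--q->ref == 0)
            free(q);                  /* last reference gone */
    }

    /* May drop the caller's reference, like bfq_bfqq_expire(). */
    static void expire(struct queue *q)
    {
        put_queue(q);
    }

    static void move_queue(struct queue *q)
    {
        q->ref++;       /* pin first, like the relocated bfqq->ref++ */
        expire(q);      /* safe: our pin keeps q alive               */
        /* ... re-parent q to the new group ... */
        put_queue(q);   /* drop the pin; q may be freed right here   */
    }

    int main(void)
    {
        struct queue *q = calloc(1, sizeof(*q));
        q->ref = 1;     /* caller's single reference */
        move_queue(q);
        return 0;
    }
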
1749 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
1750 index 48189ff88916..5a825f9f1ea0 100644
1751 --- a/block/bfq-iosched.c
1752 +++ b/block/bfq-iosched.c
1753 @@ -6210,20 +6210,28 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
1754 return bfqq;
1755 }
1756
1757 -static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
1758 +static void
1759 +bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1760 {
1761 - struct bfq_data *bfqd = bfqq->bfqd;
1762 enum bfqq_expiration reason;
1763 unsigned long flags;
1764
1765 spin_lock_irqsave(&bfqd->lock, flags);
1766 - bfq_clear_bfqq_wait_request(bfqq);
1767
1768 + /*
1769 + * Considering that bfqq may be in a race, we should first check
1770 + * whether bfqq is in service before doing anything with it. If
1771 + * the racing bfqq is not in service, it has already been expired
1772 + * through __bfq_bfqq_expire(), and its wait_request flag has
1773 + * been cleared in __bfq_bfqd_reset_in_service().
1774 + */
1775 if (bfqq != bfqd->in_service_queue) {
1776 spin_unlock_irqrestore(&bfqd->lock, flags);
1777 return;
1778 }
1779
1780 + bfq_clear_bfqq_wait_request(bfqq);
1781 +
1782 if (bfq_bfqq_budget_timeout(bfqq))
1783 /*
1784 * Also here the queue can be safely expired
1785 @@ -6268,7 +6276,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
1786 * early.
1787 */
1788 if (bfqq)
1789 - bfq_idle_slice_timer_body(bfqq);
1790 + bfq_idle_slice_timer_body(bfqd, bfqq);
1791
1792 return HRTIMER_NORESTART;
1793 }
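
The bfq-iosched.c hunk moves the wait_request clearing after the in-service check because the idle-slice timer can race with an expiry on another path: once the scheduler lock is held, the handler must confirm the queue is still the one being served before touching its state. The shape of that check as a small pthread sketch (hypothetical names):

    #include <pthread.h>

    struct queue;   /* opaque: only compared by pointer here */

    struct sched {
        pthread_mutex_t lock;
        struct queue *in_service;
    };

    /* Timer-handler analogue of bfq_idle_slice_timer_body(): validate
     * the object under the lock before mutating it, because it may
     * have been expired (and its flags reset) while the timer callback
     * was in flight. */
    static void timer_body(struct sched *s, struct queue *q)
    {
        pthread_mutex_lock(&s->lock);
        if (q != s->in_service) {     /* lost the race: do nothing */
            pthread_mutex_unlock(&s->lock);
            return;
        }
        /* only now touch q: clear its wait_request-style flag, etc. */
        pthread_mutex_unlock(&s->lock);
    }
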
1794 diff --git a/block/blk-ioc.c b/block/blk-ioc.c
1795 index 5ed59ac6ae58..9df50fb507ca 100644
1796 --- a/block/blk-ioc.c
1797 +++ b/block/blk-ioc.c
1798 @@ -84,6 +84,7 @@ static void ioc_destroy_icq(struct io_cq *icq)
1799 * making it impossible to determine icq_cache. Record it in @icq.
1800 */
1801 icq->__rcu_icq_cache = et->icq_cache;
1802 + icq->flags |= ICQ_DESTROYED;
1803 call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
1804 }
1805
1806 @@ -212,15 +213,21 @@ static void __ioc_clear_queue(struct list_head *icq_list)
1807 {
1808 unsigned long flags;
1809
1810 + rcu_read_lock();
1811 while (!list_empty(icq_list)) {
1812 struct io_cq *icq = list_entry(icq_list->next,
1813 struct io_cq, q_node);
1814 struct io_context *ioc = icq->ioc;
1815
1816 spin_lock_irqsave(&ioc->lock, flags);
1817 + if (icq->flags & ICQ_DESTROYED) {
1818 + spin_unlock_irqrestore(&ioc->lock, flags);
1819 + continue;
1820 + }
1821 ioc_destroy_icq(icq);
1822 spin_unlock_irqrestore(&ioc->lock, flags);
1823 }
1824 + rcu_read_unlock();
1825 }
1826
1827 /**
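
The blk-ioc.c hunks close a double-destroy window: an icq is reachable from two lists, so ioc_destroy_icq() now brands it ICQ_DESTROYED under ioc->lock and a second walker that finds it skips it, while the added rcu_read_lock() keeps the icq memory valid long enough to inspect the flag. The destroy-once idiom in isolation (toy pthread version):

    #include <pthread.h>

    enum { DESTROYED = 1u << 0 };

    struct icq {
        unsigned int flags;
        pthread_mutex_t lock;
    };

    /* Tear the object down exactly once: the first caller marks it
     * DESTROYED under the lock, later callers observe the flag and
     * bail, mirroring the ICQ_DESTROYED check in __ioc_clear_queue(). */
    static void destroy_once(struct icq *icq)
    {
        pthread_mutex_lock(&icq->lock);
        if (icq->flags & DESTROYED) {
            pthread_mutex_unlock(&icq->lock);
            return;
        }
        icq->flags |= DESTROYED;
        /* ... real teardown would be scheduled here (call_rcu) ... */
        pthread_mutex_unlock(&icq->lock);
    }
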
1828 diff --git a/block/blk-settings.c b/block/blk-settings.c
1829 index c8eda2e7b91e..be1dca0103a4 100644
1830 --- a/block/blk-settings.c
1831 +++ b/block/blk-settings.c
1832 @@ -664,6 +664,9 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1833 printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
1834 top, bottom);
1835 }
1836 +
1837 + t->backing_dev_info->io_pages =
1838 + t->limits.max_sectors >> (PAGE_SHIFT - 9);
1839 }
1840 EXPORT_SYMBOL(disk_stack_limits);
1841
1842 diff --git a/crypto/rng.c b/crypto/rng.c
1843 index 1e21231f71c9..1490d210f1a1 100644
1844 --- a/crypto/rng.c
1845 +++ b/crypto/rng.c
1846 @@ -37,12 +37,16 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
1847 crypto_stats_get(alg);
1848 if (!seed && slen) {
1849 buf = kmalloc(slen, GFP_KERNEL);
1850 - if (!buf)
1851 + if (!buf) {
1852 + crypto_alg_put(alg);
1853 return -ENOMEM;
1854 + }
1855
1856 err = get_random_bytes_wait(buf, slen);
1857 - if (err)
1858 + if (err) {
1859 + crypto_alg_put(alg);
1860 goto out;
1861 + }
1862 seed = buf;
1863 }
1864
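
The crypto/rng.c fix is an acquire/release imbalance: crypto_stats_get() takes a reference on the algorithm before the seed buffer is allocated, so the two error paths patched here must drop it on the way out. A minimal userspace rendering of the rule (toy refcount, errno-style returns, simplified relative to the real reseed flow):

    #include <errno.h>
    #include <stdlib.h>

    struct alg { int refcnt; };

    static void alg_get(struct alg *a) { a->refcnt++; }
    static void alg_put(struct alg *a) { a->refcnt--; }

    /* Every return after alg_get() must be balanced by alg_put(),
     * including the failure paths - the paths the patch repairs. */
    static int reseed(struct alg *a, unsigned char *seed, size_t slen)
    {
        unsigned char *buf = NULL;

        alg_get(a);
        if (!seed && slen) {
            buf = malloc(slen);
            if (!buf) {
                alg_put(a);           /* was leaked before the fix */
                return -ENOMEM;
            }
            seed = buf;
        }
        /* ... perform the actual reseed with 'seed' ... */
        free(buf);
        alg_put(a);                   /* normal-path release */
        return 0;
    }
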
1865 diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
1866 index a74c1a0e892d..c0e243668261 100644
1867 --- a/drivers/acpi/acpica/achware.h
1868 +++ b/drivers/acpi/acpica/achware.h
1869 @@ -101,7 +101,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);
1870
1871 acpi_status acpi_hw_enable_all_wakeup_gpes(void);
1872
1873 -u8 acpi_hw_check_all_gpes(void);
1874 +u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number);
1875
1876 acpi_status
1877 acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
1878 diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
1879 index 84b0b410310e..3e1813ebcca8 100644
1880 --- a/drivers/acpi/acpica/evxfgpe.c
1881 +++ b/drivers/acpi/acpica/evxfgpe.c
1882 @@ -799,17 +799,19 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
1883 *
1884 * FUNCTION: acpi_any_gpe_status_set
1885 *
1886 - * PARAMETERS: None
1887 + * PARAMETERS: gpe_skip_number - Number of the GPE to skip
1888 *
1889 * RETURN: Whether or not the status bit is set for any GPE
1890 *
1891 - * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
1892 - * of them is set or FALSE otherwise.
1893 + * DESCRIPTION: Check the status bits of all enabled GPEs, except for the one
1894 + * represented by the "skip" argument, and return TRUE if any of
1895 + * them is set or FALSE otherwise.
1896 *
1897 ******************************************************************************/
1898 -u32 acpi_any_gpe_status_set(void)
1899 +u32 acpi_any_gpe_status_set(u32 gpe_skip_number)
1900 {
1901 acpi_status status;
1902 + acpi_handle gpe_device;
1903 u8 ret;
1904
1905 ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
1906 @@ -819,7 +821,12 @@ u32 acpi_any_gpe_status_set(void)
1907 return (FALSE);
1908 }
1909
1910 - ret = acpi_hw_check_all_gpes();
1911 + status = acpi_get_gpe_device(gpe_skip_number, &gpe_device);
1912 + if (ACPI_FAILURE(status)) {
1913 + gpe_device = NULL;
1914 + }
1915 +
1916 + ret = acpi_hw_check_all_gpes(gpe_device, gpe_skip_number);
1917 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
1918
1919 return (ret);
1920 diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
1921 index b1d7d5f92495..12516b07336e 100644
1922 --- a/drivers/acpi/acpica/hwgpe.c
1923 +++ b/drivers/acpi/acpica/hwgpe.c
1924 @@ -444,12 +444,19 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
1925 return (AE_OK);
1926 }
1927
1928 +struct acpi_gpe_block_status_context {
1929 + struct acpi_gpe_register_info *gpe_skip_register_info;
1930 + u8 gpe_skip_mask;
1931 + u8 retval;
1932 +};
1933 +
1934 /******************************************************************************
1935 *
1936 * FUNCTION: acpi_hw_get_gpe_block_status
1937 *
1938 * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
1939 * gpe_block - Gpe Block info
1940 + * context - GPE list walk context data
1941 *
1942 * RETURN: Success
1943 *
1944 @@ -460,12 +467,13 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
1945 static acpi_status
1946 acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
1947 struct acpi_gpe_block_info *gpe_block,
1948 - void *ret_ptr)
1949 + void *context)
1950 {
1951 + struct acpi_gpe_block_status_context *c = context;
1952 struct acpi_gpe_register_info *gpe_register_info;
1953 u64 in_enable, in_status;
1954 acpi_status status;
1955 - u8 *ret = ret_ptr;
1956 + u8 ret_mask;
1957 u32 i;
1958
1959 /* Examine each GPE Register within the block */
1960 @@ -485,7 +493,11 @@ acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
1961 continue;
1962 }
1963
1964 - *ret |= in_enable & in_status;
1965 + ret_mask = in_enable & in_status;
1966 + if (ret_mask && c->gpe_skip_register_info == gpe_register_info) {
1967 + ret_mask &= ~c->gpe_skip_mask;
1968 + }
1969 + c->retval |= ret_mask;
1970 }
1971
1972 return (AE_OK);
1973 @@ -561,24 +573,41 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
1974 *
1975 * FUNCTION: acpi_hw_check_all_gpes
1976 *
1977 - * PARAMETERS: None
1978 + * PARAMETERS: gpe_skip_device - GPE device of the GPE to skip
1979 + * gpe_skip_number - Number of the GPE to skip
1980 *
1981 * RETURN: Combined status of all GPEs
1982 *
1983 - * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
1984 + * DESCRIPTION: Check all enabled GPEs in all GPE blocks, except for the one
1985 + * represented by the "skip" arguments, and return TRUE if the
1986 + * status bit is set for at least one of them or FALSE otherwise.
1987 *
1988 ******************************************************************************/
1989
1990 -u8 acpi_hw_check_all_gpes(void)
1991 +u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number)
1992 {
1993 - u8 ret = 0;
1994 + struct acpi_gpe_block_status_context context = {
1995 + .gpe_skip_register_info = NULL,
1996 + .retval = 0,
1997 + };
1998 + struct acpi_gpe_event_info *gpe_event_info;
1999 + acpi_cpu_flags flags;
2000
2001 ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
2002
2003 - (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
2004 + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
2005 +
2006 + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_skip_device,
2007 + gpe_skip_number);
2008 + if (gpe_event_info) {
2009 + context.gpe_skip_register_info = gpe_event_info->register_info;
2010 + context.gpe_skip_mask = acpi_hw_get_gpe_register_bit(gpe_event_info);
2011 + }
2012 +
2013 + acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
2014
2015 - return (ret != 0);
2016 + (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &context);
2017 + return (context.retval != 0);
2018 }
2019
2020 #endif /* !ACPI_REDUCED_HARDWARE */
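
The hwgpe.c rework threads a context structure through the GPE-list walk instead of a bare result byte: the GPE to skip is resolved once, under acpi_gbl_gpe_lock, to its register block and bit mask, and the per-register callback then clears exactly that bit from its enabled-and-set computation. The callback pattern in isolation (simplified types, not the ACPICA signatures):

    /* Walk context: which register to partially ignore, which bit in
     * it, and the accumulated "any status bit set" result. */
    struct gpe_walk_ctx {
        const void *skip_reg;
        unsigned char skip_mask;
        unsigned char retval;
    };

    /* Per-register callback: fold (enabled & status) into the result,
     * clearing only the skipped GPE's bit when this is its register. */
    static void check_register(const void *reg, unsigned char enabled,
                               unsigned char status,
                               struct gpe_walk_ctx *c)
    {
        unsigned char set = enabled & status;

        if (set && reg == c->skip_reg)
            set &= ~c->skip_mask;
        c->retval |= set;
    }
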
2021 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
2022 index ca5cdb621c2a..ed2ecb00004f 100644
2023 --- a/drivers/acpi/ec.c
2024 +++ b/drivers/acpi/ec.c
2025 @@ -1573,7 +1573,6 @@ static int acpi_ec_add(struct acpi_device *device)
2026
2027 if (boot_ec && ec->command_addr == boot_ec->command_addr &&
2028 ec->data_addr == boot_ec->data_addr) {
2029 - boot_ec_is_ecdt = false;
2030 /*
2031 * Trust PNP0C09 namespace location rather than
2032 * ECDT ID. But trust ECDT GPE rather than _GPE
2033 @@ -1593,9 +1592,12 @@ static int acpi_ec_add(struct acpi_device *device)
2034
2035 if (ec == boot_ec)
2036 acpi_handle_info(boot_ec->handle,
2037 - "Boot %s EC used to handle transactions and events\n",
2038 + "Boot %s EC initialization complete\n",
2039 boot_ec_is_ecdt ? "ECDT" : "DSDT");
2040
2041 + acpi_handle_info(ec->handle,
2042 + "EC: Used to handle transactions and events\n");
2043 +
2044 device->driver_data = ec;
2045
2046 ret = !!request_region(ec->data_addr, 1, "EC data");
2047 @@ -1962,6 +1964,11 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
2048 acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
2049 }
2050
2051 +bool acpi_ec_other_gpes_active(void)
2052 +{
2053 + return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX);
2054 +}
2055 +
2056 bool acpi_ec_dispatch_gpe(void)
2057 {
2058 u32 ret;
2059 diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
2060 index afe6636f9ad3..cbf7f34c3ce7 100644
2061 --- a/drivers/acpi/internal.h
2062 +++ b/drivers/acpi/internal.h
2063 @@ -201,6 +201,7 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
2064
2065 #ifdef CONFIG_PM_SLEEP
2066 void acpi_ec_flush_work(void);
2067 +bool acpi_ec_other_gpes_active(void);
2068 bool acpi_ec_dispatch_gpe(void);
2069 #endif
2070
2071 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
2072 index abd39cc5ff88..ce1d9048a36d 100644
2073 --- a/drivers/acpi/sleep.c
2074 +++ b/drivers/acpi/sleep.c
2075 @@ -1014,18 +1014,19 @@ static bool acpi_s2idle_wake(void)
2076 return true;
2077
2078 /*
2079 - * If there are no EC events to process and at least one of the
2080 - * other enabled GPEs is active, the wakeup is regarded as a
2081 - * genuine one.
2082 - *
2083 - * Note that the checks below must be carried out in this order
2084 - * to avoid returning prematurely due to a change of the EC GPE
2085 - * status bit from unset to set between the checks with the
2086 - * status bits of all the other GPEs unset.
2087 + * If the status bit is set for any enabled GPE other than the
2088 + * EC one, the wakeup is regarded as a genuine one.
2089 */
2090 - if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
2091 + if (acpi_ec_other_gpes_active())
2092 return true;
2093
2094 + /*
2095 + * If the EC GPE status bit has not been set, the wakeup is
2096 + * regarded as a spurious one.
2097 + */
2098 + if (!acpi_ec_dispatch_gpe())
2099 + return false;
2100 +
2101 /*
2102 * Cancel the wakeup and process all pending events in case
2103 * there are any wakeup ones in there.
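
With the sleep.c change the s2idle wakeup verdict becomes a strict two-step: a pending non-EC GPE is a genuine wakeup, an unset EC GPE status bit is a spurious one, and only a pending EC GPE falls through to dispatch-and-recheck. Flattened into a sketch (booleans stand in for the GPE queries; the recheck loop is collapsed into a comment):

    #include <stdbool.h>

    /* Simplified shape of acpi_s2idle_wake()'s new ordering; only the
     * two early verdicts are shown. */
    static bool s2idle_wake_decision(bool other_gpe_pending,
                                     bool ec_gpe_set)
    {
        if (other_gpe_pending)
            return true;    /* genuine wakeup: a non-EC GPE fired */

        if (!ec_gpe_set)
            return false;   /* spurious: nothing at all is pending */

        /* EC GPE pending: dispatch it, flush the EC work, and loop to
         * see whether a real wakeup event was behind it. */
        return false;
    }
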
2104 diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
2105 index 3ff14071617c..79f2aeeb482a 100644
2106 --- a/drivers/ata/libata-pmp.c
2107 +++ b/drivers/ata/libata-pmp.c
2108 @@ -763,6 +763,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
2109
2110 if (dev->flags & ATA_DFLAG_DETACH) {
2111 detach = 1;
2112 + rc = -ENODEV;
2113 goto fail;
2114 }
2115
2116 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
2117 index 58e09ffe8b9c..5af34a3201ed 100644
2118 --- a/drivers/ata/libata-scsi.c
2119 +++ b/drivers/ata/libata-scsi.c
2120 @@ -4553,22 +4553,19 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
2121 */
2122 shost->max_host_blocked = 1;
2123
2124 - rc = scsi_add_host_with_dma(ap->scsi_host,
2125 - &ap->tdev, ap->host->dev);
2126 + rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev);
2127 if (rc)
2128 - goto err_add;
2129 + goto err_alloc;
2130 }
2131
2132 return 0;
2133
2134 - err_add:
2135 - scsi_host_put(host->ports[i]->scsi_host);
2136 err_alloc:
2137 while (--i >= 0) {
2138 struct Scsi_Host *shost = host->ports[i]->scsi_host;
2139
2140 + /* scsi_host_put() is in ata_devres_release() */
2141 scsi_remove_host(shost);
2142 - scsi_host_put(shost);
2143 }
2144 return rc;
2145 }
2146 diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
2147 index 62ee90b4db56..70efbb22dfc3 100644
2148 --- a/drivers/base/firmware_loader/fallback.c
2149 +++ b/drivers/base/firmware_loader/fallback.c
2150 @@ -525,7 +525,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
2151 }
2152
2153 retval = fw_sysfs_wait_timeout(fw_priv, timeout);
2154 - if (retval < 0) {
2155 + if (retval < 0 && retval != -ENOENT) {
2156 mutex_lock(&fw_lock);
2157 fw_load_abort(fw_sysfs);
2158 mutex_unlock(&fw_lock);
2159 diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
2160 index cc85e87eaf05..8428d02cfe58 100644
2161 --- a/drivers/base/power/domain.c
2162 +++ b/drivers/base/power/domain.c
2163 @@ -2615,7 +2615,7 @@ static int genpd_iterate_idle_states(struct device_node *dn,
2164
2165 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2166 if (ret <= 0)
2167 - return ret;
2168 + return ret == -ENOENT ? 0 : ret;
2169
2170 /* Loop over the phandles until all the requested entry is found */
2171 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2172 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
2173 index 5817b51d2b15..0bd9b291bb29 100644
2174 --- a/drivers/base/power/wakeup.c
2175 +++ b/drivers/base/power/wakeup.c
2176 @@ -241,7 +241,9 @@ void wakeup_source_unregister(struct wakeup_source *ws)
2177 {
2178 if (ws) {
2179 wakeup_source_remove(ws);
2180 - wakeup_source_sysfs_remove(ws);
2181 + if (ws->dev)
2182 + wakeup_source_sysfs_remove(ws);
2183 +
2184 wakeup_source_destroy(ws);
2185 }
2186 }
2187 diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
2188 index 0e7da5015ccd..c4454cfc6d53 100644
2189 --- a/drivers/block/null_blk_main.c
2190 +++ b/drivers/block/null_blk_main.c
2191 @@ -579,6 +579,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
2192 if (tag != -1U) {
2193 cmd = &nq->cmds[tag];
2194 cmd->tag = tag;
2195 + cmd->error = BLK_STS_OK;
2196 cmd->nq = nq;
2197 if (nq->dev->irqmode == NULL_IRQ_TIMER) {
2198 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
2199 @@ -1335,6 +1336,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
2200 cmd->timer.function = null_cmd_timer_expired;
2201 }
2202 cmd->rq = bd->rq;
2203 + cmd->error = BLK_STS_OK;
2204 cmd->nq = nq;
2205
2206 blk_mq_start_request(bd->rq);
2207 @@ -1382,7 +1384,12 @@ static void cleanup_queues(struct nullb *nullb)
2208
2209 static void null_del_dev(struct nullb *nullb)
2210 {
2211 - struct nullb_device *dev = nullb->dev;
2212 + struct nullb_device *dev;
2213 +
2214 + if (!nullb)
2215 + return;
2216 +
2217 + dev = nullb->dev;
2218
2219 ida_simple_remove(&nullb_indexes, nullb->index);
2220
2221 @@ -1736,6 +1743,7 @@ out_cleanup_queues:
2222 cleanup_queues(nullb);
2223 out_free_nullb:
2224 kfree(nullb);
2225 + dev->nullb = NULL;
2226 out:
2227 return rv;
2228 }
2229 diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
2230 index c02be06c5299..ab5482202cfb 100644
2231 --- a/drivers/block/xen-blkfront.c
2232 +++ b/drivers/block/xen-blkfront.c
2233 @@ -47,6 +47,7 @@
2234 #include <linux/bitmap.h>
2235 #include <linux/list.h>
2236 #include <linux/workqueue.h>
2237 +#include <linux/sched/mm.h>
2238
2239 #include <xen/xen.h>
2240 #include <xen/xenbus.h>
2241 @@ -2188,10 +2189,12 @@ static void blkfront_setup_discard(struct blkfront_info *info)
2242
2243 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2244 {
2245 - unsigned int psegs, grants;
2246 + unsigned int psegs, grants, memflags;
2247 int err, i;
2248 struct blkfront_info *info = rinfo->dev_info;
2249
2250 + memflags = memalloc_noio_save();
2251 +
2252 if (info->max_indirect_segments == 0) {
2253 if (!HAS_EXTRA_REQ)
2254 grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2255 @@ -2223,7 +2226,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2256
2257 BUG_ON(!list_empty(&rinfo->indirect_pages));
2258 for (i = 0; i < num; i++) {
2259 - struct page *indirect_page = alloc_page(GFP_NOIO);
2260 + struct page *indirect_page = alloc_page(GFP_KERNEL);
2261 if (!indirect_page)
2262 goto out_of_memory;
2263 list_add(&indirect_page->lru, &rinfo->indirect_pages);
2264 @@ -2234,15 +2237,15 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2265 rinfo->shadow[i].grants_used =
2266 kvcalloc(grants,
2267 sizeof(rinfo->shadow[i].grants_used[0]),
2268 - GFP_NOIO);
2269 + GFP_KERNEL);
2270 rinfo->shadow[i].sg = kvcalloc(psegs,
2271 sizeof(rinfo->shadow[i].sg[0]),
2272 - GFP_NOIO);
2273 + GFP_KERNEL);
2274 if (info->max_indirect_segments)
2275 rinfo->shadow[i].indirect_grants =
2276 kvcalloc(INDIRECT_GREFS(grants),
2277 sizeof(rinfo->shadow[i].indirect_grants[0]),
2278 - GFP_NOIO);
2279 + GFP_KERNEL);
2280 if ((rinfo->shadow[i].grants_used == NULL) ||
2281 (rinfo->shadow[i].sg == NULL) ||
2282 (info->max_indirect_segments &&
2283 @@ -2251,6 +2254,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2284 sg_init_table(rinfo->shadow[i].sg, psegs);
2285 }
2286
2287 + memalloc_noio_restore(memflags);
2288
2289 return 0;
2290
2291 @@ -2270,6 +2274,9 @@ out_of_memory:
2292 __free_page(indirect_page);
2293 }
2294 }
2295 +
2296 + memalloc_noio_restore(memflags);
2297 +
2298 return -ENOMEM;
2299 }
2300
2301 diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
2302 index be79d6c6a4e4..1bb00a959c67 100644
2303 --- a/drivers/bus/sunxi-rsb.c
2304 +++ b/drivers/bus/sunxi-rsb.c
2305 @@ -345,7 +345,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
2306 if (ret)
2307 goto unlock;
2308
2309 - *buf = readl(rsb->regs + RSB_DATA);
2310 + *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0);
2311
2312 unlock:
2313 mutex_unlock(&rsb->lock);
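
The sunxi-rsb fix masks the 32-bit RSB_DATA readback down to the byte count the caller requested, since a sub-word read leaves stale data in the upper bytes of the register. GENMASK(len * 8 - 1, 0) builds that low-byte mask; a standalone check (using a simplified GENMASK that assumes 64-bit constants, unlike the kernel's BITS_PER_LONG-aware macro):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified GENMASK(h, l): set bits h..l, clear the rest. */
    #define GENMASK(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        uint32_t rsb_data = 0xdeadbeef;  /* full register readback    */
        unsigned int len = 1;            /* caller requested one byte */

        uint32_t val = rsb_data & GENMASK(len * 8 - 1, 0);
        printf("0x%x\n", val);           /* prints 0xef, not 0xdeadbeef */
        return 0;
    }
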
2314 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
2315 index 0b6e7f8d9729..90f5292e2051 100644
2316 --- a/drivers/char/ipmi/ipmi_msghandler.c
2317 +++ b/drivers/char/ipmi/ipmi_msghandler.c
2318 @@ -3207,8 +3207,8 @@ static void __get_guid(struct ipmi_smi *intf)
2319 if (rv)
2320 /* Send failed, no GUID available. */
2321 bmc->dyn_guid_set = 0;
2322 -
2323 - wait_event(intf->waitq, bmc->dyn_guid_set != 2);
2324 + else
2325 + wait_event(intf->waitq, bmc->dyn_guid_set != 2);
2326
2327 /* dyn_guid_set makes the guid data available. */
2328 smp_rmb();
2329 diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
2330 index 7a0fca659b6a..7460f230bae4 100644
2331 --- a/drivers/char/tpm/eventlog/common.c
2332 +++ b/drivers/char/tpm/eventlog/common.c
2333 @@ -99,11 +99,8 @@ static int tpm_read_log(struct tpm_chip *chip)
2334 *
2335 * If an event log is found then the securityfs files are setup to
2336 * export it to userspace, otherwise nothing is done.
2337 - *
2338 - * Returns -ENODEV if the firmware has no event log or securityfs is not
2339 - * supported.
2340 */
2341 -int tpm_bios_log_setup(struct tpm_chip *chip)
2342 +void tpm_bios_log_setup(struct tpm_chip *chip)
2343 {
2344 const char *name = dev_name(&chip->dev);
2345 unsigned int cnt;
2346 @@ -112,7 +109,7 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
2347
2348 rc = tpm_read_log(chip);
2349 if (rc < 0)
2350 - return rc;
2351 + return;
2352 log_version = rc;
2353
2354 cnt = 0;
2355 @@ -158,13 +155,12 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
2356 cnt++;
2357 }
2358
2359 - return 0;
2360 + return;
2361
2362 err:
2363 - rc = PTR_ERR(chip->bios_dir[cnt]);
2364 chip->bios_dir[cnt] = NULL;
2365 tpm_bios_log_teardown(chip);
2366 - return rc;
2367 + return;
2368 }
2369
2370 void tpm_bios_log_teardown(struct tpm_chip *chip)
2371 diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
2372 index 739b1d9d16b6..2c96977ad080 100644
2373 --- a/drivers/char/tpm/eventlog/tpm1.c
2374 +++ b/drivers/char/tpm/eventlog/tpm1.c
2375 @@ -115,6 +115,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
2376 u32 converted_event_size;
2377 u32 converted_event_type;
2378
2379 + (*pos)++;
2380 converted_event_size = do_endian_conversion(event->event_size);
2381
2382 v += sizeof(struct tcpa_event) + converted_event_size;
2383 @@ -132,7 +133,6 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
2384 ((v + sizeof(struct tcpa_event) + converted_event_size) > limit))
2385 return NULL;
2386
2387 - (*pos)++;
2388 return v;
2389 }
2390
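
Both TPM event-log hunks fix the same seq_file contract violation: a ->next() callback must advance *pos on every call, including the final one that returns NULL, otherwise the seq_file core keeps re-issuing the same position and a reader can spin at end-of-log. The obligation in isolation (event stepping simplified to a fixed size):

    #include <stddef.h>

    /* next() for a byte-addressed log: *pos moves first, on every
     * call, exactly as the relocated (*pos)++ now guarantees. */
    static const char *log_next(const char *v, const char *limit,
                                size_t event_size, long *pos)
    {
        (*pos)++;               /* advance even if we return NULL */

        v += event_size;
        if (v >= limit)
            return NULL;        /* end of log; position still moved */
        return v;
    }
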
2391 diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
2392 index b9aeda1cbcd7..e741b1157525 100644
2393 --- a/drivers/char/tpm/eventlog/tpm2.c
2394 +++ b/drivers/char/tpm/eventlog/tpm2.c
2395 @@ -94,6 +94,7 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
2396 size_t event_size;
2397 void *marker;
2398
2399 + (*pos)++;
2400 event_header = log->bios_event_log;
2401
2402 if (v == SEQ_START_TOKEN) {
2403 @@ -118,7 +119,6 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
2404 if (((v + event_size) >= limit) || (event_size == 0))
2405 return NULL;
2406
2407 - (*pos)++;
2408 return v;
2409 }
2410
2411 diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
2412 index 3d6d394a8661..58073836b555 100644
2413 --- a/drivers/char/tpm/tpm-chip.c
2414 +++ b/drivers/char/tpm/tpm-chip.c
2415 @@ -596,9 +596,7 @@ int tpm_chip_register(struct tpm_chip *chip)
2416
2417 tpm_sysfs_add_device(chip);
2418
2419 - rc = tpm_bios_log_setup(chip);
2420 - if (rc != 0 && rc != -ENODEV)
2421 - return rc;
2422 + tpm_bios_log_setup(chip);
2423
2424 tpm_add_ppi(chip);
2425
2426 diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
2427 index a7fea3e0ca86..218cb496222a 100644
2428 --- a/drivers/char/tpm/tpm.h
2429 +++ b/drivers/char/tpm/tpm.h
2430 @@ -464,7 +464,7 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
2431 int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
2432 size_t *bufsiz);
2433
2434 -int tpm_bios_log_setup(struct tpm_chip *chip);
2435 +void tpm_bios_log_setup(struct tpm_chip *chip);
2436 void tpm_bios_log_teardown(struct tpm_chip *chip);
2437 int tpm_dev_common_init(void);
2438 void tpm_dev_common_exit(void);
2439 diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
2440 index 956dd653a43d..c051ecba5cf8 100644
2441 --- a/drivers/clk/ingenic/jz4770-cgu.c
2442 +++ b/drivers/clk/ingenic/jz4770-cgu.c
2443 @@ -432,8 +432,10 @@ static void __init jz4770_cgu_init(struct device_node *np)
2444
2445 cgu = ingenic_cgu_new(jz4770_cgu_clocks,
2446 ARRAY_SIZE(jz4770_cgu_clocks), np);
2447 - if (!cgu)
2448 + if (!cgu) {
2449 pr_err("%s: failed to initialise CGU\n", __func__);
2450 + return;
2451 + }
2452
2453 retval = ingenic_cgu_register_clocks(cgu);
2454 if (retval)
2455 diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
2456 index a1a5f9cb439e..926696fba3f4 100644
2457 --- a/drivers/clk/ingenic/tcu.c
2458 +++ b/drivers/clk/ingenic/tcu.c
2459 @@ -189,7 +189,7 @@ static long ingenic_tcu_round_rate(struct clk_hw *hw, unsigned long req_rate,
2460 u8 prescale;
2461
2462 if (req_rate > rate)
2463 - return -EINVAL;
2464 + return rate;
2465
2466 prescale = ingenic_tcu_get_prescale(rate, req_rate);
2467
2468 diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
2469 index 648a09a1778a..edef3399c979 100644
2470 --- a/drivers/cpufreq/imx6q-cpufreq.c
2471 +++ b/drivers/cpufreq/imx6q-cpufreq.c
2472 @@ -280,6 +280,9 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
2473 void __iomem *base;
2474
2475 np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
2476 + if (!np)
2477 + np = of_find_compatible_node(NULL, NULL,
2478 + "fsl,imx6ull-ocotp");
2479 if (!np)
2480 return -ENOENT;
2481
2482 @@ -378,23 +381,24 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
2483 goto put_reg;
2484 }
2485
2486 + /* Because we have added the OPPs here, we must free them */
2487 + free_opp = true;
2488 +
2489 if (of_machine_is_compatible("fsl,imx6ul") ||
2490 of_machine_is_compatible("fsl,imx6ull")) {
2491 ret = imx6ul_opp_check_speed_grading(cpu_dev);
2492 if (ret) {
2493 if (ret == -EPROBE_DEFER)
2494 - goto put_node;
2495 + goto out_free_opp;
2496
2497 dev_err(cpu_dev, "failed to read ocotp: %d\n",
2498 ret);
2499 - goto put_node;
2500 + goto out_free_opp;
2501 }
2502 } else {
2503 imx6q_opp_check_speed_grading(cpu_dev);
2504 }
2505
2506 - /* Because we have added the OPPs here, we must free them */
2507 - free_opp = true;
2508 num = dev_pm_opp_get_opp_count(cpu_dev);
2509 if (num < 0) {
2510 ret = num;
2511 diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
2512 index 56f4bc0d209e..1806b1da4366 100644
2513 --- a/drivers/cpufreq/powernv-cpufreq.c
2514 +++ b/drivers/cpufreq/powernv-cpufreq.c
2515 @@ -1080,6 +1080,12 @@ free_and_return:
2516
2517 static inline void clean_chip_info(void)
2518 {
2519 + int i;
2520 +
2521 + /* flush any pending work items */
2522 + if (chips)
2523 + for (i = 0; i < nr_chips; i++)
2524 + cancel_work_sync(&chips[i].throttle);
2525 kfree(chips);
2526 }
2527
2528 diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
2529 index aa9ccca67045..d6c58184bb57 100644
2530 --- a/drivers/crypto/caam/caamalg_desc.c
2531 +++ b/drivers/crypto/caam/caamalg_desc.c
2532 @@ -1379,6 +1379,9 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
2533 const u32 ctx1_iv_off)
2534 {
2535 u32 *key_jump_cmd;
2536 + u32 options = cdata->algtype | OP_ALG_AS_INIT | OP_ALG_ENCRYPT;
2537 + bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) ==
2538 + OP_ALG_ALGSEL_CHACHA20);
2539
2540 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
2541 /* Skip if already shared */
2542 @@ -1417,14 +1420,15 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
2543 LDST_OFFSET_SHIFT));
2544
2545 /* Load operation */
2546 - append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
2547 - OP_ALG_ENCRYPT);
2548 + if (is_chacha20)
2549 + options |= OP_ALG_AS_FINALIZE;
2550 + append_operation(desc, options);
2551
2552 /* Perform operation */
2553 skcipher_append_src_dst(desc);
2554
2555 /* Store IV */
2556 - if (ivsize)
2557 + if (!is_chacha20 && ivsize)
2558 append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
2559 LDST_CLASS_1_CCB | (ctx1_iv_off <<
2560 LDST_OFFSET_SHIFT));
2561 @@ -1451,6 +1455,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
2562 const u32 ctx1_iv_off)
2563 {
2564 u32 *key_jump_cmd;
2565 + bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) ==
2566 + OP_ALG_ALGSEL_CHACHA20);
2567
2568 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
2569 /* Skip if already shared */
2570 @@ -1499,7 +1505,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
2571 skcipher_append_src_dst(desc);
2572
2573 /* Store IV */
2574 - if (ivsize)
2575 + if (!is_chacha20 && ivsize)
2576 append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
2577 LDST_CLASS_1_CCB | (ctx1_iv_off <<
2578 LDST_OFFSET_SHIFT));
2579 @@ -1518,7 +1524,13 @@ EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
2580 */
2581 void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
2582 {
2583 - __be64 sector_size = cpu_to_be64(512);
2584 + /*
2585 + * Set sector size to a big value, practically disabling
2586 + * sector size segmentation in xts implementation. We cannot
2587 + * take full advantage of this HW feature with existing
2588 + * crypto API / dm-crypt SW architecture.
2589 + */
2590 + __be64 sector_size = cpu_to_be64(BIT(15));
2591 u32 *key_jump_cmd;
2592
2593 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
2594 @@ -1571,7 +1583,13 @@ EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
2595 */
2596 void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
2597 {
2598 - __be64 sector_size = cpu_to_be64(512);
2599 + /*
2600 + * Set sector size to a big value, practically disabling
2601 + * sector size segmentation in xts implementation. We cannot
2602 + * take full advantage of this HW feature with existing
2603 + * crypto API / dm-crypt SW architecture.
2604 + */
2605 + __be64 sector_size = cpu_to_be64(BIT(15));
2606 u32 *key_jump_cmd;
2607
2608 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
2609 diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
2610 index a72586eccd81..954f14bddf1d 100644
2611 --- a/drivers/crypto/ccree/cc_buffer_mgr.c
2612 +++ b/drivers/crypto/ccree/cc_buffer_mgr.c
2613 @@ -87,6 +87,8 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
2614 {
2615 unsigned int nents = 0;
2616
2617 + *lbytes = 0;
2618 +
2619 while (nbytes && sg_list) {
2620 nents++;
2621 /* get the number of bytes in the last entry */
2622 @@ -95,6 +97,7 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
2623 nbytes : sg_list->length;
2624 sg_list = sg_next(sg_list);
2625 }
2626 +
2627 dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
2628 return nents;
2629 }
2630 @@ -290,37 +293,25 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
2631 unsigned int nbytes, int direction, u32 *nents,
2632 u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
2633 {
2634 - if (sg_is_last(sg)) {
2635 - /* One entry only case -set to DLLI */
2636 - if (dma_map_sg(dev, sg, 1, direction) != 1) {
2637 - dev_err(dev, "dma_map_sg() single buffer failed\n");
2638 - return -ENOMEM;
2639 - }
2640 - dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
2641 - &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
2642 - sg->offset, sg->length);
2643 - *lbytes = nbytes;
2644 - *nents = 1;
2645 - *mapped_nents = 1;
2646 - } else { /*sg_is_last*/
2647 - *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
2648 - if (*nents > max_sg_nents) {
2649 - *nents = 0;
2650 - dev_err(dev, "Too many fragments. current %d max %d\n",
2651 - *nents, max_sg_nents);
2652 - return -ENOMEM;
2653 - }
2654 - /* In case of mmu the number of mapped nents might
2655 - * be changed from the original sgl nents
2656 - */
2657 - *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
2658 - if (*mapped_nents == 0) {
2659 - *nents = 0;
2660 - dev_err(dev, "dma_map_sg() sg buffer failed\n");
2661 - return -ENOMEM;
2662 - }
2663 + int ret = 0;
2664 +
2665 + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
2666 + if (*nents > max_sg_nents) {
2667 + *nents = 0;
2668 + dev_err(dev, "Too many fragments. current %d max %d\n",
2669 + *nents, max_sg_nents);
2670 + return -ENOMEM;
2671 + }
2672 +
2673 + ret = dma_map_sg(dev, sg, *nents, direction);
2674 + if (dma_mapping_error(dev, ret)) {
2675 + *nents = 0;
2676 + dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
2677 + return -ENOMEM;
2678 }
2679
2680 + *mapped_nents = ret;
2681 +
2682 return 0;
2683 }
2684
2685 @@ -555,11 +546,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
2686 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
2687 areq_ctx->assoclen, req->cryptlen);
2688
2689 - dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
2690 + dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
2691 + DMA_BIDIRECTIONAL);
2692 if (req->src != req->dst) {
2693 dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
2694 sg_virt(req->dst));
2695 - dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2696 + dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
2697 DMA_BIDIRECTIONAL);
2698 }
2699 if (drvdata->coherent &&
2700 @@ -881,7 +873,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
2701 &src_last_bytes);
2702 sg_index = areq_ctx->src_sgl->length;
2703 //check where the data starts
2704 - while (sg_index <= size_to_skip) {
2705 + while (src_mapped_nents && (sg_index <= size_to_skip)) {
2706 src_mapped_nents--;
2707 offset -= areq_ctx->src_sgl->length;
2708 sgl = sg_next(areq_ctx->src_sgl);
2709 @@ -902,13 +894,17 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
2710
2711 if (req->src != req->dst) {
2712 size_for_map = areq_ctx->assoclen + req->cryptlen;
2713 - size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
2714 - authsize : 0;
2715 +
2716 + if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
2717 + size_for_map += authsize;
2718 + else
2719 + size_for_map -= authsize;
2720 +
2721 if (is_gcm4543)
2722 size_for_map += crypto_aead_ivsize(tfm);
2723
2724 rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
2725 - &areq_ctx->dst.nents,
2726 + &areq_ctx->dst.mapped_nents,
2727 LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
2728 &dst_mapped_nents);
2729 if (rc)
2730 @@ -921,7 +917,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
2731 offset = size_to_skip;
2732
2733 //check where the data starts
2734 - while (sg_index <= size_to_skip) {
2735 + while (dst_mapped_nents && sg_index <= size_to_skip) {
2736 dst_mapped_nents--;
2737 offset -= areq_ctx->dst_sgl->length;
2738 sgl = sg_next(areq_ctx->dst_sgl);
2739 @@ -1117,13 +1113,15 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
2740 }
2741
2742 size_to_map = req->cryptlen + areq_ctx->assoclen;
2743 - if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
2744 + /* If we do in-place encryption, we also need the auth tag */
2745 + if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
2746 + (req->src == req->dst)) {
2747 size_to_map += authsize;
2748 -
2749 + }
2750 if (is_gcm4543)
2751 size_to_map += crypto_aead_ivsize(tfm);
2752 rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
2753 - &areq_ctx->src.nents,
2754 + &areq_ctx->src.mapped_nents,
2755 (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
2756 LLI_MAX_NUM_OF_DATA_ENTRIES),
2757 &dummy, &mapped_nents);
2758 diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
2759 index af434872c6ff..827b6cb1236e 100644
2760 --- a/drivers/crypto/ccree/cc_buffer_mgr.h
2761 +++ b/drivers/crypto/ccree/cc_buffer_mgr.h
2762 @@ -25,6 +25,7 @@ enum cc_sg_cpy_direct {
2763
2764 struct cc_mlli {
2765 cc_sram_addr_t sram_addr;
2766 + unsigned int mapped_nents;
2767 unsigned int nents; //sg nents
2768 unsigned int mlli_nents; //mlli nents might be different than the above
2769 };
2770 diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
2771 index bf8d2197bc11..f8a48a84df2a 100644
2772 --- a/drivers/crypto/mxs-dcp.c
2773 +++ b/drivers/crypto/mxs-dcp.c
2774 @@ -20,6 +20,7 @@
2775 #include <crypto/sha.h>
2776 #include <crypto/internal/hash.h>
2777 #include <crypto/internal/skcipher.h>
2778 +#include <crypto/scatterwalk.h>
2779
2780 #define DCP_MAX_CHANS 4
2781 #define DCP_BUF_SZ PAGE_SIZE
2782 @@ -621,49 +622,46 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
2783 struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
2784 struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
2785 struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
2786 - const int nents = sg_nents(req->src);
2787
2788 uint8_t *in_buf = sdcp->coh->sha_in_buf;
2789 uint8_t *out_buf = sdcp->coh->sha_out_buf;
2790
2791 - uint8_t *src_buf;
2792 -
2793 struct scatterlist *src;
2794
2795 - unsigned int i, len, clen;
2796 + unsigned int i, len, clen, oft = 0;
2797 int ret;
2798
2799 int fin = rctx->fini;
2800 if (fin)
2801 rctx->fini = 0;
2802
2803 - for_each_sg(req->src, src, nents, i) {
2804 - src_buf = sg_virt(src);
2805 - len = sg_dma_len(src);
2806 -
2807 - do {
2808 - if (actx->fill + len > DCP_BUF_SZ)
2809 - clen = DCP_BUF_SZ - actx->fill;
2810 - else
2811 - clen = len;
2812 -
2813 - memcpy(in_buf + actx->fill, src_buf, clen);
2814 - len -= clen;
2815 - src_buf += clen;
2816 - actx->fill += clen;
2817 + src = req->src;
2818 + len = req->nbytes;
2819
2820 - /*
2821 - * If we filled the buffer and still have some
2822 - * more data, submit the buffer.
2823 - */
2824 - if (len && actx->fill == DCP_BUF_SZ) {
2825 - ret = mxs_dcp_run_sha(req);
2826 - if (ret)
2827 - return ret;
2828 - actx->fill = 0;
2829 - rctx->init = 0;
2830 - }
2831 - } while (len);
2832 + while (len) {
2833 + if (actx->fill + len > DCP_BUF_SZ)
2834 + clen = DCP_BUF_SZ - actx->fill;
2835 + else
2836 + clen = len;
2837 +
2838 + scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
2839 + 0);
2840 +
2841 + len -= clen;
2842 + oft += clen;
2843 + actx->fill += clen;
2844 +
2845 + /*
2846 + * If we filled the buffer and still have some
2847 + * more data, submit the buffer.
2848 + */
2849 + if (len && actx->fill == DCP_BUF_SZ) {
2850 + ret = mxs_dcp_run_sha(req);
2851 + if (ret)
2852 + return ret;
2853 + actx->fill = 0;
2854 + rctx->init = 0;
2855 + }
2856 }
2857
2858 if (fin) {
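
The mxs-dcp rewrite drops the sg_virt()/sg_dma_len() walk, which is only valid for flat lowmem scatterlists, in favour of scatterwalk_map_and_copy() driven by a running offset into req->src. A userspace analogue of copying by logical offset through a segment chain (toy types, same control flow):

    #include <stddef.h>
    #include <string.h>

    struct seg { const unsigned char *buf; size_t len; };

    /* Copy 'len' bytes starting at logical offset 'oft' from a chain
     * of segments, the way scatterwalk_map_and_copy() is used in the
     * hunk, instead of assuming one virtually contiguous buffer. */
    static void copy_from_chain(unsigned char *dst,
                                const struct seg *segs, size_t nsegs,
                                size_t oft, size_t len)
    {
        for (size_t i = 0; i < nsegs && len; i++) {
            if (oft >= segs[i].len) {   /* segment entirely before oft */
                oft -= segs[i].len;
                continue;
            }
            size_t n = segs[i].len - oft;
            if (n > len)
                n = len;
            memcpy(dst, segs[i].buf + oft, n);
            dst += n;
            len -= n;
            oft = 0;                    /* later segments start at 0 */
        }
    }
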
2859 diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
2860 index 9cd70d1a5622..eb2df89d4924 100644
2861 --- a/drivers/firmware/arm_sdei.c
2862 +++ b/drivers/firmware/arm_sdei.c
2863 @@ -491,11 +491,6 @@ static int _sdei_event_unregister(struct sdei_event *event)
2864 {
2865 lockdep_assert_held(&sdei_events_lock);
2866
2867 - spin_lock(&sdei_list_lock);
2868 - event->reregister = false;
2869 - event->reenable = false;
2870 - spin_unlock(&sdei_list_lock);
2871 -
2872 if (event->type == SDEI_EVENT_TYPE_SHARED)
2873 return sdei_api_event_unregister(event->event_num);
2874
2875 @@ -518,6 +513,11 @@ int sdei_event_unregister(u32 event_num)
2876 break;
2877 }
2878
2879 + spin_lock(&sdei_list_lock);
2880 + event->reregister = false;
2881 + event->reenable = false;
2882 + spin_unlock(&sdei_list_lock);
2883 +
2884 err = _sdei_event_unregister(event);
2885 if (err)
2886 break;
2887 @@ -585,26 +585,15 @@ static int _sdei_event_register(struct sdei_event *event)
2888
2889 lockdep_assert_held(&sdei_events_lock);
2890
2891 - spin_lock(&sdei_list_lock);
2892 - event->reregister = true;
2893 - spin_unlock(&sdei_list_lock);
2894 -
2895 if (event->type == SDEI_EVENT_TYPE_SHARED)
2896 return sdei_api_event_register(event->event_num,
2897 sdei_entry_point,
2898 event->registered,
2899 SDEI_EVENT_REGISTER_RM_ANY, 0);
2900
2901 -
2902 err = sdei_do_cross_call(_local_event_register, event);
2903 - if (err) {
2904 - spin_lock(&sdei_list_lock);
2905 - event->reregister = false;
2906 - event->reenable = false;
2907 - spin_unlock(&sdei_list_lock);
2908 -
2909 + if (err)
2910 sdei_do_cross_call(_local_event_unregister, event);
2911 - }
2912
2913 return err;
2914 }
2915 @@ -632,8 +621,17 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
2916 break;
2917 }
2918
2919 + spin_lock(&sdei_list_lock);
2920 + event->reregister = true;
2921 + spin_unlock(&sdei_list_lock);
2922 +
2923 err = _sdei_event_register(event);
2924 if (err) {
2925 + spin_lock(&sdei_list_lock);
2926 + event->reregister = false;
2927 + event->reenable = false;
2928 + spin_unlock(&sdei_list_lock);
2929 +
2930 sdei_event_destroy(event);
2931 pr_warn("Failed to register event %u: %d\n", event_num,
2932 err);
2933 diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
2934 index ad8a4bc074fb..e3861d267d9a 100644
2935 --- a/drivers/firmware/efi/efi.c
2936 +++ b/drivers/firmware/efi/efi.c
2937 @@ -562,7 +562,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
2938 }
2939 }
2940
2941 - if (efi_enabled(EFI_MEMMAP))
2942 + if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
2943 efi_memattr_init();
2944
2945 efi_tpm_eventlog_init();
2946 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2947 index 13694d5eba47..f423b5384705 100644
2948 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2949 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2950 @@ -2176,8 +2176,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2951 {
2952 int i, r;
2953
2954 - amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2955 - amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2956
2957 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2958 if (!adev->ip_blocks[i].status.valid)
2959 @@ -3070,6 +3068,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2960 }
2961 }
2962
2963 + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2964 + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2965 +
2966 amdgpu_amdkfd_suspend(adev);
2967
2968 amdgpu_ras_suspend(adev);
2969 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
2970 index 40034efa64bb..c34ddaa65324 100644
2971 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
2972 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
2973 @@ -1026,6 +1026,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
2974 adev->gfx.mec_fw_write_wait = true;
2975 break;
2976 default:
2977 + adev->gfx.me_fw_write_wait = true;
2978 + adev->gfx.mec_fw_write_wait = true;
2979 break;
2980 }
2981 }
2982 diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
2983 index e5283dafc414..9a083cd80133 100644
2984 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
2985 +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
2986 @@ -184,6 +184,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
2987 uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
2988 DpmClocks_t *clk_table = smu->smu_table.clocks_table;
2989 SmuMetrics_t metrics;
2990 + bool cur_value_match_level = false;
2991
2992 if (!clk_table || clk_type >= SMU_CLK_COUNT)
2993 return -EINVAL;
2994 @@ -243,8 +244,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
2995 GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
2996 size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
2997 cur_value == value ? "*" : "");
2998 + if (cur_value == value)
2999 + cur_value_match_level = true;
3000 }
3001
3002 + if (!cur_value_match_level)
3003 + size += sprintf(buf + size, " %uMhz *\n", cur_value);
3004 +
3005 return size;
3006 }
3007
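
The renoir_ppt.c hunk covers the case where the live clock sits between DPM table entries, so no printed row would carry the current-value marker; it now appends a synthetic starred line instead. Sketched as a plain formatter (fixed buffer, invented names):

    #include <stdio.h>

    /* Print each DPM level, starring the one matching the current
     * clock; if none matches, emit an extra starred line so the
     * current value is always visible - the behaviour the hunk adds. */
    static int print_clk_levels(char *buf, const unsigned int *levels,
                                int count, unsigned int cur)
    {
        int size = 0, i, matched = 0;

        for (i = 0; i < count; i++) {
            size += sprintf(buf + size, "%d: %uMhz %s\n", i, levels[i],
                            cur == levels[i] ? "*" : "");
            if (cur == levels[i])
                matched = 1;
        }
        if (!matched)
            size += sprintf(buf + size, "   %uMhz *\n", cur);
        return size;
    }

    int main(void)
    {
        char buf[256];
        unsigned int levels[] = { 400, 800, 1200 };

        print_clk_levels(buf, levels, 3, 600);  /* 600 matches no level */
        fputs(buf, stdout);
        return 0;
    }
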
3008 diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
3009 index 2a390ddd37dd..89cd6da118a3 100644
3010 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
3011 +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
3012 @@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
3013 freq = table->SocClocks[dpm_level].Freq; \
3014 break; \
3015 case SMU_MCLK: \
3016 - freq = table->MemClocks[dpm_level].Freq; \
3017 + freq = table->FClocks[dpm_level].Freq; \
3018 break; \
3019 case SMU_DCEFCLK: \
3020 freq = table->DcfClocks[dpm_level].Freq; \
3021 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
3022 index c5e9e2305fff..4b7aaad07423 100644
3023 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
3024 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
3025 @@ -2694,9 +2694,9 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
3026 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3027 {
3028 int ret = 0;
3029 - int i = 0;
3030 struct drm_dp_mst_branch *mstb = NULL;
3031
3032 + mutex_lock(&mgr->payload_lock);
3033 mutex_lock(&mgr->lock);
3034 if (mst_state == mgr->mst_state)
3035 goto out_unlock;
3036 @@ -2755,25 +2755,18 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
3037 /* this can fail if the device is gone */
3038 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3039 ret = 0;
3040 - mutex_lock(&mgr->payload_lock);
3041 - memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
3042 + memset(mgr->payloads, 0,
3043 + mgr->max_payloads * sizeof(mgr->payloads[0]));
3044 + memset(mgr->proposed_vcpis, 0,
3045 + mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
3046 mgr->payload_mask = 0;
3047 set_bit(0, &mgr->payload_mask);
3048 - for (i = 0; i < mgr->max_payloads; i++) {
3049 - struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3050 -
3051 - if (vcpi) {
3052 - vcpi->vcpi = 0;
3053 - vcpi->num_slots = 0;
3054 - }
3055 - mgr->proposed_vcpis[i] = NULL;
3056 - }
3057 mgr->vcpi_mask = 0;
3058 - mutex_unlock(&mgr->payload_lock);
3059 }
3060
3061 out_unlock:
3062 mutex_unlock(&mgr->lock);
3063 + mutex_unlock(&mgr->payload_lock);
3064 if (mstb)
3065 drm_dp_mst_topology_put_mstb(mstb);
3066 return ret;
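
The drm_dp_mst_topology.c change widens payload_lock to cover all of drm_dp_mst_topology_mgr_set_mst() and, crucially, acquires it before mgr->lock, so every path sees one consistent lock order instead of payload_lock nesting inside the mgr->lock section. The rule as a two-mutex sketch:

    #include <pthread.h>

    static pthread_mutex_t payload_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t mgr_lock     = PTHREAD_MUTEX_INITIALIZER;

    /* Fixed order: payload_lock, then mgr_lock.  A path taking them
     * in the opposite order could deadlock against this one; holding
     * the outer lock across the whole function removes that risk. */
    static void set_mst_state(void)
    {
        pthread_mutex_lock(&payload_lock);
        pthread_mutex_lock(&mgr_lock);

        /* ... reset payloads[], proposed_vcpis[], topology state ... */

        pthread_mutex_unlock(&mgr_lock);
        pthread_mutex_unlock(&payload_lock);
    }
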
3067 diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
3068 index a86a3ab2771c..235729f4aadb 100644
3069 --- a/drivers/gpu/drm/drm_pci.c
3070 +++ b/drivers/gpu/drm/drm_pci.c
3071 @@ -51,8 +51,6 @@
3072 drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
3073 {
3074 drm_dma_handle_t *dmah;
3075 - unsigned long addr;
3076 - size_t sz;
3077
3078 /* pci_alloc_consistent only guarantees alignment to the smallest
3079 * PAGE_SIZE order which is greater than or equal to the requested size.
3080 @@ -68,20 +66,13 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
3081 dmah->size = size;
3082 dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
3083 &dmah->busaddr,
3084 - GFP_KERNEL | __GFP_COMP);
3085 + GFP_KERNEL);
3086
3087 if (dmah->vaddr == NULL) {
3088 kfree(dmah);
3089 return NULL;
3090 }
3091
3092 - /* XXX - Is virt_to_page() legal for consistent mem? */
3093 - /* Reserve */
3094 - for (addr = (unsigned long)dmah->vaddr, sz = size;
3095 - sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
3096 - SetPageReserved(virt_to_page((void *)addr));
3097 - }
3098 -
3099 return dmah;
3100 }
3101
3102 @@ -94,19 +85,9 @@ EXPORT_SYMBOL(drm_pci_alloc);
3103 */
3104 void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
3105 {
3106 - unsigned long addr;
3107 - size_t sz;
3108 -
3109 - if (dmah->vaddr) {
3110 - /* XXX - Is virt_to_page() legal for consistent mem? */
3111 - /* Unreserve */
3112 - for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
3113 - sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
3114 - ClearPageReserved(virt_to_page((void *)addr));
3115 - }
3116 + if (dmah->vaddr)
3117 dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
3118 dmah->busaddr);
3119 - }
3120 }
3121
3122 /**
3123 diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
3124 index 8adbf2861bff..e6795bafcbb9 100644
3125 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
3126 +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
3127 @@ -32,6 +32,7 @@ struct etnaviv_pm_domain {
3128 };
3129
3130 struct etnaviv_pm_domain_meta {
3131 + unsigned int feature;
3132 const struct etnaviv_pm_domain *domains;
3133 u32 nr_domains;
3134 };
3135 @@ -410,36 +411,78 @@ static const struct etnaviv_pm_domain doms_vg[] = {
3136
3137 static const struct etnaviv_pm_domain_meta doms_meta[] = {
3138 {
3139 + .feature = chipFeatures_PIPE_3D,
3140 .nr_domains = ARRAY_SIZE(doms_3d),
3141 .domains = &doms_3d[0]
3142 },
3143 {
3144 + .feature = chipFeatures_PIPE_2D,
3145 .nr_domains = ARRAY_SIZE(doms_2d),
3146 .domains = &doms_2d[0]
3147 },
3148 {
3149 + .feature = chipFeatures_PIPE_VG,
3150 .nr_domains = ARRAY_SIZE(doms_vg),
3151 .domains = &doms_vg[0]
3152 }
3153 };
3154
3155 +static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu)
3156 +{
3157 + unsigned int num = 0, i;
3158 +
3159 + for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
3160 + const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
3161 +
3162 + if (gpu->identity.features & meta->feature)
3163 + num += meta->nr_domains;
3164 + }
3165 +
3166 + return num;
3167 +}
3168 +
3169 +static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
3170 + unsigned int index)
3171 +{
3172 + const struct etnaviv_pm_domain *domain = NULL;
3173 + unsigned int offset = 0, i;
3174 +
3175 + for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
3176 + const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
3177 +
3178 + if (!(gpu->identity.features & meta->feature))
3179 + continue;
3180 +
3181 + if (meta->nr_domains < (index - offset)) {
3182 + offset += meta->nr_domains;
3183 + continue;
3184 + }
3185 +
3186 + domain = meta->domains + (index - offset);
3187 + }
3188 +
3189 + return domain;
3190 +}
3191 +
3192 int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
3193 struct drm_etnaviv_pm_domain *domain)
3194 {
3195 - const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe];
3196 + const unsigned int nr_domains = num_pm_domains(gpu);
3197 const struct etnaviv_pm_domain *dom;
3198
3199 - if (domain->iter >= meta->nr_domains)
3200 + if (domain->iter >= nr_domains)
3201 return -EINVAL;
3202
3203 - dom = meta->domains + domain->iter;
3204 + dom = pm_domain(gpu, domain->iter);
3205 + if (!dom)
3206 + return -EINVAL;
3207
3208 domain->id = domain->iter;
3209 domain->nr_signals = dom->nr_signals;
3210 strncpy(domain->name, dom->name, sizeof(domain->name));
3211
3212 domain->iter++;
3213 - if (domain->iter == meta->nr_domains)
3214 + if (domain->iter == nr_domains)
3215 domain->iter = 0xff;
3216
3217 return 0;
3218 @@ -448,14 +491,16 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
3219 int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
3220 struct drm_etnaviv_pm_signal *signal)
3221 {
3222 - const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe];
3223 + const unsigned int nr_domains = num_pm_domains(gpu);
3224 const struct etnaviv_pm_domain *dom;
3225 const struct etnaviv_pm_signal *sig;
3226
3227 - if (signal->domain >= meta->nr_domains)
3228 + if (signal->domain >= nr_domains)
3229 return -EINVAL;
3230
3231 - dom = meta->domains + signal->domain;
3232 + dom = pm_domain(gpu, signal->domain);
3233 + if (!dom)
3234 + return -EINVAL;
3235
3236 if (signal->iter >= dom->nr_signals)
3237 return -EINVAL;
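num_pm_domains() and pm_domain() above linearize several feature-gated domain tables into one flat index space. A standalone C sketch of the same mapping idea, with invented tables and feature bits:

#include <stdio.h>

struct group {
        unsigned int feature;        /* feature bit gating this table */
        unsigned int len;
        const char *const *names;
};

static const char *const g3d[] = { "HI", "PE", "SH" };
static const char *const g2d[] = { "PE2D" };

static const struct group groups[] = {
        { 1u << 0, 3, g3d },
        { 1u << 1, 1, g2d },
};

/* Map a flat index onto the concatenation of the present groups. */
static const char *lookup(unsigned int features, unsigned int index)
{
        unsigned int offset = 0, i;

        for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
                const struct group *g = &groups[i];

                if (!(features & g->feature))
                        continue;
                if (index - offset >= g->len) {
                        offset += g->len;        /* index lies past this group */
                        continue;
                }
                return g->names[index - offset];
        }
        return NULL;
}

int main(void)
{
        printf("%s\n", lookup(0x3, 3));        /* prints "PE2D" */
        return 0;
}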
3238 diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
3239 index 8eb2b3ec01ed..b3c77c988d1c 100644
3240 --- a/drivers/gpu/drm/i915/display/intel_ddi.c
3241 +++ b/drivers/gpu/drm/i915/display/intel_ddi.c
3242 @@ -2124,7 +2124,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
3243 return;
3244
3245 dig_port = enc_to_dig_port(&encoder->base);
3246 - intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
3247 +
3248 + if (!intel_phy_is_tc(dev_priv, phy) ||
3249 + dig_port->tc_mode != TC_PORT_TBT_ALT)
3250 + intel_display_power_get(dev_priv,
3251 + dig_port->ddi_io_power_domain);
3252
3253 /*
3254 * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
3255 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3256 index bd4e41380777..3d8dff2d894a 100644
3257 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3258 +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3259 @@ -931,11 +931,13 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
3260
3261 static void reloc_gpu_flush(struct reloc_cache *cache)
3262 {
3263 - GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
3264 + struct drm_i915_gem_object *obj = cache->rq->batch->obj;
3265 +
3266 + GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
3267 cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
3268
3269 - __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
3270 - i915_gem_object_unpin_map(cache->rq->batch->obj);
3271 + __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
3272 + i915_gem_object_unpin_map(obj);
3273
3274 intel_gt_chipset_flush(cache->rq->engine->gt);
3275
3276 diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
3277 index 2af64459b3d7..dfb29e6eeff1 100644
3278 --- a/drivers/gpu/drm/scheduler/sched_main.c
3279 +++ b/drivers/gpu/drm/scheduler/sched_main.c
3280 @@ -627,7 +627,9 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
3281
3282 trace_drm_sched_process_job(s_fence);
3283
3284 + dma_fence_get(&s_fence->finished);
3285 drm_sched_fence_finished(s_fence);
3286 + dma_fence_put(&s_fence->finished);
3287 wake_up_interruptible(&sched->wake_up_worker);
3288 }
3289
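The dma_fence_get()/dma_fence_put() pair added above pins the fence across the signalling call, because signalling may drop what was the last reference while the caller still touches the object. A standalone sketch of the take-a-temporary-reference pattern, with an invented refcounted type:

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refs;
};

static struct obj *obj_get(struct obj *o)
{
        o->refs++;
        return o;
}

static void obj_put(struct obj *o)
{
        if (--o->refs == 0) {
                printf("freeing object\n");
                free(o);
        }
}

/* May drop what was the caller's last reference. */
static void signal_completion(struct obj *o)
{
        obj_put(o);
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        o->refs = 1;
        obj_get(o);              /* temporary reference for the call below */
        signal_completion(o);    /* drops the original reference */
        obj_put(o);              /* object actually freed here, not mid-call */
        return 0;
}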
3290 diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
3291 index a7a81846d5b1..635dd697ac0b 100644
3292 --- a/drivers/i2c/busses/i2c-pca-platform.c
3293 +++ b/drivers/i2c/busses/i2c-pca-platform.c
3294 @@ -140,7 +140,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
3295 int ret = 0;
3296 int irq;
3297
3298 - irq = platform_get_irq(pdev, 0);
3299 + irq = platform_get_irq_optional(pdev, 0);
3300 /* If irq is 0, we do polling. */
3301 if (irq < 0)
3302 irq = 0;
3303 diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
3304 index 54e1fc8a495e..f7f7b5b64720 100644
3305 --- a/drivers/i2c/busses/i2c-st.c
3306 +++ b/drivers/i2c/busses/i2c-st.c
3307 @@ -434,6 +434,7 @@ static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev)
3308 /**
3309 * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode
3310 * @i2c_dev: Controller's private data
3311 + * @max: Maximum amount of data to fill into the Tx FIFO
3312 *
3313 * This function fills the Tx FIFO with a fixed pattern when
3314 * in read mode to trigger the clock.
3315 diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c
3316 index 14b55bacdd0f..fb078e049413 100644
3317 --- a/drivers/input/keyboard/tm2-touchkey.c
3318 +++ b/drivers/input/keyboard/tm2-touchkey.c
3319 @@ -75,6 +75,14 @@ static struct touchkey_variant aries_touchkey_variant = {
3320 .cmd_led_off = ARIES_TOUCHKEY_CMD_LED_OFF,
3321 };
3322
3323 +static const struct touchkey_variant tc360_touchkey_variant = {
3324 + .keycode_reg = 0x00,
3325 + .base_reg = 0x00,
3326 + .fixed_regulator = true,
3327 + .cmd_led_on = TM2_TOUCHKEY_CMD_LED_ON,
3328 + .cmd_led_off = TM2_TOUCHKEY_CMD_LED_OFF,
3329 +};
3330 +
3331 static int tm2_touchkey_led_brightness_set(struct led_classdev *led_dev,
3332 enum led_brightness brightness)
3333 {
3334 @@ -327,6 +335,9 @@ static const struct of_device_id tm2_touchkey_of_match[] = {
3335 }, {
3336 .compatible = "cypress,aries-touchkey",
3337 .data = &aries_touchkey_variant,
3338 + }, {
3339 + .compatible = "coreriver,tc360-touchkey",
3340 + .data = &tc360_touchkey_variant,
3341 },
3342 { },
3343 };
3344 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
3345 index dc974c288e88..08e919dbeb5d 100644
3346 --- a/drivers/input/serio/i8042-x86ia64io.h
3347 +++ b/drivers/input/serio/i8042-x86ia64io.h
3348 @@ -530,6 +530,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
3349 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
3350 },
3351 },
3352 + {
3353 + /*
3354 + * Acer Aspire 5738z
3355 + * Touchpad stops working in mux mode when disabled and re-enabled
3356 + * via the touchpad enable/disable toggle hotkey
3357 + */
3358 + .matches = {
3359 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3360 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
3361 + },
3362 + },
3363 { }
3364 };
3365
3366 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
3367 index 11f3b50dcdcb..263cf9240b16 100644
3368 --- a/drivers/irqchip/irq-gic-v3-its.c
3369 +++ b/drivers/irqchip/irq-gic-v3-its.c
3370 @@ -2985,12 +2985,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d,
3371 return 0;
3372 }
3373
3374 +static int its_vpe_retrigger(struct irq_data *d)
3375 +{
3376 + return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
3377 +}
3378 +
3379 static struct irq_chip its_vpe_irq_chip = {
3380 .name = "GICv4-vpe",
3381 .irq_mask = its_vpe_mask_irq,
3382 .irq_unmask = its_vpe_unmask_irq,
3383 .irq_eoi = irq_chip_eoi_parent,
3384 .irq_set_affinity = its_vpe_set_affinity,
3385 + .irq_retrigger = its_vpe_retrigger,
3386 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
3387 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
3388 };
3389 diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
3390 index 928858dada75..f1386733d3bc 100644
3391 --- a/drivers/irqchip/irq-versatile-fpga.c
3392 +++ b/drivers/irqchip/irq-versatile-fpga.c
3393 @@ -6,6 +6,7 @@
3394 #include <linux/irq.h>
3395 #include <linux/io.h>
3396 #include <linux/irqchip.h>
3397 +#include <linux/irqchip/chained_irq.h>
3398 #include <linux/irqchip/versatile-fpga.h>
3399 #include <linux/irqdomain.h>
3400 #include <linux/module.h>
3401 @@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d)
3402
3403 static void fpga_irq_handle(struct irq_desc *desc)
3404 {
3405 + struct irq_chip *chip = irq_desc_get_chip(desc);
3406 struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
3407 - u32 status = readl(f->base + IRQ_STATUS);
3408 + u32 status;
3409 +
3410 + chained_irq_enter(chip, desc);
3411
3412 + status = readl(f->base + IRQ_STATUS);
3413 if (status == 0) {
3414 do_bad_IRQ(desc);
3415 - return;
3416 + goto out;
3417 }
3418
3419 do {
3420 @@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc)
3421 status &= ~(1 << irq);
3422 generic_handle_irq(irq_find_mapping(f->domain, irq));
3423 } while (status);
3424 +
3425 +out:
3426 + chained_irq_exit(chip, desc);
3427 }
3428
3429 /*
3430 @@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node,
3431 if (of_property_read_u32(node, "valid-mask", &valid_mask))
3432 valid_mask = 0;
3433
3434 + writel(clear_mask, base + IRQ_ENABLE_CLEAR);
3435 + writel(clear_mask, base + FIQ_ENABLE_CLEAR);
3436 +
3437 /* Some chips are cascaded from a parent IRQ */
3438 parent_irq = irq_of_parse_and_map(node, 0);
3439 if (!parent_irq) {
3440 @@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node,
3441
3442 fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
3443
3444 - writel(clear_mask, base + IRQ_ENABLE_CLEAR);
3445 - writel(clear_mask, base + FIQ_ENABLE_CLEAR);
3446 -
3447 /*
3448 * On Versatile AB/PB, some secondary interrupts have a direct
3449 * pass-thru to the primary controller for IRQs 20 and 22-31 which need
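chained_irq_enter()/chained_irq_exit() bracket a cascaded handler so the parent interrupt is acked and EOI'd exactly once, even on the early-exit path the old code returned from directly. A skeleton of the pattern, assuming a hypothetical status register and per-controller structure:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

#define MY_IRQ_STATUS 0x00              /* hypothetical register offset */

struct my_chip_data {                   /* hypothetical controller state */
        void __iomem *base;
        struct irq_domain *domain;
};

static void my_cascade_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct my_chip_data *data = irq_desc_get_handler_data(desc);
        unsigned long status;

        chained_irq_enter(chip, desc);  /* ack/mask the parent IRQ */

        status = readl(data->base + MY_IRQ_STATUS);
        while (status) {
                unsigned int bit = __ffs(status);

                status &= ~BIT(bit);
                generic_handle_irq(irq_find_mapping(data->domain, bit));
        }

        chained_irq_exit(chip, desc);   /* EOI/unmask the parent IRQ */
}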
3450 diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
3451 index 54e4fdd607e1..17712456fa63 100644
3452 --- a/drivers/md/dm-clone-metadata.c
3453 +++ b/drivers/md/dm-clone-metadata.c
3454 @@ -656,7 +656,7 @@ bool dm_clone_is_range_hydrated(struct dm_clone_metadata *cmd,
3455 return (bit >= (start + nr_regions));
3456 }
3457
3458 -unsigned long dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd)
3459 +unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd)
3460 {
3461 return bitmap_weight(cmd->region_map, cmd->nr_regions);
3462 }
3463 @@ -748,7 +748,7 @@ static int __metadata_commit(struct dm_clone_metadata *cmd)
3464 static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
3465 {
3466 int r;
3467 - unsigned long word, flags;
3468 + unsigned long word;
3469
3470 word = 0;
3471 do {
3472 @@ -772,9 +772,9 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
3473 return r;
3474
3475 /* Update the changed flag */
3476 - spin_lock_irqsave(&cmd->bitmap_lock, flags);
3477 + spin_lock_irq(&cmd->bitmap_lock);
3478 dmap->changed = 0;
3479 - spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
3480 + spin_unlock_irq(&cmd->bitmap_lock);
3481
3482 return 0;
3483 }
3484 @@ -782,7 +782,6 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
3485 int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd)
3486 {
3487 int r = 0;
3488 - unsigned long flags;
3489 struct dirty_map *dmap, *next_dmap;
3490
3491 down_write(&cmd->lock);
3492 @@ -808,9 +807,9 @@ int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd)
3493 }
3494
3495 /* Swap dirty bitmaps */
3496 - spin_lock_irqsave(&cmd->bitmap_lock, flags);
3497 + spin_lock_irq(&cmd->bitmap_lock);
3498 cmd->current_dmap = next_dmap;
3499 - spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
3500 + spin_unlock_irq(&cmd->bitmap_lock);
3501
3502 /* Set old dirty bitmap as currently committing */
3503 cmd->committing_dmap = dmap;
3504 @@ -851,6 +850,12 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
3505 struct dirty_map *dmap;
3506 unsigned long word, flags;
3507
3508 + if (unlikely(region_nr >= cmd->nr_regions)) {
3509 + DMERR("Region %lu out of range (total number of regions %lu)",
3510 + region_nr, cmd->nr_regions);
3511 + return -ERANGE;
3512 + }
3513 +
3514 word = region_nr / BITS_PER_LONG;
3515
3516 spin_lock_irqsave(&cmd->bitmap_lock, flags);
3517 @@ -878,9 +883,16 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
3518 {
3519 int r = 0;
3520 struct dirty_map *dmap;
3521 - unsigned long word, region_nr, flags;
3522 + unsigned long word, region_nr;
3523
3524 - spin_lock_irqsave(&cmd->bitmap_lock, flags);
3525 + if (unlikely(start >= cmd->nr_regions || (start + nr_regions) < start ||
3526 + (start + nr_regions) > cmd->nr_regions)) {
3527 + DMERR("Invalid region range: start %lu, nr_regions %lu (total number of regions %lu)",
3528 + start, nr_regions, cmd->nr_regions);
3529 + return -ERANGE;
3530 + }
3531 +
3532 + spin_lock_irq(&cmd->bitmap_lock);
3533
3534 if (cmd->read_only) {
3535 r = -EPERM;
3536 @@ -898,7 +910,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
3537 }
3538 }
3539 out:
3540 - spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
3541 + spin_unlock_irq(&cmd->bitmap_lock);
3542
3543 return r;
3544 }
3545 @@ -965,13 +977,11 @@ out:
3546
3547 void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
3548 {
3549 - unsigned long flags;
3550 -
3551 down_write(&cmd->lock);
3552
3553 - spin_lock_irqsave(&cmd->bitmap_lock, flags);
3554 + spin_lock_irq(&cmd->bitmap_lock);
3555 cmd->read_only = 1;
3556 - spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
3557 + spin_unlock_irq(&cmd->bitmap_lock);
3558
3559 if (!cmd->fail_io)
3560 dm_bm_set_read_only(cmd->bm);
3561 @@ -981,13 +991,11 @@ void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
3562
3563 void dm_clone_metadata_set_read_write(struct dm_clone_metadata *cmd)
3564 {
3565 - unsigned long flags;
3566 -
3567 down_write(&cmd->lock);
3568
3569 - spin_lock_irqsave(&cmd->bitmap_lock, flags);
3570 + spin_lock_irq(&cmd->bitmap_lock);
3571 cmd->read_only = 0;
3572 - spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
3573 + spin_unlock_irq(&cmd->bitmap_lock);
3574
3575 if (!cmd->fail_io)
3576 dm_bm_set_read_write(cmd->bm);
3577 diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h
3578 index c7848c49aef8..d848b8799c07 100644
3579 --- a/drivers/md/dm-clone-metadata.h
3580 +++ b/drivers/md/dm-clone-metadata.h
3581 @@ -44,7 +44,9 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
3582 * @start: Starting region number
3583 * @nr_regions: Number of regions in the range
3584 *
3585 - * This function doesn't block, so it's safe to call it from interrupt context.
3586 + * This function doesn't block, but since it uses spin_lock_irq()/spin_unlock_irq()
3587 + * it's NOT safe to call it from any context where interrupts are disabled, e.g.,
3588 + * from interrupt context.
3589 */
3590 int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
3591 unsigned long nr_regions);
3592 @@ -154,7 +156,7 @@ bool dm_clone_is_range_hydrated(struct dm_clone_metadata *cmd,
3593 /*
3594 * Returns the number of hydrated regions.
3595 */
3596 -unsigned long dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd);
3597 +unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd);
3598
3599 /*
3600 * Returns the first unhydrated region with region_nr >= @start
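The reworded comment above is the key to the locking changes across these dm-clone files: spin_unlock_irq() unconditionally re-enables interrupts, so the _irq variants are only safe in contexts where interrupts are known to be enabled, while the _irqsave variants restore whatever state the caller had. A short sketch of the two idioms, with hypothetical functions:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Process context only: unlock unconditionally re-enables interrupts. */
static void from_process_context(void)
{
        spin_lock_irq(&example_lock);
        /* ... critical section ... */
        spin_unlock_irq(&example_lock); /* interrupts are on after this */
}

/* Safe from any context: restores the caller's interrupt state. */
static void from_any_context(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&example_lock, flags);
}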
3601 diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
3602 index e6e5d24a79f5..eb7a5d3ba81a 100644
3603 --- a/drivers/md/dm-clone-target.c
3604 +++ b/drivers/md/dm-clone-target.c
3605 @@ -282,7 +282,7 @@ static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
3606 /* Get the address of the region in sectors */
3607 static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
3608 {
3609 - return (region_nr << clone->region_shift);
3610 + return ((sector_t)region_nr << clone->region_shift);
3611 }
3612
3613 /* Get the region number of the bio */
3614 @@ -293,10 +293,17 @@ static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
3615
3616 /* Get the region range covered by the bio */
3617 static void bio_region_range(struct clone *clone, struct bio *bio,
3618 - unsigned long *rs, unsigned long *re)
3619 + unsigned long *rs, unsigned long *nr_regions)
3620 {
3621 + unsigned long end;
3622 +
3623 *rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
3624 - *re = bio_end_sector(bio) >> clone->region_shift;
3625 + end = bio_end_sector(bio) >> clone->region_shift;
3626 +
3627 + if (*rs >= end)
3628 + *nr_regions = 0;
3629 + else
3630 + *nr_regions = end - *rs;
3631 }
3632
3633 /* Check whether a bio overwrites a region */
3634 @@ -338,8 +345,6 @@ static void submit_bios(struct bio_list *bios)
3635 */
3636 static void issue_bio(struct clone *clone, struct bio *bio)
3637 {
3638 - unsigned long flags;
3639 -
3640 if (!bio_triggers_commit(clone, bio)) {
3641 generic_make_request(bio);
3642 return;
3643 @@ -358,9 +363,9 @@ static void issue_bio(struct clone *clone, struct bio *bio)
3644 * Batch together any bios that trigger commits and then issue a single
3645 * commit for them in process_deferred_flush_bios().
3646 */
3647 - spin_lock_irqsave(&clone->lock, flags);
3648 + spin_lock_irq(&clone->lock);
3649 bio_list_add(&clone->deferred_flush_bios, bio);
3650 - spin_unlock_irqrestore(&clone->lock, flags);
3651 + spin_unlock_irq(&clone->lock);
3652
3653 wake_worker(clone);
3654 }
3655 @@ -456,7 +461,7 @@ static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
3656
3657 static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
3658 {
3659 - unsigned long rs, re;
3660 + unsigned long rs, nr_regions;
3661
3662 /*
3663 * If the destination device supports discards, remap and trim the
3664 @@ -465,9 +470,9 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
3665 */
3666 if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
3667 remap_to_dest(clone, bio);
3668 - bio_region_range(clone, bio, &rs, &re);
3669 - trim_bio(bio, rs << clone->region_shift,
3670 - (re - rs) << clone->region_shift);
3671 + bio_region_range(clone, bio, &rs, &nr_regions);
3672 + trim_bio(bio, region_to_sector(clone, rs),
3673 + nr_regions << clone->region_shift);
3674 generic_make_request(bio);
3675 } else
3676 bio_endio(bio);
3677 @@ -475,12 +480,21 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
3678
3679 static void process_discard_bio(struct clone *clone, struct bio *bio)
3680 {
3681 - unsigned long rs, re, flags;
3682 + unsigned long rs, nr_regions;
3683
3684 - bio_region_range(clone, bio, &rs, &re);
3685 - BUG_ON(re > clone->nr_regions);
3686 + bio_region_range(clone, bio, &rs, &nr_regions);
3687 + if (!nr_regions) {
3688 + bio_endio(bio);
3689 + return;
3690 + }
3691
3692 - if (unlikely(rs == re)) {
3693 + if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs ||
3694 + (rs + nr_regions) > clone->nr_regions)) {
3695 + DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)",
3696 + clone_device_name(clone), rs, nr_regions,
3697 + clone->nr_regions,
3698 + (unsigned long long)bio->bi_iter.bi_sector,
3699 + bio_sectors(bio));
3700 bio_endio(bio);
3701 return;
3702 }
3703 @@ -489,7 +503,7 @@ static void process_discard_bio(struct clone *clone, struct bio *bio)
3704 * The covered regions are already hydrated so we just need to pass
3705 * down the discard.
3706 */
3707 - if (dm_clone_is_range_hydrated(clone->cmd, rs, re - rs)) {
3708 + if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) {
3709 complete_discard_bio(clone, bio, true);
3710 return;
3711 }
3712 @@ -507,9 +521,9 @@ static void process_discard_bio(struct clone *clone, struct bio *bio)
3713 /*
3714 * Defer discard processing.
3715 */
3716 - spin_lock_irqsave(&clone->lock, flags);
3717 + spin_lock_irq(&clone->lock);
3718 bio_list_add(&clone->deferred_discard_bios, bio);
3719 - spin_unlock_irqrestore(&clone->lock, flags);
3720 + spin_unlock_irq(&clone->lock);
3721
3722 wake_worker(clone);
3723 }
3724 @@ -784,11 +798,14 @@ static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr
3725 struct dm_io_region from, to;
3726 struct clone *clone = hd->clone;
3727
3728 + if (WARN_ON(!nr_regions))
3729 + return;
3730 +
3731 region_size = clone->region_size;
3732 region_start = hd->region_nr;
3733 region_end = region_start + nr_regions - 1;
3734
3735 - total_size = (nr_regions - 1) << clone->region_shift;
3736 + total_size = region_to_sector(clone, nr_regions - 1);
3737
3738 if (region_end == clone->nr_regions - 1) {
3739 /*
3740 @@ -1167,13 +1184,13 @@ static void process_deferred_discards(struct clone *clone)
3741 int r = -EPERM;
3742 struct bio *bio;
3743 struct blk_plug plug;
3744 - unsigned long rs, re, flags;
3745 + unsigned long rs, nr_regions;
3746 struct bio_list discards = BIO_EMPTY_LIST;
3747
3748 - spin_lock_irqsave(&clone->lock, flags);
3749 + spin_lock_irq(&clone->lock);
3750 bio_list_merge(&discards, &clone->deferred_discard_bios);
3751 bio_list_init(&clone->deferred_discard_bios);
3752 - spin_unlock_irqrestore(&clone->lock, flags);
3753 + spin_unlock_irq(&clone->lock);
3754
3755 if (bio_list_empty(&discards))
3756 return;
3757 @@ -1183,14 +1200,13 @@ static void process_deferred_discards(struct clone *clone)
3758
3759 /* Update the metadata */
3760 bio_list_for_each(bio, &discards) {
3761 - bio_region_range(clone, bio, &rs, &re);
3762 + bio_region_range(clone, bio, &rs, &nr_regions);
3763 /*
3764 * A discard request might cover regions that have been already
3765 * hydrated. There is no need to update the metadata for these
3766 * regions.
3767 */
3768 - r = dm_clone_cond_set_range(clone->cmd, rs, re - rs);
3769 -
3770 + r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions);
3771 if (unlikely(r))
3772 break;
3773 }
3774 @@ -1203,13 +1219,12 @@ out:
3775
3776 static void process_deferred_bios(struct clone *clone)
3777 {
3778 - unsigned long flags;
3779 struct bio_list bios = BIO_EMPTY_LIST;
3780
3781 - spin_lock_irqsave(&clone->lock, flags);
3782 + spin_lock_irq(&clone->lock);
3783 bio_list_merge(&bios, &clone->deferred_bios);
3784 bio_list_init(&clone->deferred_bios);
3785 - spin_unlock_irqrestore(&clone->lock, flags);
3786 + spin_unlock_irq(&clone->lock);
3787
3788 if (bio_list_empty(&bios))
3789 return;
3790 @@ -1220,7 +1235,6 @@ static void process_deferred_bios(struct clone *clone)
3791 static void process_deferred_flush_bios(struct clone *clone)
3792 {
3793 struct bio *bio;
3794 - unsigned long flags;
3795 bool dest_dev_flushed;
3796 struct bio_list bios = BIO_EMPTY_LIST;
3797 struct bio_list bio_completions = BIO_EMPTY_LIST;
3798 @@ -1229,13 +1243,13 @@ static void process_deferred_flush_bios(struct clone *clone)
3799 * If there are any deferred flush bios, we must commit the metadata
3800 * before issuing them or signaling their completion.
3801 */
3802 - spin_lock_irqsave(&clone->lock, flags);
3803 + spin_lock_irq(&clone->lock);
3804 bio_list_merge(&bios, &clone->deferred_flush_bios);
3805 bio_list_init(&clone->deferred_flush_bios);
3806
3807 bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
3808 bio_list_init(&clone->deferred_flush_completions);
3809 - spin_unlock_irqrestore(&clone->lock, flags);
3810 + spin_unlock_irq(&clone->lock);
3811
3812 if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
3813 !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
3814 @@ -1455,7 +1469,7 @@ static void clone_status(struct dm_target *ti, status_type_t type,
3815 goto error;
3816 }
3817
3818 - DMEMIT("%u %llu/%llu %llu %lu/%lu %u ",
3819 + DMEMIT("%u %llu/%llu %llu %u/%lu %u ",
3820 DM_CLONE_METADATA_BLOCK_SIZE,
3821 (unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks),
3822 (unsigned long long)nr_metadata_blocks,
3823 @@ -1775,6 +1789,7 @@ error:
3824 static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3825 {
3826 int r;
3827 + sector_t nr_regions;
3828 struct clone *clone;
3829 struct dm_arg_set as;
3830
3831 @@ -1816,7 +1831,16 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3832 goto out_with_source_dev;
3833
3834 clone->region_shift = __ffs(clone->region_size);
3835 - clone->nr_regions = dm_sector_div_up(ti->len, clone->region_size);
3836 + nr_regions = dm_sector_div_up(ti->len, clone->region_size);
3837 +
3838 + /* Check for overflow */
3839 + if (nr_regions != (unsigned long)nr_regions) {
3840 + ti->error = "Too many regions. Consider increasing the region size";
3841 + r = -EOVERFLOW;
3842 + goto out_with_source_dev;
3843 + }
3844 +
3845 + clone->nr_regions = nr_regions;
3846
3847 r = validate_nr_regions(clone->nr_regions, &ti->error);
3848 if (r)
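Two changes in this file guard the same 32-bit failure mode, where unsigned long is 32 bits but sector_t is 64: region_to_sector() now widens before shifting, and clone_ctr() rejects region counts that do not survive a round trip through unsigned long. A standalone demonstration with invented sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Widen before shifting: a 32-bit shift silently wraps. */
        unsigned int region_nr = 1u << 20;     /* stands in for 32-bit ulong */
        unsigned int region_shift = 15;
        uint64_t wrong = region_nr << region_shift;             /* wraps to 0 */
        uint64_t right = (uint64_t)region_nr << region_shift;   /* 2^35 */

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);

        /* Round-trip check: truncation to 32 bits changes the value. */
        uint64_t nr_regions = (uint64_t)1 << 36;

        if (nr_regions != (uint32_t)nr_regions)
                printf("too many regions: %llu does not fit\n",
                       (unsigned long long)nr_regions);
        return 0;
}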
3849 diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
3850 index 145bc2e7eaf0..56248773a9e0 100644
3851 --- a/drivers/md/dm-integrity.c
3852 +++ b/drivers/md/dm-integrity.c
3853 @@ -1514,7 +1514,7 @@ static void integrity_metadata(struct work_struct *w)
3854 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
3855 char *checksums;
3856 unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
3857 - char checksums_onstack[HASH_MAX_DIGESTSIZE];
3858 + char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
3859 unsigned sectors_to_process = dio->range.n_sectors;
3860 sector_t sector = dio->range.logical_sector;
3861
3862 @@ -1743,7 +1743,7 @@ retry_kmap:
3863 } while (++s < ic->sectors_per_block);
3864 #ifdef INTERNAL_VERIFY
3865 if (ic->internal_hash) {
3866 - char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
3867 + char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
3868
3869 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
3870 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
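The (size_t) casts above are needed because the kernel's max() macro type-checks its arguments; with mismatched types it stops being a constant expression, which matters when the result sizes an on-stack array. A hedged illustration, assuming one int-typed and one size_t-typed constant:

#include <linux/kernel.h>       /* max() on v5.4 */

#define DIGEST_MAX 64           /* int-typed constant */
#define TAG_MAX ((size_t)68)    /* size_t-typed constant */

static void example(void)
{
        /*
         * max(DIGEST_MAX, TAG_MAX) would mix int and size_t, trip the
         * macro's type check, and no longer fold to a constant array
         * size; casting to a common type keeps it constant.
         */
        char buf[max((size_t)DIGEST_MAX, TAG_MAX)];

        buf[0] = 0;
}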
3871 diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
3872 index 3ceeb6b404ed..49147e634046 100644
3873 --- a/drivers/md/dm-verity-fec.c
3874 +++ b/drivers/md/dm-verity-fec.c
3875 @@ -551,6 +551,7 @@ void verity_fec_dtr(struct dm_verity *v)
3876 mempool_exit(&f->rs_pool);
3877 mempool_exit(&f->prealloc_pool);
3878 mempool_exit(&f->extra_pool);
3879 + mempool_exit(&f->output_pool);
3880 kmem_cache_destroy(f->cache);
3881
3882 if (f->data_bufio)
3883 diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
3884 index 184dabce1bad..5bf60d6be96a 100644
3885 --- a/drivers/md/dm-writecache.c
3886 +++ b/drivers/md/dm-writecache.c
3887 @@ -872,6 +872,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
3888 struct wc_entry *e = &wc->entries[b];
3889 e->index = b;
3890 e->write_in_progress = false;
3891 + cond_resched();
3892 }
3893
3894 return 0;
3895 @@ -926,6 +927,7 @@ static void writecache_resume(struct dm_target *ti)
3896 e->original_sector = le64_to_cpu(wme.original_sector);
3897 e->seq_count = le64_to_cpu(wme.seq_count);
3898 }
3899 + cond_resched();
3900 }
3901 #endif
3902 for (b = 0; b < wc->n_blocks; b++) {
3903 @@ -1770,8 +1772,10 @@ static int init_memory(struct dm_writecache *wc)
3904 pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
3905 pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));
3906
3907 - for (b = 0; b < wc->n_blocks; b++)
3908 + for (b = 0; b < wc->n_blocks; b++) {
3909 write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
3910 + cond_resched();
3911 + }
3912
3913 writecache_flush_all_metadata(wc);
3914 writecache_commit_flushed(wc, false);
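The cond_resched() calls above follow a standard pattern: per-entry initialization loops over potentially huge tables yield the CPU periodically so non-preemptible kernels do not hit soft-lockup warnings. Sketch with a hypothetical entry type:

#include <linux/sched.h>
#include <linux/types.h>

struct entry {                  /* hypothetical table entry */
        u64 index;
        bool busy;
};

static void init_table(struct entry *tbl, size_t n)
{
        size_t i;

        for (i = 0; i < n; i++) {
                tbl[i].index = i;
                tbl[i].busy = false;
                cond_resched(); /* yield inside long loops */
        }
}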
3915 diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
3916 index 5205cf9bbfd9..e0a6cf9239f1 100644
3917 --- a/drivers/md/dm-zoned-metadata.c
3918 +++ b/drivers/md/dm-zoned-metadata.c
3919 @@ -1107,7 +1107,6 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
3920
3921 if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
3922 set_bit(DMZ_RND, &zone->flags);
3923 - zmd->nr_rnd_zones++;
3924 } else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
3925 blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
3926 set_bit(DMZ_SEQ, &zone->flags);
3927 diff --git a/drivers/md/md.c b/drivers/md/md.c
3928 index 4e7c9f398bc6..6b69a12ca2d8 100644
3929 --- a/drivers/md/md.c
3930 +++ b/drivers/md/md.c
3931 @@ -6040,7 +6040,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
3932 static void mddev_detach(struct mddev *mddev)
3933 {
3934 md_bitmap_wait_behind_writes(mddev);
3935 - if (mddev->pers && mddev->pers->quiesce) {
3936 + if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
3937 mddev->pers->quiesce(mddev, 1);
3938 mddev->pers->quiesce(mddev, 0);
3939 }
3940 diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
3941 index 34b7046d9702..1adcd1ed1664 100644
3942 --- a/drivers/media/i2c/ov5695.c
3943 +++ b/drivers/media/i2c/ov5695.c
3944 @@ -971,16 +971,9 @@ unlock_and_return:
3945 return ret;
3946 }
3947
3948 -/* Calculate the delay in us by clock rate and clock cycles */
3949 -static inline u32 ov5695_cal_delay(u32 cycles)
3950 -{
3951 - return DIV_ROUND_UP(cycles, OV5695_XVCLK_FREQ / 1000 / 1000);
3952 -}
3953 -
3954 static int __ov5695_power_on(struct ov5695 *ov5695)
3955 {
3956 - int ret;
3957 - u32 delay_us;
3958 + int i, ret;
3959 struct device *dev = &ov5695->client->dev;
3960
3961 ret = clk_prepare_enable(ov5695->xvclk);
3962 @@ -991,21 +984,28 @@ static int __ov5695_power_on(struct ov5695 *ov5695)
3963
3964 gpiod_set_value_cansleep(ov5695->reset_gpio, 1);
3965
3966 - ret = regulator_bulk_enable(OV5695_NUM_SUPPLIES, ov5695->supplies);
3967 - if (ret < 0) {
3968 - dev_err(dev, "Failed to enable regulators\n");
3969 - goto disable_clk;
3970 + /*
3971 + * The hardware requires the regulators to be powered on in order,
3972 + * so enable them one by one.
3973 + */
3974 + for (i = 0; i < OV5695_NUM_SUPPLIES; i++) {
3975 + ret = regulator_enable(ov5695->supplies[i].consumer);
3976 + if (ret) {
3977 + dev_err(dev, "Failed to enable %s: %d\n",
3978 + ov5695->supplies[i].supply, ret);
3979 + goto disable_reg_clk;
3980 + }
3981 }
3982
3983 gpiod_set_value_cansleep(ov5695->reset_gpio, 0);
3984
3985 - /* 8192 cycles prior to first SCCB transaction */
3986 - delay_us = ov5695_cal_delay(8192);
3987 - usleep_range(delay_us, delay_us * 2);
3988 + usleep_range(1000, 1200);
3989
3990 return 0;
3991
3992 -disable_clk:
3993 +disable_reg_clk:
3994 + for (--i; i >= 0; i--)
3995 + regulator_disable(ov5695->supplies[i].consumer);
3996 clk_disable_unprepare(ov5695->xvclk);
3997
3998 return ret;
3999 @@ -1013,9 +1013,22 @@ disable_clk:
4000
4001 static void __ov5695_power_off(struct ov5695 *ov5695)
4002 {
4003 + struct device *dev = &ov5695->client->dev;
4004 + int i, ret;
4005 +
4006 clk_disable_unprepare(ov5695->xvclk);
4007 gpiod_set_value_cansleep(ov5695->reset_gpio, 1);
4008 - regulator_bulk_disable(OV5695_NUM_SUPPLIES, ov5695->supplies);
4009 +
4010 + /*
4011 + * The hardware requires the regulators to be powered off in order,
4012 + * so disable them one by one.
4013 + */
4014 + for (i = OV5695_NUM_SUPPLIES - 1; i >= 0; i--) {
4015 + ret = regulator_disable(ov5695->supplies[i].consumer);
4016 + if (ret)
4017 + dev_err(dev, "Failed to disable %s: %d\n",
4018 + ov5695->supplies[i].supply, ret);
4019 + }
4020 }
4021
4022 static int __maybe_unused ov5695_runtime_resume(struct device *dev)
4023 @@ -1285,7 +1298,7 @@ static int ov5695_probe(struct i2c_client *client,
4024 if (clk_get_rate(ov5695->xvclk) != OV5695_XVCLK_FREQ)
4025 dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n");
4026
4027 - ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
4028 + ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
4029 if (IS_ERR(ov5695->reset_gpio)) {
4030 dev_err(dev, "Failed to get reset-gpios\n");
4031 return -EINVAL;
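As the new comments say, this sensor needs its supplies powered strictly in order, so the bulk enable/disable calls give way to per-supply loops, with the error path unwinding in reverse everything enabled so far. A condensed sketch of that enable/unwind shape, with hypothetical parameters:

#include <linux/regulator/consumer.h>

static int power_on_in_order(struct regulator_bulk_data *supplies, int num)
{
        int i, ret;

        for (i = 0; i < num; i++) {
                ret = regulator_enable(supplies[i].consumer);
                if (ret)
                        goto unwind;
        }
        return 0;

unwind:
        /* Disable, in reverse order, everything enabled so far. */
        for (--i; i >= 0; i--)
                regulator_disable(supplies[i].consumer);
        return ret;
}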
4032 diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
4033 index 078141712c88..0b977e73ceb2 100644
4034 --- a/drivers/media/i2c/video-i2c.c
4035 +++ b/drivers/media/i2c/video-i2c.c
4036 @@ -255,7 +255,7 @@ static int amg88xx_set_power(struct video_i2c_data *data, bool on)
4037 return amg88xx_set_power_off(data);
4038 }
4039
4040 -#if IS_ENABLED(CONFIG_HWMON)
4041 +#if IS_REACHABLE(CONFIG_HWMON)
4042
4043 static const u32 amg88xx_temp_config[] = {
4044 HWMON_T_INPUT,
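IS_ENABLED(CONFIG_HWMON) is true for =y and =m alike; IS_REACHABLE() additionally requires that the symbols can actually be linked from the current object, which protects this driver when it is built in while hwmon is a module. Illustration:

#include <linux/kconfig.h>

/*
 * With CONFIG_HWMON=m and this driver built in, IS_ENABLED() is still
 * true but the hwmon symbols are unreachable at link time;
 * IS_REACHABLE() is false in exactly that case.
 */
#if IS_REACHABLE(CONFIG_HWMON)
/* ... register the hwmon device ... */
#endif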
4045 diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
4046 index d3d1748a7ef6..33f70e1def94 100644
4047 --- a/drivers/media/platform/qcom/venus/firmware.c
4048 +++ b/drivers/media/platform/qcom/venus/firmware.c
4049 @@ -44,8 +44,14 @@ static void venus_reset_cpu(struct venus_core *core)
4050
4051 int venus_set_hw_state(struct venus_core *core, bool resume)
4052 {
4053 - if (core->use_tz)
4054 - return qcom_scm_set_remote_state(resume, 0);
4055 + int ret;
4056 +
4057 + if (core->use_tz) {
4058 + ret = qcom_scm_set_remote_state(resume, 0);
4059 + if (resume && ret == -EINVAL)
4060 + ret = 0;
4061 + return ret;
4062 + }
4063
4064 if (resume)
4065 venus_reset_cpu(core);
4066 diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
4067 index 2293d936e49c..7f515a4b9bd1 100644
4068 --- a/drivers/media/platform/qcom/venus/hfi_parser.c
4069 +++ b/drivers/media/platform/qcom/venus/hfi_parser.c
4070 @@ -181,6 +181,7 @@ static void parse_codecs(struct venus_core *core, void *data)
4071 if (IS_V1(core)) {
4072 core->dec_codecs &= ~HFI_VIDEO_CODEC_HEVC;
4073 core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK;
4074 + core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC;
4075 }
4076 }
4077
4078 diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
4079 index 223161f9c403..955a49b8e9c0 100644
4080 --- a/drivers/media/platform/ti-vpe/cal.c
4081 +++ b/drivers/media/platform/ti-vpe/cal.c
4082 @@ -266,8 +266,6 @@ struct cal_ctx {
4083 struct v4l2_subdev *sensor;
4084 struct v4l2_fwnode_endpoint endpoint;
4085
4086 - struct v4l2_async_subdev asd;
4087 -
4088 struct v4l2_fh fh;
4089 struct cal_dev *dev;
4090 struct cc_data *cc;
4091 @@ -537,16 +535,16 @@ static void enable_irqs(struct cal_ctx *ctx)
4092
4093 static void disable_irqs(struct cal_ctx *ctx)
4094 {
4095 + u32 val;
4096 +
4097 /* Disable IRQ_WDMA_END 0/1 */
4098 - reg_write_field(ctx->dev,
4099 - CAL_HL_IRQENABLE_CLR(2),
4100 - CAL_HL_IRQ_CLEAR,
4101 - CAL_HL_IRQ_MASK(ctx->csi2_port));
4102 + val = 0;
4103 + set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
4104 + reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val);
4105 /* Disable IRQ_WDMA_START 0/1 */
4106 - reg_write_field(ctx->dev,
4107 - CAL_HL_IRQENABLE_CLR(3),
4108 - CAL_HL_IRQ_CLEAR,
4109 - CAL_HL_IRQ_MASK(ctx->csi2_port));
4110 + val = 0;
4111 + set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
4112 + reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val);
4113 /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
4114 reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
4115 }
4116 @@ -1648,7 +1646,6 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
4117
4118 parent = pdev->dev.of_node;
4119
4120 - asd = &ctx->asd;
4121 endpoint = &ctx->endpoint;
4122
4123 ep_node = NULL;
4124 @@ -1695,8 +1692,6 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
4125 ctx_dbg(3, ctx, "can't get remote parent\n");
4126 goto cleanup_exit;
4127 }
4128 - asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
4129 - asd->match.fwnode = of_fwnode_handle(sensor_node);
4130
4131 v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), endpoint);
4132
4133 @@ -1726,9 +1721,17 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
4134
4135 v4l2_async_notifier_init(&ctx->notifier);
4136
4137 + asd = kzalloc(sizeof(*asd), GFP_KERNEL);
4138 + if (!asd)
4139 + goto cleanup_exit;
4140 +
4141 + asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
4142 + asd->match.fwnode = of_fwnode_handle(sensor_node);
4143 +
4144 ret = v4l2_async_notifier_add_subdev(&ctx->notifier, asd);
4145 if (ret) {
4146 ctx_err(ctx, "Error adding asd\n");
4147 + kfree(asd);
4148 goto cleanup_exit;
4149 }
4150
4151 diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
4152 index a56fc634d2d6..ea91a9afa6a0 100644
4153 --- a/drivers/media/rc/keymaps/Makefile
4154 +++ b/drivers/media/rc/keymaps/Makefile
4155 @@ -117,6 +117,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
4156 rc-videomate-m1f.o \
4157 rc-videomate-s350.o \
4158 rc-videomate-tv-pvr.o \
4159 + rc-videostrong-kii-pro.o \
4160 rc-wetek-hub.o \
4161 rc-wetek-play2.o \
4162 rc-winfast.o \
4163 diff --git a/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c b/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c
4164 new file mode 100644
4165 index 000000000000..414d4d231e7e
4166 --- /dev/null
4167 +++ b/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c
4168 @@ -0,0 +1,83 @@
4169 +// SPDX-License-Identifier: GPL-2.0+
4170 +//
4171 +// Copyright (C) 2019 Mohammad Rasim <mohammad.rasim96@gmail.com>
4172 +
4173 +#include <media/rc-map.h>
4174 +#include <linux/module.h>
4175 +
4176 +//
4177 +// Keytable for the Videostrong KII Pro STB remote control
4178 +//
4179 +
4180 +static struct rc_map_table kii_pro[] = {
4181 + { 0x59, KEY_POWER },
4182 + { 0x19, KEY_MUTE },
4183 + { 0x42, KEY_RED },
4184 + { 0x40, KEY_GREEN },
4185 + { 0x00, KEY_YELLOW },
4186 + { 0x03, KEY_BLUE },
4187 + { 0x4a, KEY_BACK },
4188 + { 0x48, KEY_FORWARD },
4189 + { 0x08, KEY_PREVIOUSSONG},
4190 + { 0x0b, KEY_NEXTSONG},
4191 + { 0x46, KEY_PLAYPAUSE },
4192 + { 0x44, KEY_STOP },
4193 + { 0x1f, KEY_FAVORITES}, //KEY_F5?
4194 + { 0x04, KEY_PVR },
4195 + { 0x4d, KEY_EPG },
4196 + { 0x02, KEY_INFO },
4197 + { 0x09, KEY_SUBTITLE },
4198 + { 0x01, KEY_AUDIO },
4199 + { 0x0d, KEY_HOMEPAGE },
4200 + { 0x11, KEY_TV }, // DTV ?
4201 + { 0x06, KEY_UP },
4202 + { 0x5a, KEY_LEFT },
4203 + { 0x1a, KEY_ENTER }, // KEY_OK ?
4204 + { 0x1b, KEY_RIGHT },
4205 + { 0x16, KEY_DOWN },
4206 + { 0x45, KEY_MENU },
4207 + { 0x05, KEY_ESC },
4208 + { 0x13, KEY_VOLUMEUP },
4209 + { 0x17, KEY_VOLUMEDOWN },
4210 + { 0x58, KEY_APPSELECT },
4211 + { 0x12, KEY_VENDOR }, // mouse
4212 + { 0x55, KEY_PAGEUP }, // KEY_CHANNELUP ?
4213 + { 0x15, KEY_PAGEDOWN }, // KEY_CHANNELDOWN ?
4214 + { 0x52, KEY_1 },
4215 + { 0x50, KEY_2 },
4216 + { 0x10, KEY_3 },
4217 + { 0x56, KEY_4 },
4218 + { 0x54, KEY_5 },
4219 + { 0x14, KEY_6 },
4220 + { 0x4e, KEY_7 },
4221 + { 0x4c, KEY_8 },
4222 + { 0x0c, KEY_9 },
4223 + { 0x18, KEY_WWW }, // KEY_F7
4224 + { 0x0f, KEY_0 },
4225 + { 0x51, KEY_BACKSPACE },
4226 +};
4227 +
4228 +static struct rc_map_list kii_pro_map = {
4229 + .map = {
4230 + .scan = kii_pro,
4231 + .size = ARRAY_SIZE(kii_pro),
4232 + .rc_proto = RC_PROTO_NEC,
4233 + .name = RC_MAP_KII_PRO,
4234 + }
4235 +};
4236 +
4237 +static int __init init_rc_map_kii_pro(void)
4238 +{
4239 + return rc_map_register(&kii_pro_map);
4240 +}
4241 +
4242 +static void __exit exit_rc_map_kii_pro(void)
4243 +{
4244 + rc_map_unregister(&kii_pro_map);
4245 +}
4246 +
4247 +module_init(init_rc_map_kii_pro)
4248 +module_exit(exit_rc_map_kii_pro)
4249 +
4250 +MODULE_LICENSE("GPL");
4251 +MODULE_AUTHOR("Mohammad Rasim <mohammad.rasim96@gmail.com>");
4252 diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
4253 index 7841c11411d0..4faa8d2e5d04 100644
4254 --- a/drivers/mfd/dln2.c
4255 +++ b/drivers/mfd/dln2.c
4256 @@ -90,6 +90,11 @@ struct dln2_mod_rx_slots {
4257 spinlock_t lock;
4258 };
4259
4260 +enum dln2_endpoint {
4261 + DLN2_EP_OUT = 0,
4262 + DLN2_EP_IN = 1,
4263 +};
4264 +
4265 struct dln2_dev {
4266 struct usb_device *usb_dev;
4267 struct usb_interface *interface;
4268 @@ -733,10 +738,10 @@ static int dln2_probe(struct usb_interface *interface,
4269 hostif->desc.bNumEndpoints < 2)
4270 return -ENODEV;
4271
4272 - epin = &hostif->endpoint[0].desc;
4273 - epout = &hostif->endpoint[1].desc;
4274 + epout = &hostif->endpoint[DLN2_EP_OUT].desc;
4275 if (!usb_endpoint_is_bulk_out(epout))
4276 return -ENODEV;
4277 + epin = &hostif->endpoint[DLN2_EP_IN].desc;
4278 if (!usb_endpoint_is_bulk_in(epin))
4279 return -ENODEV;
4280
4281 diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
4282 index fcfb50f84c8b..fd1251ec8471 100644
4283 --- a/drivers/mmc/host/sdhci-of-esdhc.c
4284 +++ b/drivers/mmc/host/sdhci-of-esdhc.c
4285 @@ -734,23 +734,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
4286 {
4287 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4288 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
4289 - u32 val;
4290 + u32 val, bus_width = 0;
4291
4292 + /*
4293 + * Add a delay to make sure all DMA transfers have finished
4294 + * for the delay-before-data-reset quirk.
4295 + */
4296 if (esdhc->quirk_delay_before_data_reset &&
4297 (mask & SDHCI_RESET_DATA) &&
4298 (host->flags & SDHCI_REQ_USE_DMA))
4299 mdelay(5);
4300
4301 + /*
4302 + * Save the bus-width setting before a data reset on eSDHC
4303 + * controllers with vendor version 2.2 or lower.
4304 + */
4305 + if ((mask & SDHCI_RESET_DATA) &&
4306 + (esdhc->vendor_ver <= VENDOR_V_22)) {
4307 + val = sdhci_readl(host, ESDHC_PROCTL);
4308 + bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
4309 + }
4310 +
4311 sdhci_reset(host, mask);
4312
4313 - sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
4314 - sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
4315 + /*
4316 + * Restore the bus-width setting and interrupt registers after a
4317 + * data reset on eSDHC controllers with vendor version 2.2 or lower.
4318 + */
4319 + if ((mask & SDHCI_RESET_DATA) &&
4320 + (esdhc->vendor_ver <= VENDOR_V_22)) {
4321 + val = sdhci_readl(host, ESDHC_PROCTL);
4322 + val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
4323 + val |= bus_width;
4324 + sdhci_writel(host, val, ESDHC_PROCTL);
4325 +
4326 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
4327 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
4328 + }
4329
4330 - if (mask & SDHCI_RESET_ALL) {
4331 + /*
4332 + * Some bits have to be cleared manually on a full reset for eSDHC
4333 + * controllers with spec version 3.0 or higher.
4334 + */
4335 + if ((mask & SDHCI_RESET_ALL) &&
4336 + (esdhc->spec_ver >= SDHCI_SPEC_300)) {
4337 val = sdhci_readl(host, ESDHC_TBCTL);
4338 val &= ~ESDHC_TB_EN;
4339 sdhci_writel(host, val, ESDHC_TBCTL);
4340
4341 + /*
4342 + * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to 0
4343 + * for the unreliable-pulse-detection quirk.
4344 + */
4345 if (esdhc->quirk_unreliable_pulse_detection) {
4346 val = sdhci_readl(host, ESDHC_DLLCFG1);
4347 val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
4348 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
4349 index 4478b94d4791..50514fedbc76 100644
4350 --- a/drivers/mmc/host/sdhci.c
4351 +++ b/drivers/mmc/host/sdhci.c
4352 @@ -981,7 +981,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
4353 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
4354 }
4355
4356 -static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
4357 +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
4358 {
4359 if (enable)
4360 host->ier |= SDHCI_INT_DATA_TIMEOUT;
4361 @@ -990,28 +990,31 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
4362 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
4363 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
4364 }
4365 +EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
4366
4367 -static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
4368 +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
4369 {
4370 - u8 count;
4371 -
4372 - if (host->ops->set_timeout) {
4373 - host->ops->set_timeout(host, cmd);
4374 - } else {
4375 - bool too_big = false;
4376 -
4377 - count = sdhci_calc_timeout(host, cmd, &too_big);
4378 + bool too_big = false;
4379 + u8 count = sdhci_calc_timeout(host, cmd, &too_big);
4380 +
4381 + if (too_big &&
4382 + host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
4383 + sdhci_calc_sw_timeout(host, cmd);
4384 + sdhci_set_data_timeout_irq(host, false);
4385 + } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
4386 + sdhci_set_data_timeout_irq(host, true);
4387 + }
4388
4389 - if (too_big &&
4390 - host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
4391 - sdhci_calc_sw_timeout(host, cmd);
4392 - sdhci_set_data_timeout_irq(host, false);
4393 - } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
4394 - sdhci_set_data_timeout_irq(host, true);
4395 - }
4396 + sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
4397 +}
4398 +EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
4399
4400 - sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
4401 - }
4402 +static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
4403 +{
4404 + if (host->ops->set_timeout)
4405 + host->ops->set_timeout(host, cmd);
4406 + else
4407 + __sdhci_set_timeout(host, cmd);
4408 }
4409
4410 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
4411 diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
4412 index fe83ece6965b..76e69288632d 100644
4413 --- a/drivers/mmc/host/sdhci.h
4414 +++ b/drivers/mmc/host/sdhci.h
4415 @@ -795,5 +795,7 @@ void sdhci_end_tuning(struct sdhci_host *host);
4416 void sdhci_reset_tuning(struct sdhci_host *host);
4417 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
4418 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode);
4419 +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
4420 +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
4421
4422 #endif /* __SDHCI_HW_H */
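Exporting sdhci_set_data_timeout_irq() and __sdhci_set_timeout() lets platform glue drivers wrap the core timeout calculation with their own fixups instead of duplicating it. A sketch of such an override; the "my_" names are invented:

#include "sdhci.h"

static void my_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
        /* ... platform-specific tweaks first ... */
        __sdhci_set_timeout(host, cmd); /* then the core calculation */
}

static const struct sdhci_ops my_sdhci_ops = {
        .set_timeout = my_set_timeout,
        /* ... */
};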
4423 diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
4424 index 89f6beefb01c..5750c45019d8 100644
4425 --- a/drivers/mtd/nand/spi/core.c
4426 +++ b/drivers/mtd/nand/spi/core.c
4427 @@ -568,18 +568,18 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
4428 static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
4429 {
4430 struct spinand_device *spinand = nand_to_spinand(nand);
4431 + u8 marker[2] = { };
4432 struct nand_page_io_req req = {
4433 .pos = *pos,
4434 - .ooblen = 2,
4435 + .ooblen = sizeof(marker),
4436 .ooboffs = 0,
4437 - .oobbuf.in = spinand->oobbuf,
4438 + .oobbuf.in = marker,
4439 .mode = MTD_OPS_RAW,
4440 };
4441
4442 - memset(spinand->oobbuf, 0, 2);
4443 spinand_select_target(spinand, pos->target);
4444 spinand_read_page(spinand, &req, false);
4445 - if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
4446 + if (marker[0] != 0xff || marker[1] != 0xff)
4447 return true;
4448
4449 return false;
4450 @@ -603,15 +603,15 @@ static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
4451 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
4452 {
4453 struct spinand_device *spinand = nand_to_spinand(nand);
4454 + u8 marker[2] = { };
4455 struct nand_page_io_req req = {
4456 .pos = *pos,
4457 .ooboffs = 0,
4458 - .ooblen = 2,
4459 - .oobbuf.out = spinand->oobbuf,
4460 + .ooblen = sizeof(marker),
4461 + .oobbuf.out = marker,
4462 };
4463 int ret;
4464
4465 - /* Erase block before marking it bad. */
4466 ret = spinand_select_target(spinand, pos->target);
4467 if (ret)
4468 return ret;
4469 @@ -620,9 +620,6 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
4470 if (ret)
4471 return ret;
4472
4473 - spinand_erase_op(spinand, pos);
4474 -
4475 - memset(spinand->oobbuf, 0, 2);
4476 return spinand_write_page(spinand, &req);
4477 }
4478
4479 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
4480 index 58a039c3224a..af1f40cbccc8 100644
4481 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
4482 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
4483 @@ -246,6 +246,9 @@ static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta)
4484 FW_PTP_CMD_PORTID_V(0));
4485 c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
4486 c.u.ts.sc = FW_PTP_SC_ADJ_FTIME;
4487 + c.u.ts.sign = (delta < 0) ? 1 : 0;
4488 + if (delta < 0)
4489 + delta = -delta;
4490 c.u.ts.tm = cpu_to_be64(delta);
4491
4492 err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
4493 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
4494 index eb53c15b13f3..5f2d57d1b2d3 100644
4495 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
4496 +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
4497 @@ -389,7 +389,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
4498
4499 spin_unlock_bh(&cmdq->cmdq_lock);
4500
4501 - if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) {
4502 + if (!wait_for_completion_timeout(&done,
4503 + msecs_to_jiffies(CMDQ_TIMEOUT))) {
4504 spin_lock_bh(&cmdq->cmdq_lock);
4505
4506 if (cmdq->errcode[curr_prod_idx] == &errcode)
4507 @@ -623,6 +624,8 @@ static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
4508 if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
4509 return -EBUSY;
4510
4511 + dma_rmb();
4512 +
4513 errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);
4514
4515 cmdq_sync_cmd_handler(cmdq, ci, errcode);
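Both hinic timeout fixes in this patch correct the same mistake: wait_for_completion_timeout() takes its timeout in jiffies, so passing a raw millisecond constant waits the wrong amount of time, scaled by CONFIG_HZ. The corrected idiom:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define CMD_TIMEOUT_MS 5000

static int wait_for_cmd(struct completion *done)
{
        /* Convert milliseconds to jiffies; never pass raw ms here. */
        if (!wait_for_completion_timeout(done,
                                         msecs_to_jiffies(CMD_TIMEOUT_MS)))
                return -ETIMEDOUT;
        return 0;
}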
4516 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
4517 index 79b3d53f2fbf..c7c75b772a86 100644
4518 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
4519 +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
4520 @@ -360,50 +360,6 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev)
4521 return -EFAULT;
4522 }
4523
4524 -static int wait_for_io_stopped(struct hinic_hwdev *hwdev)
4525 -{
4526 - struct hinic_cmd_io_status cmd_io_status;
4527 - struct hinic_hwif *hwif = hwdev->hwif;
4528 - struct pci_dev *pdev = hwif->pdev;
4529 - struct hinic_pfhwdev *pfhwdev;
4530 - unsigned long end;
4531 - u16 out_size;
4532 - int err;
4533 -
4534 - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
4535 - dev_err(&pdev->dev, "Unsupported PCI Function type\n");
4536 - return -EINVAL;
4537 - }
4538 -
4539 - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
4540 -
4541 - cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
4542 -
4543 - end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT);
4544 - do {
4545 - err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
4546 - HINIC_COMM_CMD_IO_STATUS_GET,
4547 - &cmd_io_status, sizeof(cmd_io_status),
4548 - &cmd_io_status, &out_size,
4549 - HINIC_MGMT_MSG_SYNC);
4550 - if ((err) || (out_size != sizeof(cmd_io_status))) {
4551 - dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n",
4552 - err);
4553 - return err;
4554 - }
4555 -
4556 - if (cmd_io_status.status == IO_STOPPED) {
4557 - dev_info(&pdev->dev, "IO stopped\n");
4558 - return 0;
4559 - }
4560 -
4561 - msleep(20);
4562 - } while (time_before(jiffies, end));
4563 -
4564 - dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n");
4565 - return -ETIMEDOUT;
4566 -}
4567 -
4568 /**
4569 * clear_io_resources - set the IO resources as not active in the NIC
4570 * @hwdev: the NIC HW device
4571 @@ -423,11 +379,8 @@ static int clear_io_resources(struct hinic_hwdev *hwdev)
4572 return -EINVAL;
4573 }
4574
4575 - err = wait_for_io_stopped(hwdev);
4576 - if (err) {
4577 - dev_err(&pdev->dev, "IO has not stopped yet\n");
4578 - return err;
4579 - }
4580 + /* sleep 100ms to give the firmware time to stop I/O */
4581 + msleep(100);
4582
4583 cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
4584
4585 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
4586 index 79243b626ddb..c0b6bcb067cd 100644
4587 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
4588 +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
4589 @@ -188,7 +188,7 @@ static u8 eq_cons_idx_checksum_set(u32 val)
4590 * eq_update_ci - update the HW cons idx of event queue
4591 * @eq: the event queue to update the cons idx for
4592 **/
4593 -static void eq_update_ci(struct hinic_eq *eq)
4594 +static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
4595 {
4596 u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);
4597
4598 @@ -202,7 +202,7 @@ static void eq_update_ci(struct hinic_eq *eq)
4599
4600 val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) |
4601 HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
4602 - HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED);
4603 + HINIC_EQ_CI_SET(arm_state, INT_ARMED);
4604
4605 val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
4606
4607 @@ -235,6 +235,8 @@ static void aeq_irq_handler(struct hinic_eq *eq)
4608 if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
4609 break;
4610
4611 + dma_rmb();
4612 +
4613 event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
4614 if (event >= HINIC_MAX_AEQ_EVENTS) {
4615 dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
4616 @@ -347,7 +349,7 @@ static void eq_irq_handler(void *data)
4617 else if (eq->type == HINIC_CEQ)
4618 ceq_irq_handler(eq);
4619
4620 - eq_update_ci(eq);
4621 + eq_update_ci(eq, EQ_ARMED);
4622 }
4623
4624 /**
4625 @@ -702,7 +704,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
4626 }
4627
4628 set_eq_ctrls(eq);
4629 - eq_update_ci(eq);
4630 + eq_update_ci(eq, EQ_ARMED);
4631
4632 err = alloc_eq_pages(eq);
4633 if (err) {
4634 @@ -752,18 +754,28 @@ err_req_irq:
4635 **/
4636 static void remove_eq(struct hinic_eq *eq)
4637 {
4638 - struct msix_entry *entry = &eq->msix_entry;
4639 -
4640 - free_irq(entry->vector, eq);
4641 + hinic_set_msix_state(eq->hwif, eq->msix_entry.entry,
4642 + HINIC_MSIX_DISABLE);
4643 + free_irq(eq->msix_entry.vector, eq);
4644
4645 if (eq->type == HINIC_AEQ) {
4646 struct hinic_eq_work *aeq_work = &eq->aeq_work;
4647
4648 cancel_work_sync(&aeq_work->work);
4649 + /* clear aeq_len so the hw stops accessing host memory */
4650 + hinic_hwif_write_reg(eq->hwif,
4651 + HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
4652 } else if (eq->type == HINIC_CEQ) {
4653 tasklet_kill(&eq->ceq_tasklet);
4654 + /* clear ceq_len so the hw stops accessing host memory */
4655 + hinic_hwif_write_reg(eq->hwif,
4656 + HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
4657 }
4658
4659 + /* update cons_idx to avoid an invalid interrupt */
4660 + eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq));
4661 + eq_update_ci(eq, EQ_NOT_ARMED);
4662 +
4663 free_eq_pages(eq);
4664 }
4665
4666 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
4667 index c1a6be6bf6a8..8995e32dd1c0 100644
4668 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
4669 +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
4670 @@ -43,7 +43,7 @@
4671
4672 #define MSG_NOT_RESP 0xFFFF
4673
4674 -#define MGMT_MSG_TIMEOUT 1000
4675 +#define MGMT_MSG_TIMEOUT 5000
4676
4677 #define mgmt_to_pfhwdev(pf_mgmt) \
4678 container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
4679 @@ -267,7 +267,8 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
4680 goto unlock_sync_msg;
4681 }
4682
4683 - if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) {
4684 + if (!wait_for_completion_timeout(recv_done,
4685 + msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
4686 dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
4687 err = -ETIMEDOUT;
4688 goto unlock_sync_msg;
4689 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
4690 index 2695ad69fca6..815649e37cb1 100644
4691 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
4692 +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
4693 @@ -350,6 +350,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
4694 if (!rq_wqe)
4695 break;
4696
4697 + /* make sure we read rx_done before packet length */
4698 + dma_rmb();
4699 +
4700 cqe = rq->cqe[ci];
4701 status = be32_to_cpu(cqe->status);
4702 hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
4703 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
4704 index 0e13d1c7e474..365016450bdb 100644
4705 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
4706 +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
4707 @@ -45,7 +45,7 @@
4708
4709 #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
4710
4711 -#define MIN_SKB_LEN 17
4712 +#define MIN_SKB_LEN 32
4713
4714 #define MAX_PAYLOAD_OFFSET 221
4715 #define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data))
4716 @@ -622,6 +622,8 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
4717 do {
4718 hw_ci = HW_CONS_IDX(sq) & wq->mask;
4719
4720 + dma_rmb();
4721 +
4722 /* Reading a WQEBB to get real WQE size and consumer index. */
4723 sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
4724 if ((!sq_wqe) ||
4725 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
4726 index e678ba379598..628fa9b2f741 100644
4727 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
4728 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
4729 @@ -2045,7 +2045,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
4730 if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
4731 (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
4732 if ((mask & VXGE_DEBUG_MASK) == mask) \
4733 - printk(fmt "\n", __VA_ARGS__); \
4734 + printk(fmt "\n", ##__VA_ARGS__); \
4735 } while (0)
4736 #else
4737 #define vxge_debug_ll(level, mask, fmt, ...)
4738 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
4739 index 59a57ff5e96a..9c86f4f9cd42 100644
4740 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
4741 +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
4742 @@ -452,49 +452,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
4743
4744 #if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
4745 #define vxge_debug_ll_config(level, fmt, ...) \
4746 - vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
4747 + vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
4748 #else
4749 #define vxge_debug_ll_config(level, fmt, ...)
4750 #endif
4751
4752 #if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
4753 #define vxge_debug_init(level, fmt, ...) \
4754 - vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
4755 + vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
4756 #else
4757 #define vxge_debug_init(level, fmt, ...)
4758 #endif
4759
4760 #if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
4761 #define vxge_debug_tx(level, fmt, ...) \
4762 - vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__)
4763 + vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
4764 #else
4765 #define vxge_debug_tx(level, fmt, ...)
4766 #endif
4767
4768 #if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
4769 #define vxge_debug_rx(level, fmt, ...) \
4770 - vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__)
4771 + vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
4772 #else
4773 #define vxge_debug_rx(level, fmt, ...)
4774 #endif
4775
4776 #if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
4777 #define vxge_debug_mem(level, fmt, ...) \
4778 - vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__)
4779 + vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
4780 #else
4781 #define vxge_debug_mem(level, fmt, ...)
4782 #endif
4783
4784 #if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
4785 #define vxge_debug_entryexit(level, fmt, ...) \
4786 - vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__)
4787 + vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
4788 #else
4789 #define vxge_debug_entryexit(level, fmt, ...)
4790 #endif
4791
4792 #if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
4793 #define vxge_debug_intr(level, fmt, ...) \
4794 - vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__)
4795 + vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
4796 #else
4797 #define vxge_debug_intr(level, fmt, ...)
4798 #endif
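The vxge changes switch every wrapper from plain __VA_ARGS__ to the GNU ##__VA_ARGS__ form. With the plain form, a call that passes no variadic arguments leaves a trailing comma in the expansion and fails to compile; ## deletes that comma. A small user-space illustration (compiles with gcc/clang):

    #include <stdio.h>

    /* breaks for log_bad("hi"): expands to printf("hi" "\n", ) */
    #define log_bad(fmt, ...)  printf(fmt "\n", __VA_ARGS__)

    /* ## swallows the comma when no variadic args are given */
    #define log_ok(fmt, ...)   printf(fmt "\n", ##__VA_ARGS__)

    int main(void)
    {
            log_ok("plain message");     /* fine: comma removed */
            log_ok("value = %d", 42);    /* fine: args pass through */
            return 0;
    }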
4799 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
4800 index 07f9067affc6..cda5b0a9e948 100644
4801 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
4802 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
4803 @@ -1720,7 +1720,7 @@ static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_d
4804
4805 ahw->reset.seq_error = 0;
4806 ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
4807 - if (p_dev->ahw->reset.buff == NULL)
4808 + if (ahw->reset.buff == NULL)
4809 return -ENOMEM;
4810
4811 p_buff = p_dev->ahw->reset.buff;
4812 diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
4813 index fbf4cbcf1a65..02cdbb22d335 100644
4814 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
4815 +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
4816 @@ -279,7 +279,6 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
4817 {
4818 struct rmnet_priv *priv = netdev_priv(dev);
4819 struct net_device *real_dev;
4820 - struct rmnet_endpoint *ep;
4821 struct rmnet_port *port;
4822 u16 mux_id;
4823
4824 @@ -294,19 +293,27 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
4825
4826 if (data[IFLA_RMNET_MUX_ID]) {
4827 mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
4828 - if (rmnet_get_endpoint(port, mux_id)) {
4829 - NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
4830 - return -EINVAL;
4831 - }
4832 - ep = rmnet_get_endpoint(port, priv->mux_id);
4833 - if (!ep)
4834 - return -ENODEV;
4835
4836 - hlist_del_init_rcu(&ep->hlnode);
4837 - hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
4838 + if (mux_id != priv->mux_id) {
4839 + struct rmnet_endpoint *ep;
4840 +
4841 + ep = rmnet_get_endpoint(port, priv->mux_id);
4842 + if (!ep)
4843 + return -ENODEV;
4844
4845 - ep->mux_id = mux_id;
4846 - priv->mux_id = mux_id;
4847 + if (rmnet_get_endpoint(port, mux_id)) {
4848 + NL_SET_ERR_MSG_MOD(extack,
4849 + "MUX ID already exists");
4850 + return -EINVAL;
4851 + }
4852 +
4853 + hlist_del_init_rcu(&ep->hlnode);
4854 + hlist_add_head_rcu(&ep->hlnode,
4855 + &port->muxed_ep[mux_id]);
4856 +
4857 + ep->mux_id = mux_id;
4858 + priv->mux_id = mux_id;
4859 + }
4860 }
4861
4862 if (data[IFLA_RMNET_FLAGS]) {
4863 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
4864 index 5150551c28be..508325cc105d 100644
4865 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
4866 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
4867 @@ -663,16 +663,22 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
4868 * In case the wake up interrupt is not passed from the platform
4869 * so the driver will continue to use the mac irq (ndev->irq)
4870 */
4871 - stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
4872 + stmmac_res->wol_irq =
4873 + platform_get_irq_byname_optional(pdev, "eth_wake_irq");
4874 if (stmmac_res->wol_irq < 0) {
4875 if (stmmac_res->wol_irq == -EPROBE_DEFER)
4876 return -EPROBE_DEFER;
4877 + dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n");
4878 stmmac_res->wol_irq = stmmac_res->irq;
4879 }
4880
4881 - stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
4882 - if (stmmac_res->lpi_irq == -EPROBE_DEFER)
4883 - return -EPROBE_DEFER;
4884 + stmmac_res->lpi_irq =
4885 + platform_get_irq_byname_optional(pdev, "eth_lpi");
4886 + if (stmmac_res->lpi_irq < 0) {
4887 + if (stmmac_res->lpi_irq == -EPROBE_DEFER)
4888 + return -EPROBE_DEFER;
4889 + dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
4890 + }
4891
4892 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4893 stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
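platform_get_irq_byname() logs an error when the IRQ is simply not described, while the _optional variant returns the same error codes silently; that suits wake and LPI lines that many boards never wire up, with -EPROBE_DEFER still propagated. A sketch of the probe-side pattern (demo names hypothetical):

    #include <linux/platform_device.h>
    #include <linux/errno.h>

    static int demo_get_wake_irq(struct platform_device *pdev, int mac_irq)
    {
            int irq;

            /* a missing IRQ is not an error here, so stay quiet */
            irq = platform_get_irq_byname_optional(pdev, "eth_wake_irq");
            if (irq < 0) {
                    if (irq == -EPROBE_DEFER)
                            return irq;  /* provider not ready, retry later */
                    irq = mac_irq;       /* fall back to the main MAC irq */
            }
            return irq;
    }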
4894 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
4895 index 34121fbf32e3..bd7e757a0f92 100644
4896 --- a/drivers/net/wireless/ath/ath9k/main.c
4897 +++ b/drivers/net/wireless/ath/ath9k/main.c
4898 @@ -1457,6 +1457,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
4899 ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
4900 }
4901
4902 + if (changed & IEEE80211_CONF_CHANGE_POWER)
4903 + ath9k_set_txpower(sc, NULL);
4904 +
4905 mutex_unlock(&sc->mutex);
4906 ath9k_ps_restore(sc);
4907
4908 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
4909 index 24df3182ec9e..5b2bd603febf 100644
4910 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
4911 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
4912 @@ -6,7 +6,7 @@
4913 * GPL LICENSE SUMMARY
4914 *
4915 * Copyright(c) 2017 Intel Deutschland GmbH
4916 - * Copyright(c) 2018 - 2019 Intel Corporation
4917 + * Copyright(c) 2018 - 2020 Intel Corporation
4918 *
4919 * This program is free software; you can redistribute it and/or modify
4920 * it under the terms of version 2 of the GNU General Public License as
4921 @@ -27,7 +27,7 @@
4922 * BSD LICENSE
4923 *
4924 * Copyright(c) 2017 Intel Deutschland GmbH
4925 - * Copyright(c) 2018 - 2019 Intel Corporation
4926 + * Copyright(c) 2018 - 2020 Intel Corporation
4927 * All rights reserved.
4928 *
4929 * Redistribution and use in source and binary forms, with or without
4930 @@ -195,11 +195,13 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
4931 {
4932 u16 supp;
4933 int i, highest_mcs;
4934 + u8 nss = sta->rx_nss;
4935
4936 - for (i = 0; i < sta->rx_nss; i++) {
4937 - if (i == IWL_TLC_NSS_MAX)
4938 - break;
4939 + /* the station supports only a single receive chain */
4940 + if (sta->smps_mode == IEEE80211_SMPS_STATIC)
4941 + nss = 1;
4942
4943 + for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
4944 highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
4945 if (!highest_mcs)
4946 continue;
4947 @@ -245,8 +247,13 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
4948 u16 tx_mcs_160 =
4949 le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160);
4950 int i;
4951 + u8 nss = sta->rx_nss;
4952
4953 - for (i = 0; i < sta->rx_nss && i < IWL_TLC_NSS_MAX; i++) {
4954 + /* the station supports only a single receive chain */
4955 + if (sta->smps_mode == IEEE80211_SMPS_STATIC)
4956 + nss = 1;
4957 +
4958 + for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
4959 u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
4960 u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
4961 u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
4962 @@ -307,8 +314,14 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
4963 cmd->mode = IWL_TLC_MNG_MODE_HT;
4964 cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] =
4965 cpu_to_le16(ht_cap->mcs.rx_mask[0]);
4966 - cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
4967 - cpu_to_le16(ht_cap->mcs.rx_mask[1]);
4968 +
4969 + /* the station supports only a single receive chain */
4970 + if (sta->smps_mode == IEEE80211_SMPS_STATIC)
4971 + cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
4972 + 0;
4973 + else
4974 + cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
4975 + cpu_to_le16(ht_cap->mcs.rx_mask[1]);
4976 }
4977 }
4978
4979 diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
4980 index 59474bd0c728..83ac88924f25 100644
4981 --- a/drivers/nvme/host/fc.c
4982 +++ b/drivers/nvme/host/fc.c
4983 @@ -342,8 +342,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
4984 !template->ls_req || !template->fcp_io ||
4985 !template->ls_abort || !template->fcp_abort ||
4986 !template->max_hw_queues || !template->max_sgl_segments ||
4987 - !template->max_dif_sgl_segments || !template->dma_boundary ||
4988 - !template->module) {
4989 + !template->max_dif_sgl_segments || !template->dma_boundary) {
4990 ret = -EINVAL;
4991 goto out_reghost_failed;
4992 }
4993 @@ -2016,7 +2015,6 @@ nvme_fc_ctrl_free(struct kref *ref)
4994 {
4995 struct nvme_fc_ctrl *ctrl =
4996 container_of(ref, struct nvme_fc_ctrl, ref);
4997 - struct nvme_fc_lport *lport = ctrl->lport;
4998 unsigned long flags;
4999
5000 if (ctrl->ctrl.tagset) {
5001 @@ -2043,7 +2041,6 @@ nvme_fc_ctrl_free(struct kref *ref)
5002 if (ctrl->ctrl.opts)
5003 nvmf_free_options(ctrl->ctrl.opts);
5004 kfree(ctrl);
5005 - module_put(lport->ops->module);
5006 }
5007
5008 static void
5009 @@ -3071,15 +3068,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
5010 goto out_fail;
5011 }
5012
5013 - if (!try_module_get(lport->ops->module)) {
5014 - ret = -EUNATCH;
5015 - goto out_free_ctrl;
5016 - }
5017 -
5018 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
5019 if (idx < 0) {
5020 ret = -ENOSPC;
5021 - goto out_mod_put;
5022 + goto out_free_ctrl;
5023 }
5024
5025 ctrl->ctrl.opts = opts;
5026 @@ -3232,8 +3224,6 @@ out_free_queues:
5027 out_free_ida:
5028 put_device(ctrl->dev);
5029 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
5030 -out_mod_put:
5031 - module_put(lport->ops->module);
5032 out_free_ctrl:
5033 kfree(ctrl);
5034 out_fail:
5035 diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
5036 index 1c50af6219f3..b50b53db3746 100644
5037 --- a/drivers/nvme/target/fcloop.c
5038 +++ b/drivers/nvme/target/fcloop.c
5039 @@ -850,7 +850,6 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
5040 #define FCLOOP_DMABOUND_4G 0xFFFFFFFF
5041
5042 static struct nvme_fc_port_template fctemplate = {
5043 - .module = THIS_MODULE,
5044 .localport_delete = fcloop_localport_delete,
5045 .remoteport_delete = fcloop_remoteport_delete,
5046 .create_queue = fcloop_create_queue,
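These nvme-fc and fcloop hunks (together with the lpfc_nvme.c hunk further down) revert the transport's .module plumbing: the core no longer pins the LLDD module with try_module_get() per controller. For reference, a reduced sketch of the pin-while-in-use pattern being removed (demo names hypothetical, not the nvme-fc API):

    #include <linux/module.h>
    #include <linux/errno.h>

    struct demo_ops {
            struct module *owner;   /* typically THIS_MODULE in the provider */
            void (*do_io)(void);
    };

    static struct demo_ops *active_ops;

    static int demo_attach(struct demo_ops *ops)
    {
            /* pin the provider so its callbacks cannot be unloaded */
            if (!try_module_get(ops->owner))
                    return -ENODEV;
            active_ops = ops;
            return 0;
    }

    static void demo_detach(void)
    {
            struct demo_ops *ops = active_ops;

            active_ops = NULL;
            module_put(ops->owner); /* drop the reference from attach */
    }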
5047 diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
5048 index 2fe34fd4c3f3..22014e76d771 100644
5049 --- a/drivers/nvme/target/tcp.c
5050 +++ b/drivers/nvme/target/tcp.c
5051 @@ -794,7 +794,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
5052 icresp->hdr.pdo = 0;
5053 icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
5054 icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
5055 - icresp->maxdata = cpu_to_le32(0xffff); /* FIXME: support r2t */
5056 + icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
5057 icresp->cpda = 0;
5058 if (queue->hdr_digest)
5059 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
5060 diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
5061 index 7e581748ee9f..70ded8900e28 100644
5062 --- a/drivers/pci/controller/dwc/pcie-qcom.c
5063 +++ b/drivers/pci/controller/dwc/pcie-qcom.c
5064 @@ -1289,7 +1289,13 @@ static void qcom_fixup_class(struct pci_dev *dev)
5065 {
5066 dev->class = PCI_CLASS_BRIDGE_PCI << 8;
5067 }
5068 -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class);
5069 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
5070 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
5071 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
5072 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
5073 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
5074 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
5075 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
5076
5077 static struct platform_driver qcom_pcie_driver = {
5078 .probe = qcom_pcie_probe,
5079 diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
5080 index 2bf8bd1f0563..0471643cf536 100644
5081 --- a/drivers/pci/endpoint/pci-epc-mem.c
5082 +++ b/drivers/pci/endpoint/pci-epc-mem.c
5083 @@ -79,6 +79,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size,
5084 mem->page_size = page_size;
5085 mem->pages = pages;
5086 mem->size = size;
5087 + mutex_init(&mem->lock);
5088
5089 epc->mem = mem;
5090
5091 @@ -122,7 +123,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
5092 phys_addr_t *phys_addr, size_t size)
5093 {
5094 int pageno;
5095 - void __iomem *virt_addr;
5096 + void __iomem *virt_addr = NULL;
5097 struct pci_epc_mem *mem = epc->mem;
5098 unsigned int page_shift = ilog2(mem->page_size);
5099 int order;
5100 @@ -130,15 +131,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
5101 size = ALIGN(size, mem->page_size);
5102 order = pci_epc_mem_get_order(mem, size);
5103
5104 + mutex_lock(&mem->lock);
5105 pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
5106 if (pageno < 0)
5107 - return NULL;
5108 + goto ret;
5109
5110 *phys_addr = mem->phys_base + (pageno << page_shift);
5111 virt_addr = ioremap(*phys_addr, size);
5112 if (!virt_addr)
5113 bitmap_release_region(mem->bitmap, pageno, order);
5114
5115 +ret:
5116 + mutex_unlock(&mem->lock);
5117 return virt_addr;
5118 }
5119 EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
5120 @@ -164,7 +168,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
5121 pageno = (phys_addr - mem->phys_base) >> page_shift;
5122 size = ALIGN(size, mem->page_size);
5123 order = pci_epc_mem_get_order(mem, size);
5124 + mutex_lock(&mem->lock);
5125 bitmap_release_region(mem->bitmap, pageno, order);
5126 + mutex_unlock(&mem->lock);
5127 }
5128 EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
5129
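bitmap_find_free_region() and bitmap_release_region() are plain scan-and-set helpers with no locking of their own, so two concurrent pci_epc_mem_alloc_addr() callers could be handed the same window; the hunks above serialize both paths with mem->lock. A stripped-down sketch of the same allocator pattern (demo names hypothetical):

    #include <linux/bitmap.h>
    #include <linux/mutex.h>

    struct demo_pool {
            struct mutex lock;      /* serializes the bitmap scan-and-set */
            unsigned long *bitmap;
            int pages;
    };

    static int demo_alloc_region(struct demo_pool *pool, int order)
    {
            int pageno;

            mutex_lock(&pool->lock);
            /* find-and-mark is not atomic, hence the lock around it */
            pageno = bitmap_find_free_region(pool->bitmap, pool->pages, order);
            mutex_unlock(&pool->lock);

            return pageno;          /* negative value on failure */
    }

    static void demo_free_region(struct demo_pool *pool, int pageno, int order)
    {
            mutex_lock(&pool->lock);
            bitmap_release_region(pool->bitmap, pageno, order);
            mutex_unlock(&pool->lock);
    }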
5130 diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
5131 index 86d97f3112f0..d74a71712cde 100644
5132 --- a/drivers/pci/hotplug/pciehp_hpc.c
5133 +++ b/drivers/pci/hotplug/pciehp_hpc.c
5134 @@ -590,17 +590,15 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
5135 if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
5136 ret = pciehp_isr(irq, dev_id);
5137 enable_irq(irq);
5138 - if (ret != IRQ_WAKE_THREAD) {
5139 - pci_config_pm_runtime_put(pdev);
5140 - return ret;
5141 - }
5142 + if (ret != IRQ_WAKE_THREAD)
5143 + goto out;
5144 }
5145
5146 synchronize_hardirq(irq);
5147 events = atomic_xchg(&ctrl->pending_events, 0);
5148 if (!events) {
5149 - pci_config_pm_runtime_put(pdev);
5150 - return IRQ_NONE;
5151 + ret = IRQ_NONE;
5152 + goto out;
5153 }
5154
5155 /* Check Attention Button Pressed */
5156 @@ -629,10 +627,12 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
5157 pciehp_handle_presence_or_link_change(ctrl, events);
5158 up_read(&ctrl->reset_lock);
5159
5160 + ret = IRQ_HANDLED;
5161 +out:
5162 pci_config_pm_runtime_put(pdev);
5163 ctrl->ist_running = false;
5164 wake_up(&ctrl->requester);
5165 - return IRQ_HANDLED;
5166 + return ret;
5167 }
5168
5169 static int pciehp_poll(void *data)
5170 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
5171 index 652ef23bba35..32c34330e5a6 100644
5172 --- a/drivers/pci/pcie/aspm.c
5173 +++ b/drivers/pci/pcie/aspm.c
5174 @@ -742,9 +742,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
5175
5176 /* Enable what we need to enable */
5177 pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
5178 - PCI_L1SS_CAP_L1_PM_SS, val);
5179 + PCI_L1SS_CTL1_L1SS_MASK, val);
5180 pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
5181 - PCI_L1SS_CAP_L1_PM_SS, val);
5182 + PCI_L1SS_CTL1_L1SS_MASK, val);
5183 }
5184
5185 static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
5186 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
5187 index 2fdceaab7307..a51b3e3f248b 100644
5188 --- a/drivers/pci/quirks.c
5189 +++ b/drivers/pci/quirks.c
5190 @@ -1970,26 +1970,92 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk
5191 /*
5192 * IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no
5193 * 300641-004US, section 5.7.3.
5194 + *
5195 + * Core IO on Xeon E5 1600/2600/4600, see Intel order no 326509-003.
5196 + * Core IO on Xeon E5 v2, see Intel order no 329188-003.
5197 + * Core IO on Xeon E7 v2, see Intel order no 329595-002.
5198 + * Core IO on Xeon E5 v3, see Intel order no 330784-003.
5199 + * Core IO on Xeon E7 v3, see Intel order no 332315-001US.
5200 + * Core IO on Xeon E5 v4, see Intel order no 333810-002US.
5201 + * Core IO on Xeon E7 v4, see Intel order no 332315-001US.
5202 + * Core IO on Xeon D-1500, see Intel order no 332051-001.
5203 + * Core IO on Xeon Scalable, see Intel order no 610950.
5204 */
5205 -#define INTEL_6300_IOAPIC_ABAR 0x40
5206 +#define INTEL_6300_IOAPIC_ABAR 0x40 /* Bus 0, Dev 29, Func 5 */
5207 #define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)
5208
5209 +#define INTEL_CIPINTRC_CFG_OFFSET 0x14C /* Bus 0, Dev 5, Func 0 */
5210 +#define INTEL_CIPINTRC_DIS_INTX_ICH (1<<25)
5211 +
5212 static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
5213 {
5214 u16 pci_config_word;
5215 + u32 pci_config_dword;
5216
5217 if (noioapicquirk)
5218 return;
5219
5220 - pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
5221 - pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
5222 - pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
5223 -
5224 + switch (dev->device) {
5225 + case PCI_DEVICE_ID_INTEL_ESB_10:
5226 + pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR,
5227 + &pci_config_word);
5228 + pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
5229 + pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR,
5230 + pci_config_word);
5231 + break;
5232 + case 0x3c28: /* Xeon E5 1600/2600/4600 */
5233 + case 0x0e28: /* Xeon E5/E7 V2 */
5234 + case 0x2f28: /* Xeon E5/E7 V3,V4 */
5235 + case 0x6f28: /* Xeon D-1500 */
5236 + case 0x2034: /* Xeon Scalable Family */
5237 + pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
5238 + &pci_config_dword);
5239 + pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH;
5240 + pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
5241 + pci_config_dword);
5242 + break;
5243 + default:
5244 + return;
5245 + }
5246 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
5247 dev->vendor, dev->device);
5248 }
5249 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
5250 -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
5251 +/*
5252 + * Device 29 Func 5 Device IDs of IO-APIC
5253 + * containing ABAR—APIC1 Alternate Base Address Register
5254 + */
5255 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
5256 + quirk_disable_intel_boot_interrupt);
5257 +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
5258 + quirk_disable_intel_boot_interrupt);
5259 +
5260 +/*
5261 + * Device 5 Func 0 Device IDs of Core IO modules/hubs
5262 + * containing Coherent Interface Protocol Interrupt Control
5263 + *
5264 + * Device IDs obtained from the volume 2 datasheets of the
5265 + * families commented on above.
5266 + */
5267 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28,
5268 + quirk_disable_intel_boot_interrupt);
5269 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28,
5270 + quirk_disable_intel_boot_interrupt);
5271 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28,
5272 + quirk_disable_intel_boot_interrupt);
5273 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28,
5274 + quirk_disable_intel_boot_interrupt);
5275 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034,
5276 + quirk_disable_intel_boot_interrupt);
5277 +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28,
5278 + quirk_disable_intel_boot_interrupt);
5279 +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28,
5280 + quirk_disable_intel_boot_interrupt);
5281 +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28,
5282 + quirk_disable_intel_boot_interrupt);
5283 +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28,
5284 + quirk_disable_intel_boot_interrupt);
5285 +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034,
5286 + quirk_disable_intel_boot_interrupt);
5287
5288 /* Disable boot interrupts on HT-1000 */
5289 #define BC_HT1000_FEATURE_REG 0x64
5290 diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
5291 index cc43c855452f..2c9c3061894b 100644
5292 --- a/drivers/pci/switch/switchtec.c
5293 +++ b/drivers/pci/switch/switchtec.c
5294 @@ -175,7 +175,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser)
5295 kref_get(&stuser->kref);
5296 stuser->read_len = sizeof(stuser->data);
5297 stuser_set_state(stuser, MRPC_QUEUED);
5298 - init_completion(&stuser->comp);
5299 + reinit_completion(&stuser->comp);
5300 list_add_tail(&stuser->list, &stdev->mrpc_queue);
5301
5302 mrpc_cmd_submit(stdev);
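The switchtec fix swaps init_completion() for reinit_completion() on a completion embedded in a long-lived object: init_completion() reinitializes the whole structure, including the wait-queue head and its spinlock, which is unsafe once anyone may have slept on it, while reinit_completion() only resets the done counter. A sketch of the intended lifecycle (demo names hypothetical):

    #include <linux/completion.h>

    struct demo_cmd {
            struct completion comp;
    };

    static void demo_cmd_create(struct demo_cmd *cmd)
    {
            init_completion(&cmd->comp);    /* once, at object creation */
    }

    static void demo_cmd_queue(struct demo_cmd *cmd)
    {
            /* reuse: reset only the done counter, keep the waitqueue */
            reinit_completion(&cmd->comp);
            /* ... submit; the IRQ path calls complete(&cmd->comp) ... */
    }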
5303 diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
5304 index 982f0cc8270c..41e28552b2ce 100644
5305 --- a/drivers/platform/x86/asus-wmi.c
5306 +++ b/drivers/platform/x86/asus-wmi.c
5307 @@ -418,8 +418,11 @@ static int asus_wmi_battery_add(struct power_supply *battery)
5308 {
4309 /* The WMI method does not provide a way to specify a battery, so we
5310 * just assume it is the first battery.
5311 + * Note: On some newer ASUS laptops (Zenbook UM431DA), the primary/first
5312 + * battery is named BATT.
5313 */
5314 - if (strcmp(battery->desc->name, "BAT0") != 0)
5315 + if (strcmp(battery->desc->name, "BAT0") != 0 &&
5316 + strcmp(battery->desc->name, "BATT") != 0)
5317 return -ENODEV;
5318
5319 if (device_create_file(&battery->dev,
5320 diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
5321 index de919f2e8b94..783d00131a2a 100644
5322 --- a/drivers/remoteproc/qcom_q6v5_mss.c
5323 +++ b/drivers/remoteproc/qcom_q6v5_mss.c
5324 @@ -875,11 +875,6 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
5325 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
5326 }
5327
5328 - ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
5329 - false, qproc->mpss_phys,
5330 - qproc->mpss_size);
5331 - WARN_ON(ret);
5332 -
5333 q6v5_reset_assert(qproc);
5334
5335 q6v5_clk_disable(qproc->dev, qproc->reset_clks,
5336 @@ -909,6 +904,23 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
5337 }
5338 }
5339
5340 +static int q6v5_reload_mba(struct rproc *rproc)
5341 +{
5342 + struct q6v5 *qproc = rproc->priv;
5343 + const struct firmware *fw;
5344 + int ret;
5345 +
5346 + ret = request_firmware(&fw, rproc->firmware, qproc->dev);
5347 + if (ret < 0)
5348 + return ret;
5349 +
5350 + q6v5_load(rproc, fw);
5351 + ret = q6v5_mba_load(qproc);
5352 + release_firmware(fw);
5353 +
5354 + return ret;
5355 +}
5356 +
5357 static int q6v5_mpss_load(struct q6v5 *qproc)
5358 {
5359 const struct elf32_phdr *phdrs;
5360 @@ -969,6 +981,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
5361 max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
5362 }
5363
5364 + /*
5365 + * In case of a modem subsystem restart on secure devices, the modem
5366 + * memory can be reclaimed only after the MBA is loaded. For modem cold
5367 + * boot this will be a nop.
5368 + */
5369 + q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
5370 + qproc->mpss_phys, qproc->mpss_size);
5371 +
5372 mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
5373 qproc->mpss_reloc = mpss_reloc;
5374 /* Load firmware segments */
5375 @@ -1058,8 +1078,16 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc,
5376 void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);
5377
5378 /* Unlock mba before copying segments */
5379 - if (!qproc->dump_mba_loaded)
5380 - ret = q6v5_mba_load(qproc);
5381 + if (!qproc->dump_mba_loaded) {
5382 + ret = q6v5_reload_mba(rproc);
5383 + if (!ret) {
5384 + /* Reset ownership back to Linux to copy segments */
5385 + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
5386 + false,
5387 + qproc->mpss_phys,
5388 + qproc->mpss_size);
5389 + }
5390 + }
5391
5392 if (!ptr || ret)
5393 memset(dest, 0xff, segment->size);
5394 @@ -1070,8 +1098,14 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc,
5395
5396 /* Reclaim mba after copying segments */
5397 if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
5398 - if (qproc->dump_mba_loaded)
5399 + if (qproc->dump_mba_loaded) {
5400 + /* Try to reset ownership back to Q6 */
5401 + q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
5402 + true,
5403 + qproc->mpss_phys,
5404 + qproc->mpss_size);
5405 q6v5_mba_reclaim(qproc);
5406 + }
5407 }
5408 }
5409
5410 @@ -1111,10 +1145,6 @@ static int q6v5_start(struct rproc *rproc)
5411 return 0;
5412
5413 reclaim_mpss:
5414 - xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
5415 - false, qproc->mpss_phys,
5416 - qproc->mpss_size);
5417 - WARN_ON(xfermemop_ret);
5418 q6v5_mba_reclaim(qproc);
5419
5420 return ret;
5421 diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
5422 index 8c07cb2ca8ba..31a62a0b470e 100644
5423 --- a/drivers/remoteproc/remoteproc_virtio.c
5424 +++ b/drivers/remoteproc/remoteproc_virtio.c
5425 @@ -334,6 +334,13 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
5426 struct rproc_mem_entry *mem;
5427 int ret;
5428
5429 + if (rproc->ops->kick == NULL) {
5430 + ret = -EINVAL;
5431 + dev_err(dev, ".kick method not defined for %s",
5432 + rproc->name);
5433 + goto out;
5434 + }
5435 +
5436 /* Try to find dedicated vdev buffer carveout */
5437 mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
5438 if (mem) {
5439 diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
5440 index 96f0d34e9459..cb84125ab80d 100644
5441 --- a/drivers/s390/scsi/zfcp_erp.c
5442 +++ b/drivers/s390/scsi/zfcp_erp.c
5443 @@ -725,7 +725,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
5444 adapter->peer_d_id);
5445 if (IS_ERR(port)) /* error or port already attached */
5446 return;
5447 - _zfcp_erp_port_reopen(port, 0, "ereptp1");
5448 + zfcp_erp_port_reopen(port, 0, "ereptp1");
5449 }
5450
5451 static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
5452 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
5453 index 691acbdcc46d..8943d42fc406 100644
5454 --- a/drivers/scsi/lpfc/lpfc.h
5455 +++ b/drivers/scsi/lpfc/lpfc.h
5456 @@ -742,6 +742,7 @@ struct lpfc_hba {
5457 * capability
5458 */
5459 #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
5460 +#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
5461
5462 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
5463 struct lpfc_dmabuf slim2p;
5464 @@ -1209,6 +1210,15 @@ struct lpfc_hba {
5465 uint64_t ktime_seg10_min;
5466 uint64_t ktime_seg10_max;
5467 #endif
5468 +
5469 + struct hlist_node cpuhp; /* used for cpuhp per hba callback */
5470 + struct timer_list cpuhp_poll_timer;
5471 + struct list_head poll_list; /* slowpath eq polling list */
5472 +#define LPFC_POLL_HB 1 /* slowpath heartbeat */
5473 +#define LPFC_POLL_FASTPATH 0 /* called from fastpath */
5474 +#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
5475 +
5476 + char os_host_name[MAXHOSTNAMELEN];
5477 };
5478
5479 static inline struct Scsi_Host *
5480 diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
5481 index b2ad8c750486..0f019e889ba6 100644
5482 --- a/drivers/scsi/lpfc/lpfc_crtn.h
5483 +++ b/drivers/scsi/lpfc/lpfc_crtn.h
5484 @@ -180,7 +180,7 @@ int lpfc_issue_gidft(struct lpfc_vport *vport);
5485 int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
5486 int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
5487 int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
5488 -void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
5489 +void lpfc_fdmi_change_check(struct lpfc_vport *vport);
5490 void lpfc_delayed_disc_tmo(struct timer_list *);
5491 void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
5492
5493 @@ -215,6 +215,12 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
5494 irqreturn_t lpfc_sli4_intr_handler(int, void *);
5495 irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
5496
5497 +void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
5498 +int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
5499 +void lpfc_sli4_poll_hbtimer(struct timer_list *t);
5500 +void lpfc_sli4_start_polling(struct lpfc_queue *q);
5501 +void lpfc_sli4_stop_polling(struct lpfc_queue *q);
5502 +
5503 void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
5504 void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
5505 void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
5506 diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
5507 index f81d1453eefb..85f77c1ed23c 100644
5508 --- a/drivers/scsi/lpfc/lpfc_ct.c
5509 +++ b/drivers/scsi/lpfc/lpfc_ct.c
5510 @@ -1495,7 +1495,7 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
5511 if (strlcat(symbol, tmp, size) >= size)
5512 goto buffer_done;
5513
5514 - scnprintf(tmp, sizeof(tmp), " HN:%s", init_utsname()->nodename);
5515 + scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name);
5516 if (strlcat(symbol, tmp, size) >= size)
5517 goto buffer_done;
5518
5519 @@ -1984,14 +1984,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5520
5521
5522 /**
5523 - * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to
5524 + * lpfc_fdmi_change_check - Check for changed FDMI parameters
5525 * @vport: pointer to a host virtual N_Port data structure.
5526 *
5527 - * Called from hbeat timeout routine to check if the number of discovered
5528 - * ports has changed. If so, re-register thar port Attribute.
5529 + * Check how many mapped NPorts we are connected to
5530 + * Check if our hostname changed
5531 + * Called from hbeat timeout routine to check if any FDMI parameters
5532 + * changed. If so, re-register those Attributes.
5533 */
5534 void
5535 -lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
5536 +lpfc_fdmi_change_check(struct lpfc_vport *vport)
5537 {
5538 struct lpfc_hba *phba = vport->phba;
5539 struct lpfc_nodelist *ndlp;
5540 @@ -2004,17 +2006,41 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
5541 if (!(vport->fc_flag & FC_FABRIC))
5542 return;
5543
5544 + ndlp = lpfc_findnode_did(vport, FDMI_DID);
5545 + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5546 + return;
5547 +
5548 + /* Check if system hostname changed */
5549 + if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
5550 + memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
5551 + scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
5552 + init_utsname()->nodename);
5553 + lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
5554 +
5555 + /* Since this affects multiple HBA and PORT attributes, we need to
5556 + * de-register and go through the whole FDMI registration cycle.
5557 + * DHBA -> DPRT -> RHBA -> RPA (physical port)
5558 + * DPRT -> RPRT (vports)
5559 + */
5560 + if (vport->port_type == LPFC_PHYSICAL_PORT)
5561 + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
5562 + else
5563 + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
5564 +
5565 + /* Since this code path registers all the port attributes
5566 + * we can just return without further checking.
5567 + */
5568 + return;
5569 + }
5570 +
5571 if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
5572 return;
5573
5574 + /* Check if the number of mapped NPorts changed */
5575 cnt = lpfc_find_map_node(vport);
5576 if (cnt == vport->fdmi_num_disc)
5577 return;
5578
5579 - ndlp = lpfc_findnode_did(vport, FDMI_DID);
5580 - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5581 - return;
5582 -
5583 if (vport->port_type == LPFC_PHYSICAL_PORT) {
5584 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
5585 LPFC_FDMI_PORT_ATTR_num_disc);
5586 @@ -2602,8 +2628,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
5587 ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5588 memset(ae, 0, 256);
5589
5590 - snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
5591 - init_utsname()->nodename);
5592 + scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
5593 + vport->phba->os_host_name);
5594
5595 len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
5596 len += (len & 3) ? (4 - (len & 3)) : 4;
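The lpfc_ct.c hunks above also lean on the snprintf()/scnprintf() distinction: snprintf() returns the length the string would have had, so summing its return values to advance a cursor can run past the buffer, whereas scnprintf() returns the bytes actually stored. A sketch of the safe string-builder pattern (demo_build is hypothetical):

    #include <linux/kernel.h>       /* scnprintf() */

    static size_t demo_build(char *buf, size_t size, const char *host, int cnt)
    {
            size_t len = 0;

            /* scnprintf reports what was written, so len never exceeds size */
            len += scnprintf(buf + len, size - len, "HN:%s", host);
            len += scnprintf(buf + len, size - len, " DC:%d", cnt);

            return len;
    }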
5597 diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
5598 index ee70d14e7a9d..799db8a785c2 100644
5599 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
5600 +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
5601 @@ -28,6 +28,7 @@
5602 #include <linux/kthread.h>
5603 #include <linux/interrupt.h>
5604 #include <linux/lockdep.h>
5605 +#include <linux/utsname.h>
5606
5607 #include <scsi/scsi.h>
5608 #include <scsi/scsi_device.h>
5609 @@ -1138,7 +1139,6 @@ void
5610 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5611 {
5612 struct lpfc_vport *vport = pmb->vport;
5613 - uint8_t bbscn = 0;
5614
5615 if (pmb->u.mb.mbxStatus)
5616 goto out;
5617 @@ -1163,18 +1163,15 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5618 }
5619
5620 /* Start discovery by sending a FLOGI. port_state is identically
5621 - * LPFC_FLOGI while waiting for FLOGI cmpl
5622 + * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending
5623 + * the FLOGI is being deferred till after MBX_READ_SPARAM completes.
5624 */
5625 if (vport->port_state != LPFC_FLOGI) {
5626 - if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
5627 - bbscn = bf_get(lpfc_bbscn_def,
5628 - &phba->sli4_hba.bbscn_params);
5629 - vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
5630 - vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
5631 - }
5632 - lpfc_initial_flogi(vport);
5633 - } else if (vport->fc_flag & FC_PT2PT) {
5634 - lpfc_disc_start(vport);
5635 + if (!(phba->hba_flag & HBA_DEFER_FLOGI))
5636 + lpfc_initial_flogi(vport);
5637 + } else {
5638 + if (vport->fc_flag & FC_PT2PT)
5639 + lpfc_disc_start(vport);
5640 }
5641 return;
5642
5643 @@ -3100,6 +3097,14 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5644 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5645 kfree(mp);
5646 mempool_free(pmb, phba->mbox_mem_pool);
5647 +
5648 + /* Check if sending the FLOGI is being deferred until after we get
5649 + * up-to-date CSPs from MBX_READ_SPARAM.
5650 + */
5651 + if (phba->hba_flag & HBA_DEFER_FLOGI) {
5652 + lpfc_initial_flogi(vport);
5653 + phba->hba_flag &= ~HBA_DEFER_FLOGI;
5654 + }
5655 return;
5656
5657 out:
5658 @@ -3230,6 +3235,23 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
5659 }
5660
5661 lpfc_linkup(phba);
5662 + sparam_mbox = NULL;
5663 +
5664 + if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5665 + cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5666 + if (!cfglink_mbox)
5667 + goto out;
5668 + vport->port_state = LPFC_LOCAL_CFG_LINK;
5669 + lpfc_config_link(phba, cfglink_mbox);
5670 + cfglink_mbox->vport = vport;
5671 + cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
5672 + rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
5673 + if (rc == MBX_NOT_FINISHED) {
5674 + mempool_free(cfglink_mbox, phba->mbox_mem_pool);
5675 + goto out;
5676 + }
5677 + }
5678 +
5679 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5680 if (!sparam_mbox)
5681 goto out;
5682 @@ -3250,20 +3272,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
5683 goto out;
5684 }
5685
5686 - if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5687 - cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5688 - if (!cfglink_mbox)
5689 - goto out;
5690 - vport->port_state = LPFC_LOCAL_CFG_LINK;
5691 - lpfc_config_link(phba, cfglink_mbox);
5692 - cfglink_mbox->vport = vport;
5693 - cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
5694 - rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
5695 - if (rc == MBX_NOT_FINISHED) {
5696 - mempool_free(cfglink_mbox, phba->mbox_mem_pool);
5697 - goto out;
5698 - }
5699 - } else {
5700 + if (phba->hba_flag & HBA_FCOE_MODE) {
5701 vport->port_state = LPFC_VPORT_UNKNOWN;
5702 /*
5703 * Add the driver's default FCF record at FCF index 0 now. This
5704 @@ -3320,8 +3329,16 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
5705 }
5706 /* Reset FCF roundrobin bmask for new discovery */
5707 lpfc_sli4_clear_fcf_rr_bmask(phba);
5708 + } else {
5709 + if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
5710 + !(phba->link_flag & LS_LOOPBACK_MODE))
5711 + phba->hba_flag |= HBA_DEFER_FLOGI;
5712 }
5713
5714 + /* Prepare for LINK up registrations */
5715 + memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
5716 + scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
5717 + init_utsname()->nodename);
5718 return;
5719 out:
5720 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
5721 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
5722 index e8813d26e594..14d9f41977f1 100644
5723 --- a/drivers/scsi/lpfc/lpfc_init.c
5724 +++ b/drivers/scsi/lpfc/lpfc_init.c
5725 @@ -40,6 +40,7 @@
5726 #include <linux/irq.h>
5727 #include <linux/bitops.h>
5728 #include <linux/crash_dump.h>
5729 +#include <linux/cpuhotplug.h>
5730
5731 #include <scsi/scsi.h>
5732 #include <scsi/scsi_device.h>
5733 @@ -66,9 +67,13 @@
5734 #include "lpfc_version.h"
5735 #include "lpfc_ids.h"
5736
5737 +static enum cpuhp_state lpfc_cpuhp_state;
5738 /* Used when mapping IRQ vectors in a driver centric manner */
5739 static uint32_t lpfc_present_cpu;
5740
5741 +static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
5742 +static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
5743 +static void lpfc_cpuhp_add(struct lpfc_hba *phba);
5744 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
5745 static int lpfc_post_rcv_buf(struct lpfc_hba *);
5746 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
5747 @@ -1365,7 +1370,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
5748 if (vports != NULL)
5749 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5750 lpfc_rcv_seq_check_edtov(vports[i]);
5751 - lpfc_fdmi_num_disc_check(vports[i]);
5752 + lpfc_fdmi_change_check(vports[i]);
5753 }
5754 lpfc_destroy_vport_work_array(phba, vports);
5755
5756 @@ -3387,6 +3392,8 @@ lpfc_online(struct lpfc_hba *phba)
5757 if (phba->cfg_xri_rebalancing)
5758 lpfc_create_multixri_pools(phba);
5759
5760 + lpfc_cpuhp_add(phba);
5761 +
5762 lpfc_unblock_mgmt_io(phba);
5763 return 0;
5764 }
5765 @@ -3545,6 +3552,7 @@ lpfc_offline(struct lpfc_hba *phba)
5766 spin_unlock_irq(shost->host_lock);
5767 }
5768 lpfc_destroy_vport_work_array(phba, vports);
5769 + __lpfc_cpuhp_remove(phba);
5770
5771 if (phba->cfg_xri_rebalancing)
5772 lpfc_destroy_multixri_pools(phba);
5773 @@ -9160,6 +9168,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5774 }
5775 spin_unlock_irq(&phba->hbalock);
5776
5777 + lpfc_sli4_cleanup_poll_list(phba);
5778 +
5779 /* Release HBA eqs */
5780 if (phba->sli4_hba.hdwq)
5781 lpfc_sli4_release_hdwq(phba);
5782 @@ -10962,6 +10972,170 @@ found_any:
5783 return;
5784 }
5785
5786 +/**
5787 + * lpfc_cpuhp_get_eq - collect the eqs that need polling when @cpu goes offline
5788 + *
5789 + * @phba: pointer to lpfc hba data structure.
5790 + * @cpu: cpu going offline
5791 + * @eqlist: list to which the eqs that must be polled are added
5792 + */
5793 +static void
5794 +lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
5795 + struct list_head *eqlist)
5796 +{
5797 + struct lpfc_vector_map_info *map;
5798 + const struct cpumask *maskp;
5799 + struct lpfc_queue *eq;
5800 + unsigned int i;
5801 + cpumask_t tmp;
5802 + u16 idx;
5803 +
5804 + for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
5805 + maskp = pci_irq_get_affinity(phba->pcidev, idx);
5806 + if (!maskp)
5807 + continue;
5808 + /*
5809 + * if the irq is not affinitized to the cpu going
5810 + * offline, then we don't need to poll the eq
5811 + * attached to it.
5812 + */
5813 + if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
5814 + continue;
5815 + /* get the cpus that are online and are
5816 + * affinitized to this irq vector. If the
5817 + * count is more than 1 then cpuhp is not
5818 + * going to shut down this vector. Since this
5819 + * cpu has not gone offline yet, we need >1.
5820 + */
5821 + cpumask_and(&tmp, maskp, cpu_online_mask);
5822 + if (cpumask_weight(&tmp) > 1)
5823 + continue;
5824 +
5825 + /* Now that we have an irq to shut down, get the eq
5826 + * mapped to this irq. Note: multiple hdwq's in
5827 + * the software can share an eq, but eventually
5828 + * only one eq will be mapped to this vector.
5829 + */
5830 + for_each_possible_cpu(i) {
5831 + map = &phba->sli4_hba.cpu_map[i];
5832 + if (!(map->irq == pci_irq_vector(phba->pcidev, idx)))
5833 + continue;
5834 + eq = phba->sli4_hba.hdwq[map->hdwq].hba_eq;
5835 + list_add(&eq->_poll_list, eqlist);
5836 + /* 1 is good enough. others will be a copy of this */
5837 + break;
5838 + }
5839 + }
5840 +}
5841 +
5842 +static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
5843 +{
5844 + if (phba->sli_rev != LPFC_SLI_REV4)
5845 + return;
5846 +
5847 + cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
5848 + &phba->cpuhp);
5849 + /*
5850 + * unregistering the instance doesn't stop the polling
5851 + * timer. Wait for the poll timer to retire.
5852 + */
5853 + synchronize_rcu();
5854 + del_timer_sync(&phba->cpuhp_poll_timer);
5855 +}
5856 +
5857 +static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
5858 +{
5859 + if (phba->pport->fc_flag & FC_OFFLINE_MODE)
5860 + return;
5861 +
5862 + __lpfc_cpuhp_remove(phba);
5863 +}
5864 +
5865 +static void lpfc_cpuhp_add(struct lpfc_hba *phba)
5866 +{
5867 + if (phba->sli_rev != LPFC_SLI_REV4)
5868 + return;
5869 +
5870 + rcu_read_lock();
5871 +
5872 + if (!list_empty(&phba->poll_list)) {
5873 + timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
5874 + mod_timer(&phba->cpuhp_poll_timer,
5875 + jiffies + msecs_to_jiffies(LPFC_POLL_HB));
5876 + }
5877 +
5878 + rcu_read_unlock();
5879 +
5880 + cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
5881 + &phba->cpuhp);
5882 +}
5883 +
5884 +static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
5885 +{
5886 + if (phba->pport->load_flag & FC_UNLOADING) {
5887 + *retval = -EAGAIN;
5888 + return true;
5889 + }
5890 +
5891 + if (phba->sli_rev != LPFC_SLI_REV4) {
5892 + *retval = 0;
5893 + return true;
5894 + }
5895 +
5896 + /* proceed with the hotplug */
5897 + return false;
5898 +}
5899 +
5900 +static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
5901 +{
5902 + struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
5903 + struct lpfc_queue *eq, *next;
5904 + LIST_HEAD(eqlist);
5905 + int retval;
5906 +
5907 + if (!phba) {
5908 + WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
5909 + return 0;
5910 + }
5911 +
5912 + if (__lpfc_cpuhp_checks(phba, &retval))
5913 + return retval;
5914 +
5915 + lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
5916 +
5917 + /* start polling on these eq's */
5918 + list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
5919 + list_del_init(&eq->_poll_list);
5920 + lpfc_sli4_start_polling(eq);
5921 + }
5922 +
5923 + return 0;
5924 +}
5925 +
5926 +static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
5927 +{
5928 + struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
5929 + struct lpfc_queue *eq, *next;
5930 + unsigned int n;
5931 + int retval;
5932 +
5933 + if (!phba) {
5934 + WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
5935 + return 0;
5936 + }
5937 +
5938 + if (__lpfc_cpuhp_checks(phba, &retval))
5939 + return retval;
5940 +
5941 + list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
5942 + n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
5943 + if (n == cpu)
5944 + lpfc_sli4_stop_polling(eq);
5945 + }
5946 +
5947 + return 0;
5948 +}
5949 +
5950 /**
5951 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
5952 * @phba: pointer to lpfc hba data structure.
5953 @@ -11367,6 +11541,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
5954 /* Wait for completion of device XRI exchange busy */
5955 lpfc_sli4_xri_exchange_busy_wait(phba);
5956
5957 + /* per-phba callback de-registration for hotplug event */
5958 + lpfc_cpuhp_remove(phba);
5959 +
5960 /* Disable PCI subsystem interrupt */
5961 lpfc_sli4_disable_intr(phba);
5962
5963 @@ -12632,6 +12809,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
5964 /* Enable RAS FW log support */
5965 lpfc_sli4_ras_setup(phba);
5966
5967 + INIT_LIST_HEAD(&phba->poll_list);
5968 + cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
5969 +
5970 return 0;
5971
5972 out_free_sysfs_attr:
5973 @@ -13450,11 +13630,24 @@ lpfc_init(void)
5974 /* Initialize in case vector mapping is needed */
5975 lpfc_present_cpu = num_present_cpus();
5976
5977 + error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
5978 + "lpfc/sli4:online",
5979 + lpfc_cpu_online, lpfc_cpu_offline);
5980 + if (error < 0)
5981 + goto cpuhp_failure;
5982 + lpfc_cpuhp_state = error;
5983 +
5984 error = pci_register_driver(&lpfc_driver);
5985 - if (error) {
5986 - fc_release_transport(lpfc_transport_template);
5987 - fc_release_transport(lpfc_vport_transport_template);
5988 - }
5989 + if (error)
5990 + goto unwind;
5991 +
5992 + return error;
5993 +
5994 +unwind:
5995 + cpuhp_remove_multi_state(lpfc_cpuhp_state);
5996 +cpuhp_failure:
5997 + fc_release_transport(lpfc_transport_template);
5998 + fc_release_transport(lpfc_vport_transport_template);
5999
6000 return error;
6001 }
6002 @@ -13471,6 +13664,7 @@ lpfc_exit(void)
6003 {
6004 misc_deregister(&lpfc_mgmt_dev);
6005 pci_unregister_driver(&lpfc_driver);
6006 + cpuhp_remove_multi_state(lpfc_cpuhp_state);
6007 fc_release_transport(lpfc_transport_template);
6008 fc_release_transport(lpfc_vport_transport_template);
6009 idr_destroy(&lpfc_hba_index);
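The lpfc_init.c hunks register a dynamic CPU-hotplug multi-state: cpuhp_setup_state_multi() with CPUHP_AP_ONLINE_DYN allocates a state number at runtime and returns it, and each HBA later attaches itself as an instance with cpuhp_state_add_instance_nocalls(). A trimmed sketch of that registration flow (demo names hypothetical):

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    static enum cpuhp_state demo_cpuhp_state;

    static int demo_cpu_online(unsigned int cpu, struct hlist_node *node)
    {
            /* per-instance work when @cpu comes up */
            return 0;
    }

    static int demo_cpu_offline(unsigned int cpu, struct hlist_node *node)
    {
            /* per-instance work before @cpu goes down */
            return 0;
    }

    static int __init demo_init(void)
    {
            int ret;

            /* dynamic state: a non-negative return is the state number */
            ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "demo:online",
                                          demo_cpu_online, demo_cpu_offline);
            if (ret < 0)
                    return ret;
            demo_cpuhp_state = ret;
            return 0;
    }

    static void __exit demo_exit(void)
    {
            cpuhp_remove_multi_state(demo_cpuhp_state);
    }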
6010 diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
6011 index 8e0f03ef346b..a227e36cbdc2 100644
6012 --- a/drivers/scsi/lpfc/lpfc_nvme.c
6013 +++ b/drivers/scsi/lpfc/lpfc_nvme.c
6014 @@ -1976,8 +1976,6 @@ out_unlock:
6015
6016 /* Declare and initialization an instance of the FC NVME template. */
6017 static struct nvme_fc_port_template lpfc_nvme_template = {
6018 - .module = THIS_MODULE,
6019 -
6020 /* initiator-based functions */
6021 .localport_delete = lpfc_nvme_localport_delete,
6022 .remoteport_delete = lpfc_nvme_remoteport_delete,
6023 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
6024 index 40706cb842fd..cbab15d299ca 100644
6025 --- a/drivers/scsi/lpfc/lpfc_scsi.c
6026 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
6027 @@ -671,8 +671,10 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
6028 lpfc_cmd->prot_data_type = 0;
6029 #endif
6030 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
6031 - if (!tmp)
6032 + if (!tmp) {
6033 + lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
6034 return NULL;
6035 + }
6036
6037 lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
6038 lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
6039 diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
6040 index e2cec1f6e659..0717e850bcbf 100644
6041 --- a/drivers/scsi/lpfc/lpfc_sli.c
6042 +++ b/drivers/scsi/lpfc/lpfc_sli.c
6043 @@ -485,7 +485,8 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
6044 }
6045
6046 static int
6047 -lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
6048 +lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
6049 + uint8_t rearm)
6050 {
6051 struct lpfc_eqe *eqe;
6052 int count = 0, consumed = 0;
6053 @@ -519,8 +520,8 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
6054 eq->queue_claimed = 0;
6055
6056 rearm_and_exit:
6057 - /* Always clear and re-arm the EQ */
6058 - phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
6059 + /* Always clear the EQ. */
6060 + phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
6061
6062 return count;
6063 }
6064 @@ -7894,7 +7895,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
6065
6066 if (mbox_pending)
6067 /* process and rearm the EQ */
6068 - lpfc_sli4_process_eq(phba, fpeq);
6069 + lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
6070 else
6071 /* Always clear and re-arm the EQ */
6072 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
6073 @@ -10055,10 +10056,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6074 struct lpfc_iocbq *piocb, uint32_t flag)
6075 {
6076 struct lpfc_sli_ring *pring;
6077 + struct lpfc_queue *eq;
6078 unsigned long iflags;
6079 int rc;
6080
6081 if (phba->sli_rev == LPFC_SLI_REV4) {
6082 + eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
6083 +
6084 pring = lpfc_sli4_calc_ring(phba, piocb);
6085 if (unlikely(pring == NULL))
6086 return IOCB_ERROR;
6087 @@ -10066,6 +10070,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6088 spin_lock_irqsave(&pring->ring_lock, iflags);
6089 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6090 spin_unlock_irqrestore(&pring->ring_lock, iflags);
6091 +
6092 + lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
6093 } else {
6094 /* For now, SLI2/3 will still use hbalock */
6095 spin_lock_irqsave(&phba->hbalock, iflags);
6096 @@ -14245,7 +14251,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
6097 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
6098
6099 /* process and rearm the EQ */
6100 - ecount = lpfc_sli4_process_eq(phba, fpeq);
6101 + ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
6102
6103 if (unlikely(ecount == 0)) {
6104 fpeq->EQ_no_entry++;
6105 @@ -14305,6 +14311,147 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
6106 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
6107 } /* lpfc_sli4_intr_handler */
6108
6109 +void lpfc_sli4_poll_hbtimer(struct timer_list *t)
6110 +{
6111 + struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
6112 + struct lpfc_queue *eq;
6113 + int i = 0;
6114 +
6115 + rcu_read_lock();
6116 +
6117 + list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
6118 + i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
6119 + if (!list_empty(&phba->poll_list))
6120 + mod_timer(&phba->cpuhp_poll_timer,
6121 + jiffies + msecs_to_jiffies(LPFC_POLL_HB));
6122 +
6123 + rcu_read_unlock();
6124 +}
6125 +
6126 +inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
6127 +{
6128 + struct lpfc_hba *phba = eq->phba;
6129 + int i = 0;
6130 +
6131 + /*
6132 + * Unlocking an irq is one of the entry points to check
6133 + * for re-schedule, but we are good on the io submission
6134 + * path as the midlayer does a get_cpu to glue us in. Flush
6135 + * out the invalidation queue so we can see the updated
6136 + * value for the flag.
6137 + */
6138 + smp_rmb();
6139 +
6140 + if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
6141 + /* We will likely not get the completion for the caller
6142 + * during this iteration, but I guess that's fine.
6143 + * Future io's coming on this eq should be able to
6144 + * pick it up. As for the case of single io's, they
6145 + * will be handled through a sched from the polling timer
6146 + * function, which is currently triggered every 1 msec.
6147 + */
6148 + i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
6149 +
6150 + return i;
6151 +}
6152 +
6153 +static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
6154 +{
6155 + struct lpfc_hba *phba = eq->phba;
6156 +
6157 + if (list_empty(&phba->poll_list)) {
6158 + timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
6159 + /* kickstart slowpath processing for this eq */
6160 + mod_timer(&phba->cpuhp_poll_timer,
6161 + jiffies + msecs_to_jiffies(LPFC_POLL_HB));
6162 + }
6163 +
6164 + list_add_rcu(&eq->_poll_list, &phba->poll_list);
6165 + synchronize_rcu();
6166 +}
6167 +
6168 +static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
6169 +{
6170 + struct lpfc_hba *phba = eq->phba;
6171 +
6172 + /* Disable slowpath processing for this eq. Kick start the eq
6173 + * by RE-ARMING it ASAP.
6174 + */
6175 + list_del_rcu(&eq->_poll_list);
6176 + synchronize_rcu();
6177 +
6178 + if (list_empty(&phba->poll_list))
6179 + del_timer_sync(&phba->cpuhp_poll_timer);
6180 +}
6181 +
6182 +void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
6183 +{
6184 + struct lpfc_queue *eq, *next;
6185 +
6186 + list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
6187 + list_del(&eq->_poll_list);
6188 +
6189 + INIT_LIST_HEAD(&phba->poll_list);
6190 + synchronize_rcu();
6191 +}
6192 +
6193 +static inline void
6194 +__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
6195 +{
6196 + if (mode == eq->mode)
6197 + return;
6198 + /*
6199 + * currently this function is only called during a hotplug
6200 + * event and the cpu on which this function is executing
6201 + * is going offline. By now the hotplug has instructed
6202 + * the scheduler to remove this cpu from the cpu active mask.
6203 + * So we don't need to worry about being put aside by the
6204 + * scheduler for a high priority process. Yes, interrupts
6205 + * could come, but they are known to retire ASAP.
6206 + */
6207 +
6208 + /* Disable polling in the fastpath */
6209 + WRITE_ONCE(eq->mode, mode);
6210 + /* flush out the store buffer */
6211 + smp_wmb();
6212 +
6213 + /*
6214 +	 * Add this eq to the polling list and start polling. For
6215 +	 * a grace period both the interrupt handler and the poller will
6216 +	 * try to process the eq _but_ that's fine. We have a
6217 +	 * synchronization mechanism in place (queue_claimed) to
6218 +	 * deal with it. This is just a draining phase for the
6219 +	 * interrupt handler (not the eq's) as we have guaranteed
6220 +	 * through the barrier that all the CPUs have seen the new
6221 +	 * CQ_POLLED state, which will effectively disable the
6222 +	 * REARMING of the EQ. The whole idea is that eq's die off
6223 +	 * eventually as we are not rearming them anymore.
6224 + */
6225 + mode ? lpfc_sli4_add_to_poll_list(eq) :
6226 + lpfc_sli4_remove_from_poll_list(eq);
6227 +}
6228 +
6229 +void lpfc_sli4_start_polling(struct lpfc_queue *eq)
6230 +{
6231 + __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
6232 +}
6233 +
6234 +void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
6235 +{
6236 + struct lpfc_hba *phba = eq->phba;
6237 +
6238 + __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
6239 +
6240 + /* Kick start for the pending io's in h/w.
6241 +	 * Once we switch back to interrupt processing on an eq,
6242 +	 * the io path completion will only arm the eq when it
6243 +	 * receives a completion. But since the eq's are in a
6244 +	 * disarmed state they don't receive completions. This
6245 +	 * creates a deadlock scenario.
6246 + */
6247 + phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
6248 +}
6249 +
6250 /**
6251 * lpfc_sli4_queue_free - free a queue structure and associated memory
6252 * @queue: The queue structure to free.
6253 @@ -14379,6 +14526,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
6254 return NULL;
6255
6256 INIT_LIST_HEAD(&queue->list);
6257 + INIT_LIST_HEAD(&queue->_poll_list);
6258 INIT_LIST_HEAD(&queue->wq_list);
6259 INIT_LIST_HEAD(&queue->wqfull_list);
6260 INIT_LIST_HEAD(&queue->page_list);
6261 @@ -19698,6 +19846,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
6262
6263 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
6264 spin_unlock_irqrestore(&pring->ring_lock, iflags);
6265 +
6266 + lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
6267 return 0;
6268 }
6269
6270 @@ -19718,6 +19868,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
6271 }
6272 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
6273 spin_unlock_irqrestore(&pring->ring_lock, iflags);
6274 +
6275 + lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
6276 return 0;
6277 }
6278
6279 @@ -19746,6 +19898,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
6280 }
6281 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
6282 spin_unlock_irqrestore(&pring->ring_lock, iflags);
6283 +
6284 + lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
6285 return 0;
6286 }
6287 return WQE_ERROR;
6288 diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
6289 index 0d4882a9e634..c60a636a8894 100644
6290 --- a/drivers/scsi/lpfc/lpfc_sli4.h
6291 +++ b/drivers/scsi/lpfc/lpfc_sli4.h
6292 @@ -133,6 +133,23 @@ struct lpfc_rqb {
6293 struct lpfc_queue {
6294 struct list_head list;
6295 struct list_head wq_list;
6296 +
6297 + /*
6298 +	 * If interrupts are in effect on _all_ the eq's, the footprint
6299 +	 * of the polling code is zero (except for mode). This memory is
6300 +	 * checked for every io to see if the io needs to be polled, and
6301 +	 * at completion time to check if the eq needs to be rearmed.
6302 +	 * Keep it in the same cacheline as the queue ptr to avoid cpu
6303 +	 * fetch stalls. Using 1 byte of memory would leave a 7 byte
6304 +	 * hole, so fill it with other frequently used members.
6305 + */
6306 + uint16_t last_cpu; /* most recent cpu */
6307 + uint16_t hdwq;
6308 + uint8_t qe_valid;
6309 + uint8_t mode; /* interrupt or polling */
6310 +#define LPFC_EQ_INTERRUPT 0
6311 +#define LPFC_EQ_POLL 1
6312 +
6313 struct list_head wqfull_list;
6314 enum lpfc_sli4_queue_type type;
6315 enum lpfc_sli4_queue_subtype subtype;
6316 @@ -239,10 +256,8 @@ struct lpfc_queue {
6317 struct delayed_work sched_spwork;
6318
6319 uint64_t isr_timestamp;
6320 - uint16_t hdwq;
6321 - uint16_t last_cpu; /* most recent cpu */
6322 - uint8_t qe_valid;
6323 struct lpfc_queue *assoc_qp;
6324 + struct list_head _poll_list;
6325 void **q_pgs; /* array to index entries per page */
6326 };
6327
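
Editor's sketch (not part of the patch): the lpfc hunks above take event queues off
interrupt-driven rearming and service them from a self-rearming 1 ms timer while a
CPU goes offline. The pattern in miniature, with invented my_* names:

#include <linux/timer.h>
#include <linux/rculist.h>
#include <linux/jiffies.h>

#define MY_POLL_MS 1

struct my_hba {
	struct timer_list poll_timer;
	struct list_head poll_list;		/* my_queue entries, RCU-managed */
};

struct my_queue {
	struct my_hba *hba;
	struct list_head poll_entry;
	u8 mode;				/* 0 = interrupt, 1 = poll */
};

static void my_poll_timer_fn(struct timer_list *t)
{
	struct my_hba *hba = from_timer(hba, t, poll_timer);
	struct my_queue *q;

	rcu_read_lock();
	list_for_each_entry_rcu(q, &hba->poll_list, poll_entry)
		;	/* drain q's completions here, without rearming it */
	/* keep the timer alive only while something is being polled */
	if (!list_empty(&hba->poll_list))
		mod_timer(&hba->poll_timer,
			  jiffies + msecs_to_jiffies(MY_POLL_MS));
	rcu_read_unlock();
}

static void my_queue_start_polling(struct my_queue *q)
{
	struct my_hba *hba = q->hba;

	/* publish the new mode before the queue becomes visible to pollers */
	WRITE_ONCE(q->mode, 1);
	smp_wmb();

	if (list_empty(&hba->poll_list)) {
		timer_setup(&hba->poll_timer, my_poll_timer_fn, 0);
		mod_timer(&hba->poll_timer,
			  jiffies + msecs_to_jiffies(MY_POLL_MS));
	}
	list_add_rcu(&q->poll_entry, &hba->poll_list);
	synchronize_rcu();
}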
6328 diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
6329 index c8e512ba6d39..aff630fccb07 100644
6330 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
6331 +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
6332 @@ -9747,8 +9747,8 @@ static void scsih_remove(struct pci_dev *pdev)
6333
6334 ioc->remove_host = 1;
6335
6336 - mpt3sas_wait_for_commands_to_complete(ioc);
6337 - _scsih_flush_running_cmds(ioc);
6338 + if (!pci_device_is_present(pdev))
6339 + _scsih_flush_running_cmds(ioc);
6340
6341 _scsih_fw_event_cleanup_queue(ioc);
6342
6343 @@ -9831,8 +9831,8 @@ scsih_shutdown(struct pci_dev *pdev)
6344
6345 ioc->remove_host = 1;
6346
6347 - mpt3sas_wait_for_commands_to_complete(ioc);
6348 - _scsih_flush_running_cmds(ioc);
6349 + if (!pci_device_is_present(pdev))
6350 + _scsih_flush_running_cmds(ioc);
6351
6352 _scsih_fw_event_cleanup_queue(ioc);
6353
6354 diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
6355 index bfcd02fdf2b8..941aa53363f5 100644
6356 --- a/drivers/scsi/qla2xxx/qla_nvme.c
6357 +++ b/drivers/scsi/qla2xxx/qla_nvme.c
6358 @@ -610,7 +610,6 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
6359 }
6360
6361 static struct nvme_fc_port_template qla_nvme_fc_transport = {
6362 - .module = THIS_MODULE,
6363 .localport_delete = qla_nvme_localport_delete,
6364 .remoteport_delete = qla_nvme_remoteport_delete,
6365 .create_queue = qla_nvme_alloc_queue,
6366 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
6367 index d9ea0ae4f374..a6863b51a1d0 100644
6368 --- a/drivers/scsi/ufs/ufshcd.c
6369 +++ b/drivers/scsi/ufs/ufshcd.c
6370 @@ -5467,7 +5467,8 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
6371 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
6372 u32 intr_mask)
6373 {
6374 - if (!ufshcd_is_auto_hibern8_supported(hba))
6375 + if (!ufshcd_is_auto_hibern8_supported(hba) ||
6376 + !ufshcd_is_auto_hibern8_enabled(hba))
6377 return false;
6378
6379 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
6380 diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
6381 index 5260e594e0b9..4f1dec68a853 100644
6382 --- a/drivers/scsi/ufs/ufshcd.h
6383 +++ b/drivers/scsi/ufs/ufshcd.h
6384 @@ -55,6 +55,7 @@
6385 #include <linux/clk.h>
6386 #include <linux/completion.h>
6387 #include <linux/regulator/consumer.h>
6388 +#include <linux/bitfield.h>
6389 #include "unipro.h"
6390
6391 #include <asm/irq.h>
6392 @@ -771,6 +772,11 @@ static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
6393 return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
6394 }
6395
6396 +static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
6397 +{
6398 + return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
6399 +}
6400 +
6401 #define ufshcd_writel(hba, val, reg) \
6402 writel((val), (hba)->mmio_base + (reg))
6403 #define ufshcd_readl(hba, reg) \
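
Editor's sketch: the new ufshcd_is_auto_hibern8_enabled() helper above relies on
FIELD_GET() from <linux/bitfield.h>, which extracts the field selected by a constant
mask and shifts it down. A minimal illustration with an invented mask (the real
UFSHCI layout lives in ufshci.h):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MY_AH8_TIMER_MASK	GENMASK(9, 0)	/* illustrative: low 10 bits */

static inline bool my_auto_hibern8_enabled(u32 ahit)
{
	/* a zero timer value means the feature is configured off */
	return FIELD_GET(MY_AH8_TIMER_MASK, ahit) != 0;
}

This is why the interrupt path can now bail out early: a controller may support
auto-hibernate while the timer field is still programmed to zero.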
6404 diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
6405 index 70014ecce2a7..7b642c330977 100644
6406 --- a/drivers/soc/fsl/dpio/dpio-driver.c
6407 +++ b/drivers/soc/fsl/dpio/dpio-driver.c
6408 @@ -233,10 +233,6 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
6409 goto err_allocate_irqs;
6410 }
6411
6412 - err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
6413 - if (err)
6414 - goto err_register_dpio_irq;
6415 -
6416 priv->io = dpaa2_io_create(&desc, dev);
6417 if (!priv->io) {
6418 dev_err(dev, "dpaa2_io_create failed\n");
6419 @@ -244,6 +240,10 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
6420 goto err_dpaa2_io_create;
6421 }
6422
6423 + err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
6424 + if (err)
6425 + goto err_register_dpio_irq;
6426 +
6427 dev_info(dev, "probed\n");
6428 dev_dbg(dev, " receives_notifications = %d\n",
6429 desc.receives_notifications);
6430 diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
6431 index d47bd26577b3..68e33457c814 100644
6432 --- a/drivers/spi/spi-fsl-dspi.c
6433 +++ b/drivers/spi/spi-fsl-dspi.c
6434 @@ -192,8 +192,7 @@ struct fsl_dspi {
6435 u8 bytes_per_word;
6436 const struct fsl_dspi_devtype_data *devtype_data;
6437
6438 - wait_queue_head_t waitq;
6439 - u32 waitflags;
6440 + struct completion xfer_done;
6441
6442 struct fsl_dspi_dma *dma;
6443 };
6444 @@ -703,10 +702,8 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
6445 if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)))
6446 return IRQ_NONE;
6447
6448 - if (dspi_rxtx(dspi) == 0) {
6449 - dspi->waitflags = 1;
6450 - wake_up_interruptible(&dspi->waitq);
6451 - }
6452 + if (dspi_rxtx(dspi) == 0)
6453 + complete(&dspi->xfer_done);
6454
6455 return IRQ_HANDLED;
6456 }
6457 @@ -800,13 +797,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
6458 status = dspi_poll(dspi);
6459 } while (status == -EINPROGRESS);
6460 } else if (trans_mode != DSPI_DMA_MODE) {
6461 - status = wait_event_interruptible(dspi->waitq,
6462 - dspi->waitflags);
6463 - dspi->waitflags = 0;
6464 + wait_for_completion(&dspi->xfer_done);
6465 + reinit_completion(&dspi->xfer_done);
6466 }
6467 - if (status)
6468 - dev_err(&dspi->pdev->dev,
6469 - "Waiting for transfer to complete failed!\n");
6470
6471 if (transfer->delay_usecs)
6472 udelay(transfer->delay_usecs);
6473 @@ -1122,7 +1115,7 @@ static int dspi_probe(struct platform_device *pdev)
6474 goto out_clk_put;
6475 }
6476
6477 - init_waitqueue_head(&dspi->waitq);
6478 + init_completion(&dspi->xfer_done);
6479
6480 poll_mode:
6481 if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
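
Editor's sketch: the dspi change above replaces an open-coded waitqueue-plus-flag
pair with a struct completion, which bundles the "event happened" state and the
wakeup into one primitive. The resulting shape, with invented my_* names:

#include <linux/completion.h>
#include <linux/interrupt.h>

struct my_spi {
	struct completion xfer_done;	/* init_completion() at probe time */
};

static irqreturn_t my_spi_irq(int irq, void *dev_id)
{
	struct my_spi *spi = dev_id;

	complete(&spi->xfer_done);	/* records the event and wakes the waiter */
	return IRQ_HANDLED;
}

static void my_spi_wait_xfer(struct my_spi *spi)
{
	wait_for_completion(&spi->xfer_done);
	reinit_completion(&spi->xfer_done);	/* rearm for the next transfer */
}

Because wait_for_completion() is uninterruptible, a signal can no longer abandon a
transfer midway, which is what the deleted "Waiting for transfer to complete
failed!" branch used to report.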
6482 diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c
6483 index 6f0cd0784786..c5a262a12e40 100644
6484 --- a/drivers/staging/media/allegro-dvt/allegro-core.c
6485 +++ b/drivers/staging/media/allegro-dvt/allegro-core.c
6486 @@ -393,7 +393,10 @@ struct mcu_msg_create_channel {
6487 u32 freq_ird;
6488 u32 freq_lt;
6489 u32 gdr_mode;
6490 - u32 gop_length;
6491 + u16 gop_length;
6492 + u8 num_b;
6493 + u8 freq_golden_ref;
6494 +
6495 u32 unknown39;
6496
6497 u32 subframe_latency;
6498 diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
6499 index ecd34a7db190..8b76f1f13b06 100644
6500 --- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
6501 +++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
6502 @@ -67,12 +67,17 @@ hantro_h1_jpeg_enc_set_qtable(struct hantro_dev *vpu,
6503 unsigned char *chroma_qtable)
6504 {
6505 u32 reg, i;
6506 + __be32 *luma_qtable_p;
6507 + __be32 *chroma_qtable_p;
6508 +
6509 + luma_qtable_p = (__be32 *)luma_qtable;
6510 + chroma_qtable_p = (__be32 *)chroma_qtable;
6511
6512 for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) {
6513 - reg = get_unaligned_be32(&luma_qtable[i]);
6514 + reg = get_unaligned_be32(&luma_qtable_p[i]);
6515 vepu_write_relaxed(vpu, reg, H1_REG_JPEG_LUMA_QUAT(i));
6516
6517 - reg = get_unaligned_be32(&chroma_qtable[i]);
6518 + reg = get_unaligned_be32(&chroma_qtable_p[i]);
6519 vepu_write_relaxed(vpu, reg, H1_REG_JPEG_CHROMA_QUAT(i));
6520 }
6521 }
6522 diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
6523 index 06162f569b5e..4f9272e5b8d9 100644
6524 --- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
6525 +++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
6526 @@ -98,12 +98,17 @@ rk3399_vpu_jpeg_enc_set_qtable(struct hantro_dev *vpu,
6527 unsigned char *chroma_qtable)
6528 {
6529 u32 reg, i;
6530 + __be32 *luma_qtable_p;
6531 + __be32 *chroma_qtable_p;
6532 +
6533 + luma_qtable_p = (__be32 *)luma_qtable;
6534 + chroma_qtable_p = (__be32 *)chroma_qtable;
6535
6536 for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
6537 - reg = get_unaligned_be32(&luma_qtable[i]);
6538 + reg = get_unaligned_be32(&luma_qtable_p[i]);
6539 vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i));
6540
6541 - reg = get_unaligned_be32(&chroma_qtable[i]);
6542 + reg = get_unaligned_be32(&chroma_qtable_p[i]);
6543 vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i));
6544 }
6545 }
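
Editor's sketch: both JPEG-encoder hunks above fix the same pointer-stride bug.
Indexing an unsigned char * advances one byte per element, so
get_unaligned_be32(&luma_qtable[i]) read four overlapping bytes starting at offset
i instead of reading word i; casting to __be32 * makes &p[i] advance four bytes per
element. The difference in isolation:

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 read_qtable_word(const unsigned char *table, unsigned int i)
{
	const __be32 *words = (const __be32 *)table;

	/* reads bytes 4*i .. 4*i+3, not bytes i .. i+3 */
	return get_unaligned_be32(&words[i]);
}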
6546 diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
6547 index bfd6b5fbf484..d24897d06947 100644
6548 --- a/drivers/staging/media/imx/imx7-media-csi.c
6549 +++ b/drivers/staging/media/imx/imx7-media-csi.c
6550 @@ -1009,6 +1009,7 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
6551 sdformat->format.width = in_fmt->width;
6552 sdformat->format.height = in_fmt->height;
6553 sdformat->format.code = in_fmt->code;
6554 + sdformat->format.field = in_fmt->field;
6555 *cc = in_cc;
6556
6557 sdformat->format.colorspace = in_fmt->colorspace;
6558 @@ -1023,6 +1024,9 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
6559 false);
6560 sdformat->format.code = (*cc)->codes[0];
6561 }
6562 +
6563 + if (sdformat->format.field != V4L2_FIELD_INTERLACED)
6564 + sdformat->format.field = V4L2_FIELD_NONE;
6565 break;
6566 default:
6567 return -EINVAL;
6568 diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
6569 index e50b1f88e25b..6f628195c4da 100644
6570 --- a/drivers/staging/media/imx/imx7-mipi-csis.c
6571 +++ b/drivers/staging/media/imx/imx7-mipi-csis.c
6572 @@ -579,7 +579,7 @@ static int mipi_csis_s_stream(struct v4l2_subdev *mipi_sd, int enable)
6573 state->flags |= ST_STREAMING;
6574 } else {
6575 v4l2_subdev_call(state->src_sd, video, s_stream, 0);
6576 - ret = v4l2_subdev_call(state->src_sd, core, s_power, 1);
6577 + ret = v4l2_subdev_call(state->src_sd, core, s_power, 0);
6578 mipi_csis_stop_stream(state);
6579 state->flags &= ~ST_STREAMING;
6580 if (state->debug)
6581 diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
6582 index 771d8cb68dc1..02f551536e18 100644
6583 --- a/drivers/staging/wilc1000/wilc_wlan.c
6584 +++ b/drivers/staging/wilc1000/wilc_wlan.c
6585 @@ -578,7 +578,6 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
6586 entries = ((reg >> 3) & 0x3f);
6587 break;
6588 }
6589 - release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
6590 } while (--timeout);
6591 if (timeout <= 0) {
6592 ret = func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x0);
6593 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
6594 index cede7a8e3605..526c275ad0bc 100644
6595 --- a/drivers/usb/dwc3/core.c
6596 +++ b/drivers/usb/dwc3/core.c
6597 @@ -992,6 +992,9 @@ static int dwc3_core_init(struct dwc3 *dwc)
6598 if (dwc->dis_tx_ipgap_linecheck_quirk)
6599 reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
6600
6601 + if (dwc->parkmode_disable_ss_quirk)
6602 + reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
6603 +
6604 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
6605 }
6606
6607 @@ -1305,6 +1308,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
6608 "snps,dis-del-phy-power-chg-quirk");
6609 dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
6610 "snps,dis-tx-ipgap-linecheck-quirk");
6611 + dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
6612 + "snps,parkmode-disable-ss-quirk");
6613
6614 dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
6615 "snps,tx_de_emphasis_quirk");
6616 diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
6617 index 77c4a9abe365..3ecc69c5b150 100644
6618 --- a/drivers/usb/dwc3/core.h
6619 +++ b/drivers/usb/dwc3/core.h
6620 @@ -249,6 +249,7 @@
6621 #define DWC3_GUCTL_HSTINAUTORETRY BIT(14)
6622
6623 /* Global User Control 1 Register */
6624 +#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17)
6625 #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28)
6626 #define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24)
6627
6628 @@ -1024,6 +1025,8 @@ struct dwc3_scratchpad_array {
6629 * change quirk.
6630 * @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
6631 * check during HS transmit.
6632 + * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
6633 + * instances in park mode.
6634 * @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
6635 * @tx_de_emphasis: Tx de-emphasis value
6636 * 0 - -6dB de-emphasis
6637 @@ -1215,6 +1218,7 @@ struct dwc3 {
6638 unsigned dis_u2_freeclk_exists_quirk:1;
6639 unsigned dis_del_phy_power_chg_quirk:1;
6640 unsigned dis_tx_ipgap_linecheck_quirk:1;
6641 + unsigned parkmode_disable_ss_quirk:1;
6642
6643 unsigned tx_de_emphasis_quirk:1;
6644 unsigned tx_de_emphasis:2;
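
Editor's sketch: the dwc3 hunks follow the driver's usual quirk plumbing, where a
boolean firmware property is latched into a bitfield at probe and applied to a
global register during core init. A condensed illustration (the register offset and
helper names here are invented):

#include <linux/property.h>
#include <linux/io.h>
#include <linux/bits.h>

#define MY_GUCTL1			0xc11c	/* illustrative offset */
#define MY_GUCTL1_PARKMODE_DISABLE_SS	BIT(17)

static void my_apply_parkmode_quirk(struct device *dev, void __iomem *regs)
{
	u32 reg = readl(regs + MY_GUCTL1);

	if (device_property_read_bool(dev, "snps,parkmode-disable-ss-quirk"))
		reg |= MY_GUCTL1_PARKMODE_DISABLE_SS;

	writel(reg, regs + MY_GUCTL1);
}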
6645 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
6646 index d7871636fced..d98ca1566e95 100644
6647 --- a/drivers/usb/gadget/composite.c
6648 +++ b/drivers/usb/gadget/composite.c
6649 @@ -861,6 +861,11 @@ static int set_config(struct usb_composite_dev *cdev,
6650 else
6651 power = min(power, 900U);
6652 done:
6653 + if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
6654 + usb_gadget_set_selfpowered(gadget);
6655 + else
6656 + usb_gadget_clear_selfpowered(gadget);
6657 +
6658 usb_gadget_vbus_draw(gadget, power);
6659 if (result >= 0 && cdev->delayed_status)
6660 result = USB_GADGET_DELAYED_STATUS;
6661 @@ -2279,6 +2284,7 @@ void composite_suspend(struct usb_gadget *gadget)
6662
6663 cdev->suspended = 1;
6664
6665 + usb_gadget_set_selfpowered(gadget);
6666 usb_gadget_vbus_draw(gadget, 2);
6667 }
6668
6669 @@ -2307,6 +2313,9 @@ void composite_resume(struct usb_gadget *gadget)
6670 else
6671 maxpower = min(maxpower, 900U);
6672
6673 + if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW)
6674 + usb_gadget_clear_selfpowered(gadget);
6675 +
6676 usb_gadget_vbus_draw(gadget, maxpower);
6677 }
6678
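
Editor's sketch: the composite.c hunks encode one rule in three places: whenever
the configured VBUS draw changes, the gadget's self-powered status must track
whether the draw fits within the 100 mA a self-powered device may take from the
bus. That invariant as a single hypothetical helper:

#include <linux/usb/gadget.h>

static void my_set_power(struct usb_gadget *gadget, unsigned int mA)
{
	if (mA <= USB_SELF_POWER_VBUS_MAX_DRAW)		/* 100 mA */
		usb_gadget_set_selfpowered(gadget);
	else
		usb_gadget_clear_selfpowered(gadget);

	usb_gadget_vbus_draw(gadget, mA);
}

Suspend is the special case: the device may draw almost nothing, so
composite_suspend() unconditionally reports self-powered until resume re-evaluates
the draw.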
6679 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
6680 index a9a711e04614..87fdeb042c67 100644
6681 --- a/drivers/usb/gadget/function/f_fs.c
6682 +++ b/drivers/usb/gadget/function/f_fs.c
6683 @@ -1120,6 +1120,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
6684
6685 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
6686 if (unlikely(ret)) {
6687 + io_data->req = NULL;
6688 usb_ep_free_request(ep->ep, req);
6689 goto error_lock;
6690 }
6691 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
6692 index 9b3b1b16eafb..2f49a7b3ce85 100644
6693 --- a/drivers/usb/host/xhci.c
6694 +++ b/drivers/usb/host/xhci.c
6695 @@ -1157,8 +1157,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
6696 xhci_dbg(xhci, "Stop HCD\n");
6697 xhci_halt(xhci);
6698 xhci_zero_64b_regs(xhci);
6699 - xhci_reset(xhci);
6700 + retval = xhci_reset(xhci);
6701 spin_unlock_irq(&xhci->lock);
6702 + if (retval)
6703 + return retval;
6704 xhci_cleanup_msix(xhci);
6705
6706 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
6707 diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
6708 index ae1a5eb98620..1e2769010089 100644
6709 --- a/drivers/vfio/platform/vfio_platform.c
6710 +++ b/drivers/vfio/platform/vfio_platform.c
6711 @@ -44,7 +44,7 @@ static int get_platform_irq(struct vfio_platform_device *vdev, int i)
6712 {
6713 struct platform_device *pdev = (struct platform_device *) vdev->opaque;
6714
6715 - return platform_get_irq(pdev, i);
6716 + return platform_get_irq_optional(pdev, i);
6717 }
6718
6719 static int vfio_platform_probe(struct platform_device *pdev)
6720 diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
6721 index ef1d09f8920b..52aa90fb4fbd 100644
6722 --- a/fs/afs/rxrpc.c
6723 +++ b/fs/afs/rxrpc.c
6724 @@ -414,7 +414,8 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
6725 afs_wake_up_async_call :
6726 afs_wake_up_call_waiter),
6727 call->upgrade,
6728 - call->intr,
6729 + (call->intr ? RXRPC_PREINTERRUPTIBLE :
6730 + RXRPC_UNINTERRUPTIBLE),
6731 call->debug_id);
6732 if (IS_ERR(rxcall)) {
6733 ret = PTR_ERR(rxcall);
6734 diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
6735 index 3f3110975f88..11be02459b87 100644
6736 --- a/fs/btrfs/async-thread.c
6737 +++ b/fs/btrfs/async-thread.c
6738 @@ -402,3 +402,11 @@ void btrfs_set_work_high_priority(struct btrfs_work *work)
6739 {
6740 set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
6741 }
6742 +
6743 +void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
6744 +{
6745 + if (wq->high)
6746 + flush_workqueue(wq->high->normal_wq);
6747 +
6748 + flush_workqueue(wq->normal->normal_wq);
6749 +}
6750 diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
6751 index c5bf2b117c05..714ab6855423 100644
6752 --- a/fs/btrfs/async-thread.h
6753 +++ b/fs/btrfs/async-thread.h
6754 @@ -44,5 +44,6 @@ void btrfs_set_work_high_priority(struct btrfs_work *work);
6755 struct btrfs_fs_info *btrfs_work_owner(const struct btrfs_work *work);
6756 struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq);
6757 bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq);
6758 +void btrfs_flush_workqueue(struct btrfs_workqueue *wq);
6759
6760 #endif
6761 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
6762 index c7a53e79c66d..5bcccfbcc7c1 100644
6763 --- a/fs/btrfs/delayed-inode.c
6764 +++ b/fs/btrfs/delayed-inode.c
6765 @@ -6,6 +6,7 @@
6766
6767 #include <linux/slab.h>
6768 #include <linux/iversion.h>
6769 +#include <linux/sched/mm.h>
6770 #include "misc.h"
6771 #include "delayed-inode.h"
6772 #include "disk-io.h"
6773 @@ -804,11 +805,14 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
6774 struct btrfs_delayed_item *delayed_item)
6775 {
6776 struct extent_buffer *leaf;
6777 + unsigned int nofs_flag;
6778 char *ptr;
6779 int ret;
6780
6781 + nofs_flag = memalloc_nofs_save();
6782 ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
6783 delayed_item->data_len);
6784 + memalloc_nofs_restore(nofs_flag);
6785 if (ret < 0 && ret != -EEXIST)
6786 return ret;
6787
6788 @@ -936,6 +940,7 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
6789 struct btrfs_delayed_node *node)
6790 {
6791 struct btrfs_delayed_item *curr, *prev;
6792 + unsigned int nofs_flag;
6793 int ret = 0;
6794
6795 do_again:
6796 @@ -944,7 +949,9 @@ do_again:
6797 if (!curr)
6798 goto delete_fail;
6799
6800 + nofs_flag = memalloc_nofs_save();
6801 ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
6802 + memalloc_nofs_restore(nofs_flag);
6803 if (ret < 0)
6804 goto delete_fail;
6805 else if (ret > 0) {
6806 @@ -1011,6 +1018,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
6807 struct btrfs_key key;
6808 struct btrfs_inode_item *inode_item;
6809 struct extent_buffer *leaf;
6810 + unsigned int nofs_flag;
6811 int mod;
6812 int ret;
6813
6814 @@ -1023,7 +1031,9 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
6815 else
6816 mod = 1;
6817
6818 + nofs_flag = memalloc_nofs_save();
6819 ret = btrfs_lookup_inode(trans, root, path, &key, mod);
6820 + memalloc_nofs_restore(nofs_flag);
6821 if (ret > 0) {
6822 btrfs_release_path(path);
6823 return -ENOENT;
6824 @@ -1074,7 +1084,10 @@ search:
6825
6826 key.type = BTRFS_INODE_EXTREF_KEY;
6827 key.offset = -1;
6828 +
6829 + nofs_flag = memalloc_nofs_save();
6830 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
6831 + memalloc_nofs_restore(nofs_flag);
6832 if (ret < 0)
6833 goto err_out;
6834 ASSERT(ret);
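
Editor's sketch: the delayed-inode hunks wrap btree operations that run inside a
transaction with the scoped NOFS API, so any allocation made underneath implicitly
loses __GFP_FS and cannot re-enter the filesystem to reclaim memory. The idiom in
isolation (not btrfs code):

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_while_transaction_held(size_t len)
{
	unsigned int nofs_flag;
	void *p;

	nofs_flag = memalloc_nofs_save();	/* begin NOFS scope */
	p = kmalloc(len, GFP_KERNEL);		/* behaves like GFP_NOFS here */
	memalloc_nofs_restore(nofs_flag);	/* end scope, restore old state */

	return p;
}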
6835 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
6836 index 5cdd1b51285b..273d1ccdd45d 100644
6837 --- a/fs/btrfs/disk-io.c
6838 +++ b/fs/btrfs/disk-io.c
6839 @@ -3057,6 +3057,18 @@ retry_root_backup:
6840 fs_info->generation = generation;
6841 fs_info->last_trans_committed = generation;
6842
6843 + /*
6844 + * If we have a uuid root and we're not being told to rescan we need to
6845 + * check the generation here so we can set the
6846 + * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the
6847 + * transaction during a balance or the log replay without updating the
6848 + * uuid generation, and then if we crash we would rescan the uuid tree,
6849 + * even though it was perfectly fine.
6850 + */
6851 + if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
6852 + fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
6853 + set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
6854 +
6855 ret = btrfs_verify_dev_extents(fs_info);
6856 if (ret) {
6857 btrfs_err(fs_info,
6858 @@ -3287,8 +3299,6 @@ retry_root_backup:
6859 close_ctree(fs_info);
6860 return ret;
6861 }
6862 - } else {
6863 - set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
6864 }
6865 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
6866
6867 @@ -4007,6 +4017,19 @@ void close_ctree(struct btrfs_fs_info *fs_info)
6868 */
6869 btrfs_delete_unused_bgs(fs_info);
6870
6871 + /*
6872 + * There might be existing delayed inode workers still running
6873 + * and holding an empty delayed inode item. We must wait for
6874 + * them to complete first because they can create a transaction.
6875 + * This happens when someone calls btrfs_balance_delayed_items()
6876 + * and then a transaction commit runs the same delayed nodes
6877 + * before any delayed worker has done something with the nodes.
6878 + * We must wait for any worker here and not at transaction
6879 + * commit time since that could cause a deadlock.
6880 + * This is a very rare case.
6881 + */
6882 + btrfs_flush_workqueue(fs_info->delayed_workers);
6883 +
6884 ret = btrfs_commit_super(fs_info);
6885 if (ret)
6886 btrfs_err(fs_info, "commit super ret %d", ret);
6887 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
6888 index 284540cdbbd9..8aab286f2028 100644
6889 --- a/fs/btrfs/extent_io.c
6890 +++ b/fs/btrfs/extent_io.c
6891 @@ -3928,6 +3928,7 @@ int btree_write_cache_pages(struct address_space *mapping,
6892 .extent_locked = 0,
6893 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
6894 };
6895 + struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
6896 int ret = 0;
6897 int done = 0;
6898 int nr_to_write_done = 0;
6899 @@ -4041,7 +4042,39 @@ retry:
6900 end_write_bio(&epd, ret);
6901 return ret;
6902 }
6903 - ret = flush_write_bio(&epd);
6904 + /*
6905 + * If something went wrong, don't allow any metadata write bio to be
6906 + * submitted.
6907 + *
6908 + * This would prevent use-after-free if we had dirty pages not
6909 +	 * cleaned up, which can still happen with fuzzed images.
6910 + *
6911 + * - Bad extent tree
6912 + * Allowing existing tree block to be allocated for other trees.
6913 + *
6914 + * - Log tree operations
6915 +	 *   Existing tree blocks get allocated to the log tree, which bumps
6916 +	 *   their generation; they then get cleaned in tree re-balance.
6917 +	 *   Such a tree block will not be written back, since it's clean,
6918 +	 *   thus no WRITTEN flag is set.
6919 +	 *   And after the log is written back, this tree block is not traced
6920 +	 *   by any dirty extent_io_tree.
6921 + *
6922 + * - Offending tree block gets re-dirtied from its original owner
6923 +	 *   Since it has a bumped generation and no WRITTEN flag, it can be
6924 + * reused without COWing. This tree block will not be traced
6925 + * by btrfs_transaction::dirty_pages.
6926 + *
6927 +	 * Now such a dirty tree block will not be cleaned by any dirty
6928 +	 * extent io tree. Thus we don't want to submit such a wild eb
6929 +	 * if the fs already has an error.
6930 + */
6931 + if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
6932 + ret = flush_write_bio(&epd);
6933 + } else {
6934 + ret = -EUCLEAN;
6935 + end_write_bio(&epd, ret);
6936 + }
6937 return ret;
6938 }
6939
6940 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
6941 index 5739b8fc7fff..3cfbccacef7f 100644
6942 --- a/fs/btrfs/file.c
6943 +++ b/fs/btrfs/file.c
6944 @@ -2073,6 +2073,16 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
6945
6946 btrfs_init_log_ctx(&ctx, inode);
6947
6948 + /*
6949 + * Set the range to full if the NO_HOLES feature is not enabled.
6950 + * This is to avoid missing file extent items representing holes after
6951 + * replaying the log.
6952 + */
6953 + if (!btrfs_fs_incompat(fs_info, NO_HOLES)) {
6954 + start = 0;
6955 + end = LLONG_MAX;
6956 + }
6957 +
6958 /*
6959 * We write the dirty pages in the range and wait until they complete
6960 * out of the ->i_mutex. If so, we can flush the dirty pages by
6961 @@ -2127,6 +2137,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
6962 */
6963 ret = start_ordered_ops(inode, start, end);
6964 if (ret) {
6965 + up_write(&BTRFS_I(inode)->dio_sem);
6966 inode_unlock(inode);
6967 goto out;
6968 }
6969 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
6970 index 286c8c11c8d3..590defdf8860 100644
6971 --- a/fs/btrfs/qgroup.c
6972 +++ b/fs/btrfs/qgroup.c
6973 @@ -1030,6 +1030,7 @@ out_add_root:
6974 ret = qgroup_rescan_init(fs_info, 0, 1);
6975 if (!ret) {
6976 qgroup_rescan_zero_tracking(fs_info);
6977 + fs_info->qgroup_rescan_running = true;
6978 btrfs_queue_work(fs_info->qgroup_rescan_workers,
6979 &fs_info->qgroup_rescan_work);
6980 }
6981 @@ -3276,7 +3277,6 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
6982 sizeof(fs_info->qgroup_rescan_progress));
6983 fs_info->qgroup_rescan_progress.objectid = progress_objectid;
6984 init_completion(&fs_info->qgroup_rescan_completion);
6985 - fs_info->qgroup_rescan_running = true;
6986
6987 spin_unlock(&fs_info->qgroup_lock);
6988 mutex_unlock(&fs_info->qgroup_rescan_lock);
6989 @@ -3341,8 +3341,11 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
6990
6991 qgroup_rescan_zero_tracking(fs_info);
6992
6993 + mutex_lock(&fs_info->qgroup_rescan_lock);
6994 + fs_info->qgroup_rescan_running = true;
6995 btrfs_queue_work(fs_info->qgroup_rescan_workers,
6996 &fs_info->qgroup_rescan_work);
6997 + mutex_unlock(&fs_info->qgroup_rescan_lock);
6998
6999 return 0;
7000 }
7001 @@ -3378,9 +3381,13 @@ int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
7002 void
7003 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
7004 {
7005 - if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
7006 + if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
7007 + mutex_lock(&fs_info->qgroup_rescan_lock);
7008 + fs_info->qgroup_rescan_running = true;
7009 btrfs_queue_work(fs_info->qgroup_rescan_workers,
7010 &fs_info->qgroup_rescan_work);
7011 + mutex_unlock(&fs_info->qgroup_rescan_lock);
7012 + }
7013 }
7014
7015 /*
7016 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
7017 index bc1d7f144ace..04bdbbb746a4 100644
7018 --- a/fs/btrfs/relocation.c
7019 +++ b/fs/btrfs/relocation.c
7020 @@ -1186,7 +1186,7 @@ out:
7021 free_backref_node(cache, lower);
7022 }
7023
7024 - free_backref_node(cache, node);
7025 + remove_backref_node(cache, node);
7026 return ERR_PTR(err);
7027 }
7028 ASSERT(!node || !node->detached);
7029 @@ -1298,7 +1298,7 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
7030 if (!node)
7031 return -ENOMEM;
7032
7033 - node->bytenr = root->node->start;
7034 + node->bytenr = root->commit_root->start;
7035 node->data = root;
7036
7037 spin_lock(&rc->reloc_root_tree.lock);
7038 @@ -1329,10 +1329,11 @@ static void __del_reloc_root(struct btrfs_root *root)
7039 if (rc && root->node) {
7040 spin_lock(&rc->reloc_root_tree.lock);
7041 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
7042 - root->node->start);
7043 + root->commit_root->start);
7044 if (rb_node) {
7045 node = rb_entry(rb_node, struct mapping_node, rb_node);
7046 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
7047 + RB_CLEAR_NODE(&node->rb_node);
7048 }
7049 spin_unlock(&rc->reloc_root_tree.lock);
7050 if (!node)
7051 @@ -1350,7 +1351,7 @@ static void __del_reloc_root(struct btrfs_root *root)
7052 * helper to update the 'address of tree root -> reloc tree'
7053 * mapping
7054 */
7055 -static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
7056 +static int __update_reloc_root(struct btrfs_root *root)
7057 {
7058 struct btrfs_fs_info *fs_info = root->fs_info;
7059 struct rb_node *rb_node;
7060 @@ -1359,7 +1360,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
7061
7062 spin_lock(&rc->reloc_root_tree.lock);
7063 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
7064 - root->node->start);
7065 + root->commit_root->start);
7066 if (rb_node) {
7067 node = rb_entry(rb_node, struct mapping_node, rb_node);
7068 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
7069 @@ -1371,7 +1372,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
7070 BUG_ON((struct btrfs_root *)node->data != root);
7071
7072 spin_lock(&rc->reloc_root_tree.lock);
7073 - node->bytenr = new_bytenr;
7074 + node->bytenr = root->node->start;
7075 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
7076 node->bytenr, &node->rb_node);
7077 spin_unlock(&rc->reloc_root_tree.lock);
7078 @@ -1529,6 +1530,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
7079 }
7080
7081 if (reloc_root->commit_root != reloc_root->node) {
7082 + __update_reloc_root(reloc_root);
7083 btrfs_set_root_node(root_item, reloc_root->node);
7084 free_extent_buffer(reloc_root->commit_root);
7085 reloc_root->commit_root = btrfs_root_node(reloc_root);
7086 @@ -2562,7 +2564,21 @@ out:
7087 free_reloc_roots(&reloc_roots);
7088 }
7089
7090 - BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
7091 + /*
7092 + * We used to have
7093 + *
7094 + * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
7095 + *
7096 + * here, but it's wrong. If we fail to start the transaction in
7097 + * prepare_to_merge() we will have only 0 ref reloc roots, none of which
7098 + * have actually been removed from the reloc_root_tree rb tree. This is
7099 + * fine because we're bailing here, and we hold a reference on the root
7100 + * for the list that holds it, so these roots will be cleaned up when we
7101 + * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
7102 + * will be cleaned up on unmount.
7103 + *
7104 + * The remaining nodes will be cleaned up by free_reloc_control.
7105 + */
7106 }
7107
7108 static void free_block_list(struct rb_root *blocks)
7109 @@ -3162,9 +3178,8 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
7110 ret = relocate_tree_block(trans, rc, node, &block->key,
7111 path);
7112 if (ret < 0) {
7113 - if (ret != -EAGAIN || &block->rb_node == rb_first(blocks))
7114 - err = ret;
7115 - goto out;
7116 + err = ret;
7117 + break;
7118 }
7119 }
7120 out:
7121 @@ -4140,12 +4155,6 @@ restart:
7122 if (!RB_EMPTY_ROOT(&blocks)) {
7123 ret = relocate_tree_blocks(trans, rc, &blocks);
7124 if (ret < 0) {
7125 - /*
7126 - * if we fail to relocate tree blocks, force to update
7127 - * backref cache when committing transaction.
7128 - */
7129 - rc->backref_cache.last_trans = trans->transid - 1;
7130 -
7131 if (ret != -EAGAIN) {
7132 err = ret;
7133 break;
7134 @@ -4215,10 +4224,10 @@ restart:
7135 goto out_free;
7136 }
7137 btrfs_commit_transaction(trans);
7138 +out_free:
7139 ret = clean_dirty_subvols(rc);
7140 if (ret < 0 && !err)
7141 err = ret;
7142 -out_free:
7143 btrfs_free_block_rsv(fs_info, rc->block_rsv);
7144 btrfs_free_path(path);
7145 return err;
7146 @@ -4575,9 +4584,8 @@ int btrfs_recover_relocation(struct btrfs_root *root)
7147
7148 trans = btrfs_join_transaction(rc->extent_root);
7149 if (IS_ERR(trans)) {
7150 - unset_reloc_control(rc);
7151 err = PTR_ERR(trans);
7152 - goto out_free;
7153 + goto out_unset;
7154 }
7155
7156 rc->merge_reloc_tree = 1;
7157 @@ -4597,7 +4605,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
7158 if (IS_ERR(fs_root)) {
7159 err = PTR_ERR(fs_root);
7160 list_add_tail(&reloc_root->root_list, &reloc_roots);
7161 - goto out_free;
7162 + goto out_unset;
7163 }
7164
7165 err = __add_reloc_root(reloc_root);
7166 @@ -4607,7 +4615,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
7167
7168 err = btrfs_commit_transaction(trans);
7169 if (err)
7170 - goto out_free;
7171 + goto out_unset;
7172
7173 merge_reloc_roots(rc);
7174
7175 @@ -4616,14 +4624,15 @@ int btrfs_recover_relocation(struct btrfs_root *root)
7176 trans = btrfs_join_transaction(rc->extent_root);
7177 if (IS_ERR(trans)) {
7178 err = PTR_ERR(trans);
7179 - goto out_free;
7180 + goto out_clean;
7181 }
7182 err = btrfs_commit_transaction(trans);
7183 -
7184 +out_clean:
7185 ret = clean_dirty_subvols(rc);
7186 if (ret < 0 && !err)
7187 err = ret;
7188 -out_free:
7189 +out_unset:
7190 + unset_reloc_control(rc);
7191 kfree(rc);
7192 out:
7193 if (!list_empty(&reloc_roots))
7194 @@ -4711,11 +4720,6 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
7195 BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
7196 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
7197
7198 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
7199 - if (buf == root->node)
7200 - __update_reloc_root(root, cow->start);
7201 - }
7202 -
7203 level = btrfs_header_level(buf);
7204 if (btrfs_header_generation(buf) <=
7205 btrfs_root_last_snapshot(&root->root_item))
7206 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
7207 index 35c55cf38a35..b095094c0842 100644
7208 --- a/fs/cifs/file.c
7209 +++ b/fs/cifs/file.c
7210 @@ -3778,7 +3778,7 @@ again:
7211 if (rc == -ENODATA)
7212 rc = 0;
7213
7214 - ctx->rc = (rc == 0) ? ctx->total_len : rc;
7215 + ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
7216
7217 mutex_unlock(&ctx->aio_mutex);
7218
7219 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
7220 index 7c5e983fe385..5e6bc8fa4e46 100644
7221 --- a/fs/cifs/inode.c
7222 +++ b/fs/cifs/inode.c
7223 @@ -2454,25 +2454,26 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
7224
7225 /*
7226 * Attempt to flush data before changing attributes. We need to do
7227 - * this for ATTR_SIZE and ATTR_MTIME for sure, and if we change the
7228 - * ownership or mode then we may also need to do this. Here, we take
7229 - * the safe way out and just do the flush on all setattr requests. If
7230 - * the flush returns error, store it to report later and continue.
7231 + * this for ATTR_SIZE and ATTR_MTIME. If the flush of the data
7232 + * returns error, store it to report later and continue.
7233 *
7234 * BB: This should be smarter. Why bother flushing pages that
7235 * will be truncated anyway? Also, should we error out here if
7236 - * the flush returns error?
7237 + * the flush returns error? Do we need to check for ATTR_MTIME_SET flag?
7238 */
7239 - rc = filemap_write_and_wait(inode->i_mapping);
7240 - if (is_interrupt_error(rc)) {
7241 - rc = -ERESTARTSYS;
7242 - goto cifs_setattr_exit;
7243 + if (attrs->ia_valid & (ATTR_MTIME | ATTR_SIZE | ATTR_CTIME)) {
7244 + rc = filemap_write_and_wait(inode->i_mapping);
7245 + if (is_interrupt_error(rc)) {
7246 + rc = -ERESTARTSYS;
7247 + goto cifs_setattr_exit;
7248 + }
7249 + mapping_set_error(inode->i_mapping, rc);
7250 }
7251
7252 - mapping_set_error(inode->i_mapping, rc);
7253 rc = 0;
7254
7255 - if (attrs->ia_valid & ATTR_MTIME) {
7256 + if ((attrs->ia_valid & ATTR_MTIME) &&
7257 + !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
7258 rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile);
7259 if (!rc) {
7260 tcon = tlink_tcon(wfile->tlink);
7261 diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
7262 index 8fd45eb89424..b43960794922 100644
7263 --- a/fs/debugfs/file.c
7264 +++ b/fs/debugfs/file.c
7265 @@ -175,8 +175,13 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
7266 if (r)
7267 goto out;
7268
7269 - real_fops = fops_get(real_fops);
7270 - if (!real_fops) {
7271 + if (!fops_get(real_fops)) {
7272 +#ifdef MODULE
7273 + if (real_fops->owner &&
7274 + real_fops->owner->state == MODULE_STATE_GOING)
7275 + goto out;
7276 +#endif
7277 +
7278 /* Huh? Module did not clean up after itself at exit? */
7279 WARN(1, "debugfs file owner did not clean up at exit: %pd",
7280 dentry);
7281 @@ -305,8 +310,13 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
7282 if (r)
7283 goto out;
7284
7285 - real_fops = fops_get(real_fops);
7286 - if (!real_fops) {
7287 + if (!fops_get(real_fops)) {
7288 +#ifdef MODULE
7289 + if (real_fops->owner &&
7290 + real_fops->owner->state == MODULE_STATE_GOING)
7291 + goto out;
7292 +#endif
7293 +
7294 /* Huh? Module did not cleanup after itself at exit? */
7295 WARN(1, "debugfs file owner did not clean up at exit: %pd",
7296 dentry);
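
Editor's sketch: in the debugfs hunks, a NULL return from fops_get() means
try_module_get() failed on the owner. That is expected when the owning module is
already mid-unload, so only the "module never cleaned up its debugfs files" case
keeps the WARN. The shape of the check as a standalone hypothetical helper:

#include <linux/fs.h>
#include <linux/module.h>

static bool my_try_get_owner(const struct file_operations *fops,
			     struct dentry *dentry)
{
	if (fops_get(fops))
		return true;
#ifdef MODULE
	if (fops->owner && fops->owner->state == MODULE_STATE_GOING)
		return false;	/* open raced with unload: fail quietly */
#endif
	WARN(1, "debugfs file owner did not clean up at exit: %pd", dentry);
	return false;
}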
7297 diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
7298 index d92b3e753a6f..3e28fd082df0 100644
7299 --- a/fs/erofs/utils.c
7300 +++ b/fs/erofs/utils.c
7301 @@ -294,7 +294,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
7302 spin_unlock(&erofs_sb_list_lock);
7303 sbi->shrinker_run_no = run_no;
7304
7305 - freed += erofs_shrink_workstation(sbi, nr, false);
7306 + freed += erofs_shrink_workstation(sbi, nr - freed, false);
7307
7308 spin_lock(&erofs_sb_list_lock);
7309 /* Get the next list element before we move this one */
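
Editor's sketch: the erofs one-liner above fixes the shrinker's budget accounting.
Each superblock pass must be asked to shrink only the remaining work, nr - freed,
or the loop frees up to nr objects per superblock instead of nr in total. The
corrected loop shape, with my_shrink_one() as an invented stand-in for
erofs_shrink_workstation():

static unsigned long my_shrink_one(unsigned long budget);	/* frees <= budget */

static unsigned long my_shrink_scan(unsigned long nr)
{
	unsigned long freed = 0;

	while (freed < nr) {
		unsigned long n = my_shrink_one(nr - freed);	/* remaining budget */

		if (!n)		/* nothing left to reclaim */
			break;
		freed += n;
	}
	return freed;
}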
7310 diff --git a/fs/exec.c b/fs/exec.c
7311 index c27231234764..fc2870f2aca9 100644
7312 --- a/fs/exec.c
7313 +++ b/fs/exec.c
7314 @@ -1383,7 +1383,7 @@ void setup_new_exec(struct linux_binprm * bprm)
7315
7316 /* An exec changes our domain. We are no longer part of the thread
7317 group */
7318 - current->self_exec_id++;
7319 + WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1);
7320 flush_signal_handlers(current, 0);
7321 }
7322 EXPORT_SYMBOL(setup_new_exec);
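
Editor's sketch: the exec.c hunk pairs with the type change in
include/linux/sched.h further below. self_exec_id is read locklessly by the signal
code, so it is widened to u64 (a wrap can no longer make a stale id match) and the
update is a single marked store so it pairs with the lockless READ_ONCE() readers.
The pattern in miniature, with invented names:

#include <linux/compiler.h>
#include <linux/types.h>

struct my_task {
	u64 exec_id;
};

static void my_note_exec(struct my_task *t)
{
	/* single marked store; lockless readers pair with READ_ONCE() */
	WRITE_ONCE(t->exec_id, t->exec_id + 1);
}

static bool my_exec_id_still_valid(const struct my_task *t, u64 saved)
{
	return READ_ONCE(t->exec_id) == saved;
}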
7323 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
7324 index 70ef4a714b33..d2edd6e9072f 100644
7325 --- a/fs/ext4/inode.c
7326 +++ b/fs/ext4/inode.c
7327 @@ -5140,7 +5140,7 @@ static int ext4_inode_blocks_set(handle_t *handle,
7328 struct ext4_inode_info *ei)
7329 {
7330 struct inode *inode = &(ei->vfs_inode);
7331 - u64 i_blocks = inode->i_blocks;
7332 + u64 i_blocks = READ_ONCE(inode->i_blocks);
7333 struct super_block *sb = inode->i_sb;
7334
7335 if (i_blocks <= ~0U) {
7336 diff --git a/fs/filesystems.c b/fs/filesystems.c
7337 index 9135646e41ac..5e1a19013373 100644
7338 --- a/fs/filesystems.c
7339 +++ b/fs/filesystems.c
7340 @@ -271,7 +271,9 @@ struct file_system_type *get_fs_type(const char *name)
7341 fs = __get_fs_type(name, len);
7342 if (!fs && (request_module("fs-%.*s", len, name) == 0)) {
7343 fs = __get_fs_type(name, len);
7344 - WARN_ONCE(!fs, "request_module fs-%.*s succeeded, but still no fs?\n", len, name);
7345 + if (!fs)
7346 + pr_warn_once("request_module fs-%.*s succeeded, but still no fs?\n",
7347 + len, name);
7348 }
7349
7350 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
7351 diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
7352 index 0290a22ebccf..21820a5b388f 100644
7353 --- a/fs/gfs2/glock.c
7354 +++ b/fs/gfs2/glock.c
7355 @@ -639,6 +639,9 @@ __acquires(&gl->gl_lockref.lock)
7356 goto out_unlock;
7357 if (nonblock)
7358 goto out_sched;
7359 + smp_mb();
7360 + if (atomic_read(&gl->gl_revokes) != 0)
7361 + goto out_sched;
7362 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
7363 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
7364 gl->gl_target = gl->gl_demote_state;
7365 diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
7366 index ff213690e364..83cf64da474c 100644
7367 --- a/fs/gfs2/glops.c
7368 +++ b/fs/gfs2/glops.c
7369 @@ -89,8 +89,32 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
7370 INIT_LIST_HEAD(&tr.tr_databuf);
7371 tr.tr_revokes = atomic_read(&gl->gl_ail_count);
7372
7373 - if (!tr.tr_revokes)
7374 + if (!tr.tr_revokes) {
7375 + bool have_revokes;
7376 + bool log_in_flight;
7377 +
7378 + /*
7379 + * We have nothing on the ail, but there could be revokes on
7380 + * the sdp revoke queue, in which case, we still want to flush
7381 + * the log and wait for it to finish.
7382 + *
7383 + * If the sdp revoke list is empty too, we might still have an
7384 + * io outstanding for writing revokes, so we should wait for
7385 + * it before returning.
7386 + *
7387 + * If none of these conditions are true, our revokes are all
7388 + * flushed and we can return.
7389 + */
7390 + gfs2_log_lock(sdp);
7391 + have_revokes = !list_empty(&sdp->sd_log_revokes);
7392 + log_in_flight = atomic_read(&sdp->sd_log_in_flight);
7393 + gfs2_log_unlock(sdp);
7394 + if (have_revokes)
7395 + goto flush;
7396 + if (log_in_flight)
7397 + log_flush_wait(sdp);
7398 return;
7399 + }
7400
7401 /* A shortened, inline version of gfs2_trans_begin()
7402 * tr->alloced is not set since the transaction structure is
7403 @@ -105,6 +129,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
7404 __gfs2_ail_flush(gl, 0, tr.tr_revokes);
7405
7406 gfs2_trans_end(sdp);
7407 +flush:
7408 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
7409 GFS2_LFC_AIL_EMPTY_GL);
7410 }
7411 diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
7412 index 2aed73666a65..47bc27d4169e 100644
7413 --- a/fs/gfs2/log.c
7414 +++ b/fs/gfs2/log.c
7415 @@ -513,7 +513,7 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
7416 }
7417
7418
7419 -static void log_flush_wait(struct gfs2_sbd *sdp)
7420 +void log_flush_wait(struct gfs2_sbd *sdp)
7421 {
7422 DEFINE_WAIT(wait);
7423
7424 diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
7425 index c762da494546..52b9bf27e918 100644
7426 --- a/fs/gfs2/log.h
7427 +++ b/fs/gfs2/log.h
7428 @@ -73,6 +73,7 @@ extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
7429 u32 type);
7430 extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
7431 extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
7432 +extern void log_flush_wait(struct gfs2_sbd *sdp);
7433
7434 extern void gfs2_log_shutdown(struct gfs2_sbd *sdp);
7435 extern int gfs2_logd(void *data);
7436 diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
7437 index e6d554476db4..eeebe80c6be4 100644
7438 --- a/fs/hfsplus/attributes.c
7439 +++ b/fs/hfsplus/attributes.c
7440 @@ -292,6 +292,10 @@ static int __hfsplus_delete_attr(struct inode *inode, u32 cnid,
7441 return -ENOENT;
7442 }
7443
7444 + /* Avoid btree corruption */
7445 + hfs_bnode_read(fd->bnode, fd->search_key,
7446 + fd->keyoffset, fd->keylength);
7447 +
7448 err = hfs_brec_remove(fd);
7449 if (err)
7450 return err;
7451 diff --git a/fs/io_uring.c b/fs/io_uring.c
7452 index e37b84146453..b2ccb908f6b6 100644
7453 --- a/fs/io_uring.c
7454 +++ b/fs/io_uring.c
7455 @@ -331,6 +331,7 @@ struct io_kiocb {
7456 #define REQ_F_ISREG 2048 /* regular file */
7457 #define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
7458 #define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */
7459 + unsigned long fsize;
7460 u64 user_data;
7461 u32 result;
7462 u32 sequence;
7463 @@ -1085,6 +1086,9 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
7464 if (S_ISREG(file_inode(req->file)->i_mode))
7465 req->flags |= REQ_F_ISREG;
7466
7467 + if (force_nonblock)
7468 + req->fsize = rlimit(RLIMIT_FSIZE);
7469 +
7470 /*
7471 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
7472 * we know to async punt it even if it was opened O_NONBLOCK
7473 @@ -1504,10 +1508,17 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
7474 }
7475 kiocb->ki_flags |= IOCB_WRITE;
7476
7477 + if (!force_nonblock)
7478 + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
7479 +
7480 if (file->f_op->write_iter)
7481 ret2 = call_write_iter(file, kiocb, &iter);
7482 else
7483 ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
7484 +
7485 + if (!force_nonblock)
7486 + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
7487 +
7488 if (!force_nonblock || ret2 != -EAGAIN) {
7489 io_rw_done(kiocb, ret2);
7490 } else {
7491 @@ -3092,13 +3103,6 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7492 struct sk_buff *skb;
7493 int i;
7494
7495 - if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
7496 - unsigned long inflight = ctx->user->unix_inflight + nr;
7497 -
7498 - if (inflight > task_rlimit(current, RLIMIT_NOFILE))
7499 - return -EMFILE;
7500 - }
7501 -
7502 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7503 if (!fpl)
7504 return -ENOMEM;
7505 diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
7506 index 20b3717cd7ca..8b7c525dbbf7 100644
7507 --- a/fs/nfs/pagelist.c
7508 +++ b/fs/nfs/pagelist.c
7509 @@ -1177,38 +1177,38 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
7510 if (desc->pg_error < 0)
7511 goto out_failed;
7512
7513 - for (midx = 0; midx < desc->pg_mirror_count; midx++) {
7514 - if (midx) {
7515 - nfs_page_group_lock(req);
7516 -
7517 - /* find the last request */
7518 - for (lastreq = req->wb_head;
7519 - lastreq->wb_this_page != req->wb_head;
7520 - lastreq = lastreq->wb_this_page)
7521 - ;
7522 -
7523 - dupreq = nfs_create_subreq(req, lastreq,
7524 - pgbase, offset, bytes);
7525 -
7526 - nfs_page_group_unlock(req);
7527 - if (IS_ERR(dupreq)) {
7528 - desc->pg_error = PTR_ERR(dupreq);
7529 - goto out_failed;
7530 - }
7531 - } else
7532 - dupreq = req;
7533 + /* Create the mirror instances first, and fire them off */
7534 + for (midx = 1; midx < desc->pg_mirror_count; midx++) {
7535 + nfs_page_group_lock(req);
7536 +
7537 + /* find the last request */
7538 + for (lastreq = req->wb_head;
7539 + lastreq->wb_this_page != req->wb_head;
7540 + lastreq = lastreq->wb_this_page)
7541 + ;
7542 +
7543 + dupreq = nfs_create_subreq(req, lastreq,
7544 + pgbase, offset, bytes);
7545 +
7546 + nfs_page_group_unlock(req);
7547 + if (IS_ERR(dupreq)) {
7548 + desc->pg_error = PTR_ERR(dupreq);
7549 + goto out_failed;
7550 + }
7551
7552 - if (nfs_pgio_has_mirroring(desc))
7553 - desc->pg_mirror_idx = midx;
7554 + desc->pg_mirror_idx = midx;
7555 if (!nfs_pageio_add_request_mirror(desc, dupreq))
7556 goto out_cleanup_subreq;
7557 }
7558
7559 + desc->pg_mirror_idx = 0;
7560 + if (!nfs_pageio_add_request_mirror(desc, req))
7561 + goto out_failed;
7562 +
7563 return 1;
7564
7565 out_cleanup_subreq:
7566 - if (req != dupreq)
7567 - nfs_pageio_cleanup_request(desc, dupreq);
7568 + nfs_pageio_cleanup_request(desc, dupreq);
7569 out_failed:
7570 nfs_pageio_error_cleanup(desc);
7571 return 0;
7572 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
7573 index 913eb37c249b..58c8317dd7d8 100644
7574 --- a/fs/nfs/write.c
7575 +++ b/fs/nfs/write.c
7576 @@ -441,6 +441,7 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
7577 }
7578
7579 subreq->wb_head = subreq;
7580 + nfs_release_request(old_head);
7581
7582 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
7583 nfs_release_request(subreq);
7584 diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
7585 index 11b42c523f04..d77c5261c03c 100644
7586 --- a/fs/nfsd/nfsctl.c
7587 +++ b/fs/nfsd/nfsctl.c
7588 @@ -1333,6 +1333,7 @@ void nfsd_client_rmdir(struct dentry *dentry)
7589 dget(dentry);
7590 ret = simple_rmdir(dir, dentry);
7591 WARN_ON_ONCE(ret);
7592 + fsnotify_rmdir(dir, dentry);
7593 d_delete(dentry);
7594 inode_unlock(dir);
7595 }
7596 diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
7597 index f9baefc76cf9..0a6fe7d5aba7 100644
7598 --- a/fs/ocfs2/alloc.c
7599 +++ b/fs/ocfs2/alloc.c
7600 @@ -7403,6 +7403,10 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
7601 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
7602 struct ocfs2_inline_data *idata = &di->id2.i_data;
7603
7604 + /* No need to punch hole beyond i_size. */
7605 + if (start >= i_size_read(inode))
7606 + return 0;
7607 +
7608 if (end > i_size_read(inode))
7609 end = i_size_read(inode);
7610
7611 diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
7612 index 7fbe8f058220..d99b5d39aa90 100644
7613 --- a/fs/pstore/inode.c
7614 +++ b/fs/pstore/inode.c
7615 @@ -87,11 +87,11 @@ static void *pstore_ftrace_seq_next(struct seq_file *s, void *v, loff_t *pos)
7616 struct pstore_private *ps = s->private;
7617 struct pstore_ftrace_seq_data *data = v;
7618
7619 + (*pos)++;
7620 data->off += REC_SIZE;
7621 if (data->off + REC_SIZE > ps->total_size)
7622 return NULL;
7623
7624 - (*pos)++;
7625 return data;
7626 }
7627
7628 @@ -101,6 +101,9 @@ static int pstore_ftrace_seq_show(struct seq_file *s, void *v)
7629 struct pstore_ftrace_seq_data *data = v;
7630 struct pstore_ftrace_record *rec;
7631
7632 + if (!data)
7633 + return 0;
7634 +
7635 rec = (struct pstore_ftrace_record *)(ps->record->buf + data->off);
7636
7637 seq_printf(s, "CPU:%d ts:%llu %08lx %08lx %ps <- %pS\n",
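
Editor's sketch: the pstore hunk restores the seq_file ->next contract: the
position must advance on every call, even the final one that returns NULL,
otherwise the read loop can keep re-visiting the last record; ->show then has to
tolerate a NULL record. In outline, with invented types:

#include <linux/seq_file.h>

#define MY_REC_SIZE 32

struct my_iter {
	size_t off;
	size_t total;
};

static void *my_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct my_iter *it = v;

	(*pos)++;				/* advance unconditionally */
	it->off += MY_REC_SIZE;
	if (it->off + MY_REC_SIZE > it->total)
		return NULL;			/* done, but *pos still moved */
	return it;
}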
7638 diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
7639 index 3d7024662d29..cdf5b8ae2583 100644
7640 --- a/fs/pstore/platform.c
7641 +++ b/fs/pstore/platform.c
7642 @@ -823,9 +823,9 @@ static int __init pstore_init(void)
7643
7644 ret = pstore_init_fs();
7645 if (ret)
7646 - return ret;
7647 + free_buf_for_compression();
7648
7649 - return 0;
7650 + return ret;
7651 }
7652 late_initcall(pstore_init);
7653
7654 diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
7655 index 4010c42e40bd..00441e24a5b9 100644
7656 --- a/include/acpi/acpixf.h
7657 +++ b/include/acpi/acpixf.h
7658 @@ -748,7 +748,7 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3
7659 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
7660 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
7661 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
7662 -ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
7663 +ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(u32 gpe_skip_number))
7664 ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void))
7665
7666 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
7667 diff --git a/include/linux/cpu.h b/include/linux/cpu.h
7668 index bc6c879bd110..4e9822cb11f3 100644
7669 --- a/include/linux/cpu.h
7670 +++ b/include/linux/cpu.h
7671 @@ -138,12 +138,18 @@ static inline void get_online_cpus(void) { cpus_read_lock(); }
7672 static inline void put_online_cpus(void) { cpus_read_unlock(); }
7673
7674 #ifdef CONFIG_PM_SLEEP_SMP
7675 -extern int freeze_secondary_cpus(int primary);
7676 +int __freeze_secondary_cpus(int primary, bool suspend);
7677 +static inline int freeze_secondary_cpus(int primary)
7678 +{
7679 + return __freeze_secondary_cpus(primary, true);
7680 +}
7681 +
7682 static inline int disable_nonboot_cpus(void)
7683 {
7684 - return freeze_secondary_cpus(0);
7685 + return __freeze_secondary_cpus(0, false);
7686 }
7687 -extern void enable_nonboot_cpus(void);
7688 +
7689 +void enable_nonboot_cpus(void);
7690
7691 static inline int suspend_disable_secondary_cpus(void)
7692 {
7693 diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
7694 index 4635f95000a4..79a6e37a1d6f 100644
7695 --- a/include/linux/devfreq_cooling.h
7696 +++ b/include/linux/devfreq_cooling.h
7697 @@ -75,7 +75,7 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *dfc);
7698
7699 #else /* !CONFIG_DEVFREQ_THERMAL */
7700
7701 -struct thermal_cooling_device *
7702 +static inline struct thermal_cooling_device *
7703 of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
7704 struct devfreq_cooling_power *dfc_power)
7705 {
7706 diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
7707 index dba15ca8e60b..1dcd9198beb7 100644
7708 --- a/include/linux/iocontext.h
7709 +++ b/include/linux/iocontext.h
7710 @@ -8,6 +8,7 @@
7711
7712 enum {
7713 ICQ_EXITED = 1 << 2,
7714 + ICQ_DESTROYED = 1 << 3,
7715 };
7716
7717 /*
7718 diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
7719 index 6d0d70f3219c..10f81629b9ce 100644
7720 --- a/include/linux/nvme-fc-driver.h
7721 +++ b/include/linux/nvme-fc-driver.h
7722 @@ -270,8 +270,6 @@ struct nvme_fc_remote_port {
7723 *
7724 * Host/Initiator Transport Entrypoints/Parameters:
7725 *
7726 - * @module: The LLDD module using the interface
7727 - *
7728 * @localport_delete: The LLDD initiates deletion of a localport via
7729 * nvme_fc_deregister_localport(). However, the teardown is
7730 * asynchronous. This routine is called upon the completion of the
7731 @@ -385,8 +383,6 @@ struct nvme_fc_remote_port {
7732 * Value is Mandatory. Allowed to be zero.
7733 */
7734 struct nvme_fc_port_template {
7735 - struct module *module;
7736 -
7737 /* initiator-based functions */
7738 void (*localport_delete)(struct nvme_fc_local_port *);
7739 void (*remoteport_delete)(struct nvme_fc_remote_port *);
7740 diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
7741 index f641badc2c61..0c12d69dde92 100644
7742 --- a/include/linux/pci-epc.h
7743 +++ b/include/linux/pci-epc.h
7744 @@ -71,6 +71,7 @@ struct pci_epc_ops {
7745 * @bitmap: bitmap to manage the PCI address space
7746 * @pages: number of bits representing the address region
7747 * @page_size: size of each page
7748 + * @lock: mutex to protect bitmap
7749 */
7750 struct pci_epc_mem {
7751 phys_addr_t phys_base;
7752 @@ -78,6 +79,8 @@ struct pci_epc_mem {
7753 unsigned long *bitmap;
7754 size_t page_size;
7755 int pages;
7756 + /* mutex to protect against concurrent access for memory allocation */
7757 + struct mutex lock;
7758 };
7759
7760 /**
7761 diff --git a/include/linux/sched.h b/include/linux/sched.h
7762 index b968d736833b..5710b80f8050 100644
7763 --- a/include/linux/sched.h
7764 +++ b/include/linux/sched.h
7765 @@ -934,8 +934,8 @@ struct task_struct {
7766 struct seccomp seccomp;
7767
7768 /* Thread group tracking: */
7769 - u32 parent_exec_id;
7770 - u32 self_exec_id;
7771 + u64 parent_exec_id;
7772 + u64 self_exec_id;
7773
7774 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
7775 spinlock_t alloc_lock;
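
The widening above matters because a 32-bit exec counter wraps after 2^32 execve() calls, at which point a stale parent_exec_id can once again equal the parent's recycled self_exec_id and defeat the security-domain check in do_notify_parent() (see the kernel/signal.c hunk further below). A minimal userspace C sketch of the collision, simulating the wrap arithmetically rather than looping 2^32 times:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t snap32 = 42;                            /* child's snapshot of parent_exec_id */
	uint32_t now32  = (uint32_t)(42 + (1ULL << 32)); /* parent's counter after 2^32 execs */
	uint64_t now64  = 42 + (1ULL << 32);             /* same history with the widened counter */

	printf("u32 stale match: %d\n", snap32 == now32);           /* 1: check defeated */
	printf("u64 stale match: %d\n", (uint64_t)snap32 == now64); /* 0 */
	return 0;
}
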
7776 diff --git a/include/linux/xarray.h b/include/linux/xarray.h
7777 index 86eecbd98e84..3b257c97837d 100644
7778 --- a/include/linux/xarray.h
7779 +++ b/include/linux/xarray.h
7780 @@ -1613,6 +1613,7 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
7781 xa_mark_t mark)
7782 {
7783 struct xa_node *node = xas->xa_node;
7784 + void *entry;
7785 unsigned int offset;
7786
7787 if (unlikely(xas_not_node(node) || node->shift))
7788 @@ -1624,7 +1625,10 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
7789 return NULL;
7790 if (offset == XA_CHUNK_SIZE)
7791 return xas_find_marked(xas, max, mark);
7792 - return xa_entry(xas->xa, node, offset);
7793 + entry = xa_entry(xas->xa, node, offset);
7794 + if (!entry)
7795 + return xas_find_marked(xas, max, mark);
7796 + return entry;
7797 }
7798
7799 /*
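
The extra NULL check above covers stale fast-path state: the inline path can read a slot whose entry has been removed, and handing that NULL back to the caller would falsely end the marked iteration, so the patch defers to xas_find_marked(), which re-walks from authoritative state. A self-contained C sketch of the same fast-path-with-fallback shape (generic arrays stand in for the xarray internals, which are not reproduced here):

#include <stdio.h>

#define NSLOTS 8

static void *slots[NSLOTS];    /* authoritative store */
static void *cached[NSLOTS];   /* fast-path view; may be stale */

static void *slow_lookup(int i)    /* stands in for xas_find_marked() */
{
	return slots[i];
}

static void *lookup(int i)
{
	void *entry = cached[i];
	if (!entry)                /* the check the patch adds: NULL here means */
		return slow_lookup(i); /* "state unknown", not "no entry"          */
	return entry;
}

int main(void)
{
	int x = 1;
	slots[3] = &x;       /* the entry exists... */
	cached[3] = NULL;    /* ...but the fast-path view is stale */
	printf("found: %d\n", lookup(3) != NULL);   /* 1 with the fallback */
	return 0;
}
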
7800 diff --git a/include/media/rc-map.h b/include/media/rc-map.h
7801 index afd2ab31bdf2..c2ef3906e1cd 100644
7802 --- a/include/media/rc-map.h
7803 +++ b/include/media/rc-map.h
7804 @@ -271,6 +271,7 @@ struct rc_map *rc_map_get(const char *name);
7805 #define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100"
7806 #define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350"
7807 #define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr"
7808 +#define RC_MAP_KII_PRO "rc-videostrong-kii-pro"
7809 #define RC_MAP_WETEK_HUB "rc-wetek-hub"
7810 #define RC_MAP_WETEK_PLAY2 "rc-wetek-play2"
7811 #define RC_MAP_WINFAST "rc-winfast"
7812 diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
7813 index 299240df79e4..04e97bab6f28 100644
7814 --- a/include/net/af_rxrpc.h
7815 +++ b/include/net/af_rxrpc.h
7816 @@ -16,6 +16,12 @@ struct sock;
7817 struct socket;
7818 struct rxrpc_call;
7819
7820 +enum rxrpc_interruptibility {
7821 + RXRPC_INTERRUPTIBLE, /* Call is interruptible */
7822 + RXRPC_PREINTERRUPTIBLE, /* Call can be cancelled whilst waiting for a slot */
7823 + RXRPC_UNINTERRUPTIBLE, /* Call should not be interruptible at all */
7824 +};
7825 +
7826 /*
7827 * Debug ID counter for tracing.
7828 */
7829 @@ -41,7 +47,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
7830 gfp_t,
7831 rxrpc_notify_rx_t,
7832 bool,
7833 - bool,
7834 + enum rxrpc_interruptibility,
7835 unsigned int);
7836 int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
7837 struct msghdr *, size_t,
7838 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
7839 index a0b76b360d6f..013780ef0bd7 100644
7840 --- a/kernel/bpf/verifier.c
7841 +++ b/kernel/bpf/verifier.c
7842 @@ -5325,6 +5325,70 @@ static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
7843 reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
7844 }
7845
7846 +/* Constrain the possible values of @reg with unsigned upper bound @bound.
7847 + * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive.
7848 + * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits
7849 + * of @reg.
7850 + */
7851 +static void set_upper_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32,
7852 + bool is_exclusive)
7853 +{
7854 + if (is_exclusive) {
7855 + /* There are no values for `reg` that make `reg<0` true. */
7856 + if (bound == 0)
7857 + return;
7858 + bound--;
7859 + }
7860 + if (is_jmp32) {
7861 + /* Constrain the register's value in the tnum representation.
7862 + * For 64-bit comparisons this happens later in
7863 + * __reg_bound_offset(), but for 32-bit comparisons, we can be
7864 + * more precise than what can be derived from the updated
7865 + * numeric bounds.
7866 + */
7867 + struct tnum t = tnum_range(0, bound);
7868 +
7869 + t.mask |= ~0xffffffffULL; /* upper half is unknown */
7870 + reg->var_off = tnum_intersect(reg->var_off, t);
7871 +
7872 + /* Compute the 64-bit bound from the 32-bit bound. */
7873 + bound += gen_hi_max(reg->var_off);
7874 + }
7875 + reg->umax_value = min(reg->umax_value, bound);
7876 +}
7877 +
7878 +/* Constrain the possible values of @reg with unsigned lower bound @bound.
7879 + * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive.
7880 + * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits
7881 + * of @reg.
7882 + */
7883 +static void set_lower_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32,
7884 + bool is_exclusive)
7885 +{
7886 + if (is_exclusive) {
7887 + /* There are no values for `reg` that make `reg>MAX` true. */
7888 + if (bound == (is_jmp32 ? U32_MAX : U64_MAX))
7889 + return;
7890 + bound++;
7891 + }
7892 + if (is_jmp32) {
7893 + /* Constrain the register's value in the tnum representation.
7894 + * For 64-bit comparisons this happens later in
7895 + * __reg_bound_offset(), but for 32-bit comparisons, we can be
7896 + * more precise than what can be derived from the updated
7897 + * numeric bounds.
7898 + */
7899 + struct tnum t = tnum_range(bound, U32_MAX);
7900 +
7901 + t.mask |= ~0xffffffffULL; /* upper half is unknown */
7902 + reg->var_off = tnum_intersect(reg->var_off, t);
7903 +
7904 + /* Compute the 64-bit bound from the 32-bit bound. */
7905 + bound += gen_hi_min(reg->var_off);
7906 + }
7907 + reg->umin_value = max(reg->umin_value, bound);
7908 +}
7909 +
7910 /* Adjusts the register min/max values in the case that the dst_reg is the
7911 * variable register that we are working on, and src_reg is a constant or we're
7912 * simply doing a BPF_K check.
7913 @@ -5380,15 +5444,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
7914 case BPF_JGE:
7915 case BPF_JGT:
7916 {
7917 - u64 false_umax = opcode == BPF_JGT ? val : val - 1;
7918 - u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
7919 -
7920 - if (is_jmp32) {
7921 - false_umax += gen_hi_max(false_reg->var_off);
7922 - true_umin += gen_hi_min(true_reg->var_off);
7923 - }
7924 - false_reg->umax_value = min(false_reg->umax_value, false_umax);
7925 - true_reg->umin_value = max(true_reg->umin_value, true_umin);
7926 + set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JGE);
7927 + set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JGT);
7928 break;
7929 }
7930 case BPF_JSGE:
7931 @@ -5409,15 +5466,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
7932 case BPF_JLE:
7933 case BPF_JLT:
7934 {
7935 - u64 false_umin = opcode == BPF_JLT ? val : val + 1;
7936 - u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
7937 -
7938 - if (is_jmp32) {
7939 - false_umin += gen_hi_min(false_reg->var_off);
7940 - true_umax += gen_hi_max(true_reg->var_off);
7941 - }
7942 - false_reg->umin_value = max(false_reg->umin_value, false_umin);
7943 - true_reg->umax_value = min(true_reg->umax_value, true_umax);
7944 + set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JLE);
7945 + set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JLT);
7946 break;
7947 }
7948 case BPF_JSLE:
7949 @@ -5492,15 +5542,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
7950 case BPF_JGE:
7951 case BPF_JGT:
7952 {
7953 - u64 false_umin = opcode == BPF_JGT ? val : val + 1;
7954 - u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
7955 -
7956 - if (is_jmp32) {
7957 - false_umin += gen_hi_min(false_reg->var_off);
7958 - true_umax += gen_hi_max(true_reg->var_off);
7959 - }
7960 - false_reg->umin_value = max(false_reg->umin_value, false_umin);
7961 - true_reg->umax_value = min(true_reg->umax_value, true_umax);
7962 + set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JGE);
7963 + set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JGT);
7964 break;
7965 }
7966 case BPF_JSGE:
7967 @@ -5518,15 +5561,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
7968 case BPF_JLE:
7969 case BPF_JLT:
7970 {
7971 - u64 false_umax = opcode == BPF_JLT ? val : val - 1;
7972 - u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
7973 -
7974 - if (is_jmp32) {
7975 - false_umax += gen_hi_max(false_reg->var_off);
7976 - true_umin += gen_hi_min(true_reg->var_off);
7977 - }
7978 - false_reg->umax_value = min(false_reg->umax_value, false_umax);
7979 - true_reg->umin_value = max(true_reg->umin_value, true_umin);
7980 + set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JLE);
7981 + set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JLT);
7982 break;
7983 }
7984 case BPF_JSLE:
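
set_upper_bound() and set_lower_bound() factor out four nearly identical open-coded blocks, and their early returns are the safety-relevant part: converting an exclusive bound to an inclusive one must never compute 0 - 1 or U64_MAX + 1, because "reg < 0" and "reg > U64_MAX" are unsatisfiable for unsigned values. A standalone C sketch of just that conversion (function names here are illustrative, not verifier symbols):

#include <stdint.h>
#include <stdio.h>

/* Exclusive-to-inclusive unsigned upper bound, mirroring the bound == 0
 * guard in set_upper_bound(): "reg < 0" matches no unsigned value, so
 * there is nothing to constrain and bound - 1 must not be computed.   */
static int upper_inclusive(uint64_t bound, int exclusive, uint64_t *out)
{
	if (exclusive) {
		if (bound == 0)
			return 0;   /* unsatisfiable branch: apply no bound */
		bound--;
	}
	*out = bound;
	return 1;
}

int main(void)
{
	uint64_t b = 0;

	if (upper_inclusive(5, 1, &b))   /* reg < 5 */
		printf("umax = %llu\n", (unsigned long long)b);   /* 4 */
	if (!upper_inclusive(0, 1, &b))  /* reg < 0 */
		printf("reg < 0: unsatisfiable, no bound applied\n");
	return 0;
}
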
7985 diff --git a/kernel/cpu.c b/kernel/cpu.c
7986 index 406828fb3038..d7890c1285bf 100644
7987 --- a/kernel/cpu.c
7988 +++ b/kernel/cpu.c
7989 @@ -1212,7 +1212,7 @@ EXPORT_SYMBOL_GPL(cpu_up);
7990 #ifdef CONFIG_PM_SLEEP_SMP
7991 static cpumask_var_t frozen_cpus;
7992
7993 -int freeze_secondary_cpus(int primary)
7994 +int __freeze_secondary_cpus(int primary, bool suspend)
7995 {
7996 int cpu, error = 0;
7997
7998 @@ -1237,7 +1237,7 @@ int freeze_secondary_cpus(int primary)
7999 if (cpu == primary)
8000 continue;
8001
8002 - if (pm_wakeup_pending()) {
8003 + if (suspend && pm_wakeup_pending()) {
8004 pr_info("Wakeup pending. Abort CPU freeze\n");
8005 error = -EBUSY;
8006 break;
8007 diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
8008 index d9334f31a5af..8682a5305cb3 100644
8009 --- a/kernel/dma/mapping.c
8010 +++ b/kernel/dma/mapping.c
8011 @@ -169,6 +169,8 @@ EXPORT_SYMBOL(dma_get_sgtable_attrs);
8012 */
8013 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
8014 {
8015 + if (force_dma_unencrypted(dev))
8016 + prot = pgprot_decrypted(prot);
8017 if (dev_is_dma_coherent(dev) ||
8018 (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
8019 (attrs & DMA_ATTR_NON_CONSISTENT)))
8020 diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
8021 index a949bd39e343..d44c8fd17609 100644
8022 --- a/kernel/irq/debugfs.c
8023 +++ b/kernel/irq/debugfs.c
8024 @@ -206,8 +206,15 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
8025 chip_bus_lock(desc);
8026 raw_spin_lock_irqsave(&desc->lock, flags);
8027
8028 - if (irq_settings_is_level(desc) || desc->istate & IRQS_NMI) {
8029 - /* Can't do level nor NMIs, sorry */
8030 + /*
8031 + * Don't allow injection when the interrupt is:
8032 + * - Level or NMI type
8033 + * - not activated
8034 + * - already being replayed
8035 + */
8036 + if (irq_settings_is_level(desc) ||
8037 + !irqd_is_activated(&desc->irq_data) ||
8038 + (desc->istate & (IRQS_NMI | IRQS_REPLAY))) {
8039 err = -EINVAL;
8040 } else {
8041 desc->istate |= IRQS_PENDING;
8042 diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
8043 index 480df3659720..c776b8e86fbc 100644
8044 --- a/kernel/irq/irqdomain.c
8045 +++ b/kernel/irq/irqdomain.c
8046 @@ -1293,6 +1293,11 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
8047 unsigned int irq_base,
8048 unsigned int nr_irqs, void *arg)
8049 {
8050 + if (!domain->ops->alloc) {
8051 + pr_debug("domain->ops->alloc() is NULL\n");
8052 + return -ENOSYS;
8053 + }
8054 +
8055 return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
8056 }
8057
8058 @@ -1330,11 +1335,6 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
8059 return -EINVAL;
8060 }
8061
8062 - if (!domain->ops->alloc) {
8063 - pr_debug("domain->ops->alloc() is NULL\n");
8064 - return -ENOSYS;
8065 - }
8066 -
8067 if (realloc && irq_base >= 0) {
8068 virq = irq_base;
8069 } else {
8070 diff --git a/kernel/kmod.c b/kernel/kmod.c
8071 index bc6addd9152b..a2de58de6ab6 100644
8072 --- a/kernel/kmod.c
8073 +++ b/kernel/kmod.c
8074 @@ -120,7 +120,7 @@ out:
8075 * invoke it.
8076 *
8077 * If module auto-loading support is disabled then this function
8078 - * becomes a no-operation.
8079 + * simply returns -ENOENT.
8080 */
8081 int __request_module(bool wait, const char *fmt, ...)
8082 {
8083 @@ -137,7 +137,7 @@ int __request_module(bool wait, const char *fmt, ...)
8084 WARN_ON_ONCE(wait && current_is_async());
8085
8086 if (!modprobe_path[0])
8087 - return 0;
8088 + return -ENOENT;
8089
8090 va_start(args, fmt);
8091 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
8092 diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
8093 index 35d3b6925b1e..9ab1a965c3b9 100644
8094 --- a/kernel/locking/lockdep.c
8095 +++ b/kernel/locking/lockdep.c
8096 @@ -1719,9 +1719,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
8097 this.class = class;
8098
8099 raw_local_irq_save(flags);
8100 + current->lockdep_recursion = 1;
8101 arch_spin_lock(&lockdep_lock);
8102 ret = __lockdep_count_forward_deps(&this);
8103 arch_spin_unlock(&lockdep_lock);
8104 + current->lockdep_recursion = 0;
8105 raw_local_irq_restore(flags);
8106
8107 return ret;
8108 @@ -1746,9 +1748,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
8109 this.class = class;
8110
8111 raw_local_irq_save(flags);
8112 + current->lockdep_recursion = 1;
8113 arch_spin_lock(&lockdep_lock);
8114 ret = __lockdep_count_backward_deps(&this);
8115 arch_spin_unlock(&lockdep_lock);
8116 + current->lockdep_recursion = 0;
8117 raw_local_irq_restore(flags);
8118
8119 return ret;
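
Setting current->lockdep_recursion around the walk keeps lockdep from re-entering itself: the arch_spin_lock() on lockdep_lock inside the counting path would otherwise be fed back into the dependency engine. A minimal userspace rendering of that reentrancy-guard idiom (illustrative only; the kernel uses a per-task field, not a thread-local):

#include <stdio.h>

static _Thread_local int in_tracer;    /* analogue of current->lockdep_recursion */

static void trace_lock(const char *what)
{
	if (in_tracer)
		return;                        /* don't instrument the instrumentation */
	printf("traced: %s\n", what);
}

static void count_dependencies(void)
{
	in_tracer = 1;
	trace_lock("lockdep_lock");        /* suppressed */
	in_tracer = 0;
}

int main(void)
{
	trace_lock("user lock");           /* traced */
	count_dependencies();
	return 0;
}
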
8120 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
8121 index e921126aec84..195d0019e6bb 100644
8122 --- a/kernel/sched/core.c
8123 +++ b/kernel/sched/core.c
8124 @@ -3676,7 +3676,6 @@ static void sched_tick_remote(struct work_struct *work)
8125 if (cpu_is_offline(cpu))
8126 goto out_unlock;
8127
8128 - curr = rq->curr;
8129 update_rq_clock(rq);
8130
8131 if (!is_idle_task(curr)) {
8132 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
8133 index f32ce3a359fa..eeaf34d65742 100644
8134 --- a/kernel/sched/fair.c
8135 +++ b/kernel/sched/fair.c
8136 @@ -3927,6 +3927,7 @@ static inline void check_schedstat_required(void)
8137 #endif
8138 }
8139
8140 +static inline bool cfs_bandwidth_used(void);
8141
8142 /*
8143 * MIGRATION
8144 @@ -4005,10 +4006,16 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
8145 __enqueue_entity(cfs_rq, se);
8146 se->on_rq = 1;
8147
8148 - if (cfs_rq->nr_running == 1) {
8149 + /*
8150 + * When bandwidth control is enabled, cfs might have been removed
8151 + * because a parent has been throttled while cfs->nr_running > 1. Try to
8152 + * add it unconditionally.
8153 + */
8154 + if (cfs_rq->nr_running == 1 || cfs_bandwidth_used())
8155 list_add_leaf_cfs_rq(cfs_rq);
8156 +
8157 + if (cfs_rq->nr_running == 1)
8158 check_enqueue_throttle(cfs_rq);
8159 - }
8160 }
8161
8162 static void __clear_buddies_last(struct sched_entity *se)
8163 @@ -4589,11 +4596,22 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
8164 break;
8165 }
8166
8167 - assert_list_leaf_cfs_rq(rq);
8168 -
8169 if (!se)
8170 add_nr_running(rq, task_delta);
8171
8172 + /*
8173 + * The cfs_rq_throttled() breaks in the above iteration can result in
8174 + * incomplete leaf list maintenance, which would then trigger the
8175 + * assertion below.
8176 + */
8177 + for_each_sched_entity(se) {
8178 + cfs_rq = cfs_rq_of(se);
8179 +
8180 + list_add_leaf_cfs_rq(cfs_rq);
8181 + }
8182 +
8183 + assert_list_leaf_cfs_rq(rq);
8184 +
8185 /* Determine whether we need to wake up potentially idle CPU: */
8186 if (rq->curr == rq->idle && rq->cfs.nr_running)
8187 resched_curr(rq);
8188 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
8189 index e5e2605778c9..c7e7481968bf 100644
8190 --- a/kernel/sched/sched.h
8191 +++ b/kernel/sched/sched.h
8192 @@ -118,7 +118,13 @@ extern long calc_load_fold_active(struct rq *this_rq, long adjust);
8193 #ifdef CONFIG_64BIT
8194 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
8195 # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
8196 -# define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT)
8197 +# define scale_load_down(w) \
8198 +({ \
8199 + unsigned long __w = (w); \
8200 + if (__w) \
8201 + __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
8202 + __w; \
8203 +})
8204 #else
8205 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
8206 # define scale_load(w) (w)
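
With the old definition, any nonzero weight below 2^SCHED_FIXEDPOINT_SHIFT scaled down to zero, and zero load weights feed divisions elsewhere in the fair-scheduler math; the new macro clamps nonzero weights to at least 2. A userspace comparison of the two behaviours, assuming the kernel's SCHED_FIXEDPOINT_SHIFT of 10:

#include <stdio.h>

#define SHIFT 10   /* SCHED_FIXEDPOINT_SHIFT */

static unsigned long old_scale(unsigned long w)
{
	return w >> SHIFT;
}

static unsigned long new_scale(unsigned long w)
{
	unsigned long s = w >> SHIFT;

	if (!w)
		return 0;
	return s < 2 ? 2 : s;   /* max(2UL, w >> SHIFT) */
}

int main(void)
{
	printf("weight   15: old=%lu new=%lu\n", old_scale(15), new_scale(15));     /* 0 vs 2 */
	printf("weight 4096: old=%lu new=%lu\n", old_scale(4096), new_scale(4096)); /* 4 vs 4 */
	printf("weight    0: old=%lu new=%lu\n", old_scale(0), new_scale(0));       /* 0 vs 0 */
	return 0;
}
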
8207 diff --git a/kernel/seccomp.c b/kernel/seccomp.c
8208 index 614a557a0814..2c697ce7be21 100644
8209 --- a/kernel/seccomp.c
8210 +++ b/kernel/seccomp.c
8211 @@ -1205,6 +1205,7 @@ static const struct file_operations seccomp_notify_ops = {
8212 .poll = seccomp_notify_poll,
8213 .release = seccomp_notify_release,
8214 .unlocked_ioctl = seccomp_notify_ioctl,
8215 + .compat_ioctl = seccomp_notify_ioctl,
8216 };
8217
8218 static struct file *init_listener(struct seccomp_filter *filter)
8219 diff --git a/kernel/signal.c b/kernel/signal.c
8220 index eea748174ade..7d3d35eb7a0b 100644
8221 --- a/kernel/signal.c
8222 +++ b/kernel/signal.c
8223 @@ -1931,7 +1931,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
8224 * This is only possible if parent == real_parent.
8225 * Check if it has changed security domain.
8226 */
8227 - if (tsk->parent_exec_id != tsk->parent->self_exec_id)
8228 + if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
8229 sig = SIGCHLD;
8230 }
8231
8232 diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
8233 index dbd69052eaa6..a5538dd76a81 100644
8234 --- a/kernel/time/sched_clock.c
8235 +++ b/kernel/time/sched_clock.c
8236 @@ -207,7 +207,8 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
8237
8238 if (sched_clock_timer.function != NULL) {
8239 /* update timeout for clock wrap */
8240 - hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
8241 + hrtimer_start(&sched_clock_timer, cd.wrap_kt,
8242 + HRTIMER_MODE_REL_HARD);
8243 }
8244
8245 r = rate;
8246 @@ -251,9 +252,9 @@ void __init generic_sched_clock_init(void)
8247 * Start the timer to keep sched_clock() properly updated and
8248 * sets the initial epoch.
8249 */
8250 - hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
8251 + hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
8252 sched_clock_timer.function = sched_clock_poll;
8253 - hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
8254 + hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
8255 }
8256
8257 /*
8258 @@ -290,7 +291,7 @@ void sched_clock_resume(void)
8259 struct clock_read_data *rd = &cd.read_data[0];
8260
8261 rd->epoch_cyc = cd.actual_read_sched_clock();
8262 - hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
8263 + hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
8264 rd->read_sched_clock = cd.actual_read_sched_clock;
8265 }
8266
8267 diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
8268 index 89bdac61233d..2372b861f2cf 100644
8269 --- a/kernel/trace/bpf_trace.c
8270 +++ b/kernel/trace/bpf_trace.c
8271 @@ -650,7 +650,7 @@ BPF_CALL_1(bpf_send_signal, u32, sig)
8272 if (unlikely(!nmi_uaccess_okay()))
8273 return -EPERM;
8274
8275 - if (in_nmi()) {
8276 + if (irqs_disabled()) {
8277 /* Do an early check on signal validity. Otherwise,
8278 * the error is lost in deferred irq_work.
8279 */
8280 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
8281 index 3f54dc2f6e1c..2f0f7fcee73e 100644
8282 --- a/kernel/trace/trace_kprobe.c
8283 +++ b/kernel/trace/trace_kprobe.c
8284 @@ -918,6 +918,8 @@ static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
8285 int i;
8286
8287 seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
8288 + if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
8289 + seq_printf(m, "%d", tk->rp.maxactive);
8290 seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
8291 trace_probe_name(&tk->tp));
8292
8293 diff --git a/lib/test_xarray.c b/lib/test_xarray.c
8294 index 8c7d7a8468b8..d4f97925dbd8 100644
8295 --- a/lib/test_xarray.c
8296 +++ b/lib/test_xarray.c
8297 @@ -1156,6 +1156,42 @@ static noinline void check_find_entry(struct xarray *xa)
8298 XA_BUG_ON(xa, !xa_empty(xa));
8299 }
8300
8301 +static noinline void check_pause(struct xarray *xa)
8302 +{
8303 + XA_STATE(xas, xa, 0);
8304 + void *entry;
8305 + unsigned int order;
8306 + unsigned long index = 1;
8307 + unsigned int count = 0;
8308 +
8309 + for (order = 0; order < order_limit; order++) {
8310 + XA_BUG_ON(xa, xa_store_order(xa, index, order,
8311 + xa_mk_index(index), GFP_KERNEL));
8312 + index += 1UL << order;
8313 + }
8314 +
8315 + rcu_read_lock();
8316 + xas_for_each(&xas, entry, ULONG_MAX) {
8317 + XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
8318 + count++;
8319 + }
8320 + rcu_read_unlock();
8321 + XA_BUG_ON(xa, count != order_limit);
8322 +
8323 + count = 0;
8324 + xas_set(&xas, 0);
8325 + rcu_read_lock();
8326 + xas_for_each(&xas, entry, ULONG_MAX) {
8327 + XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
8328 + count++;
8329 + xas_pause(&xas);
8330 + }
8331 + rcu_read_unlock();
8332 + XA_BUG_ON(xa, count != order_limit);
8333 +
8334 + xa_destroy(xa);
8335 +}
8336 +
8337 static noinline void check_move_tiny(struct xarray *xa)
8338 {
8339 XA_STATE(xas, xa, 0);
8340 @@ -1664,6 +1700,7 @@ static int xarray_checks(void)
8341 check_xa_alloc();
8342 check_find(&array);
8343 check_find_entry(&array);
8344 + check_pause(&array);
8345 check_account(&array);
8346 check_destroy(&array);
8347 check_move(&array);
8348 diff --git a/lib/xarray.c b/lib/xarray.c
8349 index acd1fad2e862..08d71c7b7599 100644
8350 --- a/lib/xarray.c
8351 +++ b/lib/xarray.c
8352 @@ -970,7 +970,7 @@ void xas_pause(struct xa_state *xas)
8353
8354 xas->xa_node = XAS_RESTART;
8355 if (node) {
8356 - unsigned int offset = xas->xa_offset;
8357 + unsigned long offset = xas->xa_offset;
8358 while (++offset < XA_CHUNK_SIZE) {
8359 if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
8360 break;
8361 @@ -1208,6 +1208,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
8362 }
8363
8364 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
8365 + if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
8366 + continue;
8367 if (!xa_is_node(entry))
8368 return entry;
8369 xas->xa_node = xa_to_node(entry);
8370 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
8371 index 5d0575d633d2..8159000781be 100644
8372 --- a/mm/memcontrol.c
8373 +++ b/mm/memcontrol.c
8374 @@ -2441,6 +2441,9 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
8375 usage = page_counter_read(&memcg->memory);
8376 high = READ_ONCE(memcg->high);
8377
8378 + if (usage <= high)
8379 + continue;
8380 +
8381 /*
8382 * Prevent division by 0 in overage calculation by acting as if
8383 * it was a threshold of 1 page
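
The new "usage <= high" bail-out does more than skip needless work: usage and high are unsigned, so once usage has fallen back under the high watermark, the overage subtraction further down would underflow to an enormous value and in turn produce a huge throttling delay. A two-branch C demonstration of the hazard:

#include <stdio.h>

int main(void)
{
	unsigned long usage = 100, high = 200;   /* usage fell back under the watermark */

	if (usage > high)                        /* the guard the patch adds */
		printf("overage = %lu\n", usage - high);
	else
		printf("no overage, skip throttling\n");

	printf("unguarded subtraction: %lu\n", usage - high);  /* wraps to a huge value */
	return 0;
}
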
8384 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
8385 index a293238fe1e7..2921fc276713 100644
8386 --- a/net/rxrpc/af_rxrpc.c
8387 +++ b/net/rxrpc/af_rxrpc.c
8388 @@ -285,7 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
8389 gfp_t gfp,
8390 rxrpc_notify_rx_t notify_rx,
8391 bool upgrade,
8392 - bool intr,
8393 + enum rxrpc_interruptibility interruptibility,
8394 unsigned int debug_id)
8395 {
8396 struct rxrpc_conn_parameters cp;
8397 @@ -310,7 +310,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
8398 memset(&p, 0, sizeof(p));
8399 p.user_call_ID = user_call_ID;
8400 p.tx_total_len = tx_total_len;
8401 - p.intr = intr;
8402 + p.interruptibility = interruptibility;
8403
8404 memset(&cp, 0, sizeof(cp));
8405 cp.local = rx->local;
8406 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
8407 index 394d18857979..3eb1ab40ca5c 100644
8408 --- a/net/rxrpc/ar-internal.h
8409 +++ b/net/rxrpc/ar-internal.h
8410 @@ -489,7 +489,6 @@ enum rxrpc_call_flag {
8411 RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
8412 RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
8413 RXRPC_CALL_RX_UNDERRUN, /* Got data underrun */
8414 - RXRPC_CALL_IS_INTR, /* The call is interruptible */
8415 RXRPC_CALL_DISCONNECTED, /* The call has been disconnected */
8416 };
8417
8418 @@ -598,6 +597,7 @@ struct rxrpc_call {
8419 atomic_t usage;
8420 u16 service_id; /* service ID */
8421 u8 security_ix; /* Security type */
8422 + enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */
8423 u32 call_id; /* call ID on connection */
8424 u32 cid; /* connection ID plus channel index */
8425 int debug_id; /* debug ID for printks */
8426 @@ -720,7 +720,7 @@ struct rxrpc_call_params {
8427 u32 normal; /* Max time since last call packet (msec) */
8428 } timeouts;
8429 u8 nr_timeouts; /* Number of timeouts specified */
8430 - bool intr; /* The call is interruptible */
8431 + enum rxrpc_interruptibility interruptibility; /* How interruptible is the call? */
8432 };
8433
8434 struct rxrpc_send_params {
8435 diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
8436 index c9f34b0a11df..f07970207b54 100644
8437 --- a/net/rxrpc/call_object.c
8438 +++ b/net/rxrpc/call_object.c
8439 @@ -237,8 +237,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
8440 return call;
8441 }
8442
8443 - if (p->intr)
8444 - __set_bit(RXRPC_CALL_IS_INTR, &call->flags);
8445 + call->interruptibility = p->interruptibility;
8446 call->tx_total_len = p->tx_total_len;
8447 trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
8448 atomic_read(&call->usage),
8449 diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
8450 index ea7d4c21f889..f2a1a5dbb5a7 100644
8451 --- a/net/rxrpc/conn_client.c
8452 +++ b/net/rxrpc/conn_client.c
8453 @@ -655,13 +655,20 @@ static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
8454
8455 add_wait_queue_exclusive(&call->waitq, &myself);
8456 for (;;) {
8457 - if (test_bit(RXRPC_CALL_IS_INTR, &call->flags))
8458 + switch (call->interruptibility) {
8459 + case RXRPC_INTERRUPTIBLE:
8460 + case RXRPC_PREINTERRUPTIBLE:
8461 set_current_state(TASK_INTERRUPTIBLE);
8462 - else
8463 + break;
8464 + case RXRPC_UNINTERRUPTIBLE:
8465 + default:
8466 set_current_state(TASK_UNINTERRUPTIBLE);
8467 + break;
8468 + }
8469 if (call->call_id)
8470 break;
8471 - if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) &&
8472 + if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
8473 + call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
8474 signal_pending(current)) {
8475 ret = -ERESTARTSYS;
8476 break;
8477 diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
8478 index 136eb465bfcb..0fcf157aa09f 100644
8479 --- a/net/rxrpc/sendmsg.c
8480 +++ b/net/rxrpc/sendmsg.c
8481 @@ -17,6 +17,21 @@
8482 #include <net/af_rxrpc.h>
8483 #include "ar-internal.h"
8484
8485 +/*
8486 + * Return true if there's sufficient Tx queue space.
8487 + */
8488 +static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
8489 +{
8490 + unsigned int win_size =
8491 + min_t(unsigned int, call->tx_winsize,
8492 + call->cong_cwnd + call->cong_extra);
8493 + rxrpc_seq_t tx_win = READ_ONCE(call->tx_hard_ack);
8494 +
8495 + if (_tx_win)
8496 + *_tx_win = tx_win;
8497 + return call->tx_top - tx_win < win_size;
8498 +}
8499 +
8500 /*
8501 * Wait for space to appear in the Tx queue or a signal to occur.
8502 */
8503 @@ -26,9 +41,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
8504 {
8505 for (;;) {
8506 set_current_state(TASK_INTERRUPTIBLE);
8507 - if (call->tx_top - call->tx_hard_ack <
8508 - min_t(unsigned int, call->tx_winsize,
8509 - call->cong_cwnd + call->cong_extra))
8510 + if (rxrpc_check_tx_space(call, NULL))
8511 return 0;
8512
8513 if (call->state >= RXRPC_CALL_COMPLETE)
8514 @@ -49,7 +62,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
8515 * Wait for space to appear in the Tx queue uninterruptibly, but with
8516 * a timeout of 2*RTT if no progress was made and a signal occurred.
8517 */
8518 -static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
8519 +static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
8520 struct rxrpc_call *call)
8521 {
8522 rxrpc_seq_t tx_start, tx_win;
8523 @@ -68,16 +81,13 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
8524 set_current_state(TASK_UNINTERRUPTIBLE);
8525
8526 tx_win = READ_ONCE(call->tx_hard_ack);
8527 - if (call->tx_top - tx_win <
8528 - min_t(unsigned int, call->tx_winsize,
8529 - call->cong_cwnd + call->cong_extra))
8530 + if (rxrpc_check_tx_space(call, &tx_win))
8531 return 0;
8532
8533 if (call->state >= RXRPC_CALL_COMPLETE)
8534 return call->error;
8535
8536 - if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) &&
8537 - timeout == 0 &&
8538 + if (timeout == 0 &&
8539 tx_win == tx_start && signal_pending(current))
8540 return -EINTR;
8541
8542 @@ -91,6 +101,26 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
8543 }
8544 }
8545
8546 +/*
8547 + * Wait for space to appear in the Tx queue uninterruptibly.
8548 + */
8549 +static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
8550 + struct rxrpc_call *call,
8551 + long *timeo)
8552 +{
8553 + for (;;) {
8554 + set_current_state(TASK_UNINTERRUPTIBLE);
8555 + if (rxrpc_check_tx_space(call, NULL))
8556 + return 0;
8557 +
8558 + if (call->state >= RXRPC_CALL_COMPLETE)
8559 + return call->error;
8560 +
8561 + trace_rxrpc_transmit(call, rxrpc_transmit_wait);
8562 + *timeo = schedule_timeout(*timeo);
8563 + }
8564 +}
8565 +
8566 /*
8567 * wait for space to appear in the transmit/ACK window
8568 * - caller holds the socket locked
8569 @@ -108,10 +138,19 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
8570
8571 add_wait_queue(&call->waitq, &myself);
8572
8573 - if (waitall)
8574 - ret = rxrpc_wait_for_tx_window_nonintr(rx, call);
8575 - else
8576 - ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
8577 + switch (call->interruptibility) {
8578 + case RXRPC_INTERRUPTIBLE:
8579 + if (waitall)
8580 + ret = rxrpc_wait_for_tx_window_waitall(rx, call);
8581 + else
8582 + ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
8583 + break;
8584 + case RXRPC_PREINTERRUPTIBLE:
8585 + case RXRPC_UNINTERRUPTIBLE:
8586 + default:
8587 + ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
8588 + break;
8589 + }
8590
8591 remove_wait_queue(&call->waitq, &myself);
8592 set_current_state(TASK_RUNNING);
8593 @@ -302,9 +341,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
8594
8595 _debug("alloc");
8596
8597 - if (call->tx_top - call->tx_hard_ack >=
8598 - min_t(unsigned int, call->tx_winsize,
8599 - call->cong_cwnd + call->cong_extra)) {
8600 + if (!rxrpc_check_tx_space(call, NULL)) {
8601 ret = -EAGAIN;
8602 if (msg->msg_flags & MSG_DONTWAIT)
8603 goto maybe_error;
8604 @@ -619,7 +656,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
8605 .call.tx_total_len = -1,
8606 .call.user_call_ID = 0,
8607 .call.nr_timeouts = 0,
8608 - .call.intr = true,
8609 + .call.interruptibility = RXRPC_INTERRUPTIBLE,
8610 .abort_code = 0,
8611 .command = RXRPC_CMD_SEND_DATA,
8612 .exclusive = false,
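
rxrpc_check_tx_space() replaces three copies of the same open-coded window test; note that "call->tx_top - tx_win < win_size" is modular unsigned arithmetic, so the test stays correct even when the sequence counters wrap. A standalone C sketch of that serial-number style comparison (constants chosen to straddle the 32-bit wrap; the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* True if fewer than win_size packets are in flight; modular unsigned
 * subtraction keeps this correct across sequence-number wrap. */
static int tx_space(uint32_t tx_top, uint32_t tx_hard_ack, uint32_t win_size)
{
	return tx_top - tx_hard_ack < win_size;
}

int main(void)
{
	uint32_t ack = 0xfffffffeu;   /* just before the 32-bit wrap */
	uint32_t top = 0x00000003u;   /* wrapped: 5 packets in flight */

	printf("space with window 8: %d\n", tx_space(top, ack, 8));  /* 1 */
	printf("space with window 4: %d\n", tx_space(top, ack, 4));  /* 0 */
	return 0;
}
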
8613 diff --git a/net/wireless/scan.c b/net/wireless/scan.c
8614 index aef240fdf8df..328402ab64a3 100644
8615 --- a/net/wireless/scan.c
8616 +++ b/net/wireless/scan.c
8617 @@ -2022,7 +2022,11 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
8618
8619 spin_lock_bh(&rdev->bss_lock);
8620
8621 - if (WARN_ON(cbss->pub.channel == chan))
8622 + /*
8623 + * Some APs use CSA also for bandwidth changes, i.e., without actually
8624 + * changing the control channel, so no need to update in such a case.
8625 + */
8626 + if (cbss->pub.channel == chan)
8627 goto done;
8628
8629 /* use transmitting bss */
8630 diff --git a/security/keys/key.c b/security/keys/key.c
8631 index 764f4c57913e..e9845d0d8d34 100644
8632 --- a/security/keys/key.c
8633 +++ b/security/keys/key.c
8634 @@ -381,7 +381,7 @@ int key_payload_reserve(struct key *key, size_t datalen)
8635 spin_lock(&key->user->lock);
8636
8637 if (delta > 0 &&
8638 - (key->user->qnbytes + delta >= maxbytes ||
8639 + (key->user->qnbytes + delta > maxbytes ||
8640 key->user->qnbytes + delta < key->user->qnbytes)) {
8641 ret = -EDQUOT;
8642 }
8643 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
8644 index 9b898c969558..d1a3dea58dee 100644
8645 --- a/security/keys/keyctl.c
8646 +++ b/security/keys/keyctl.c
8647 @@ -937,8 +937,8 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
8648 key_quota_root_maxbytes : key_quota_maxbytes;
8649
8650 spin_lock(&newowner->lock);
8651 - if (newowner->qnkeys + 1 >= maxkeys ||
8652 - newowner->qnbytes + key->quotalen >= maxbytes ||
8653 + if (newowner->qnkeys + 1 > maxkeys ||
8654 + newowner->qnbytes + key->quotalen > maxbytes ||
8655 newowner->qnbytes + key->quotalen <
8656 newowner->qnbytes)
8657 goto quota_overrun;
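
Both quota hunks fix the same off-by-one: with ">=", an allocation landing exactly on maxkeys or maxbytes was rejected, so the advertised quota could never actually be reached; the separate "<" comparison already guards the unsigned addition against overflow. A small C illustration of the boundary change:

#include <stdio.h>

int main(void)
{
	unsigned int qnbytes = 19000, quotalen = 1000, maxbytes = 20000;

	/* Old check: rejects an allocation that lands exactly on the quota. */
	printf("old (>=) rejects: %d\n", qnbytes + quotalen >= maxbytes);  /* 1 */

	/* New check: the quota itself is reachable; only going past it fails. */
	printf("new (>)  rejects: %d\n", qnbytes + quotalen >  maxbytes);  /* 0 */
	return 0;
}
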
8658 diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
8659 index 732bbede7ebf..8539047145de 100644
8660 --- a/sound/core/oss/pcm_plugin.c
8661 +++ b/sound/core/oss/pcm_plugin.c
8662 @@ -196,7 +196,9 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin)
8663 return 0;
8664 }
8665
8666 -snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t drv_frames)
8667 +static snd_pcm_sframes_t plug_client_size(struct snd_pcm_substream *plug,
8668 + snd_pcm_uframes_t drv_frames,
8669 + bool check_size)
8670 {
8671 struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next;
8672 int stream;
8673 @@ -209,7 +211,7 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
8674 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
8675 plugin = snd_pcm_plug_last(plug);
8676 while (plugin && drv_frames > 0) {
8677 - if (drv_frames > plugin->buf_frames)
8678 + if (check_size && drv_frames > plugin->buf_frames)
8679 drv_frames = plugin->buf_frames;
8680 plugin_prev = plugin->prev;
8681 if (plugin->src_frames)
8682 @@ -222,7 +224,7 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
8683 plugin_next = plugin->next;
8684 if (plugin->dst_frames)
8685 drv_frames = plugin->dst_frames(plugin, drv_frames);
8686 - if (drv_frames > plugin->buf_frames)
8687 + if (check_size && drv_frames > plugin->buf_frames)
8688 drv_frames = plugin->buf_frames;
8689 plugin = plugin_next;
8690 }
8691 @@ -231,7 +233,9 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
8692 return drv_frames;
8693 }
8694
8695 -snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t clt_frames)
8696 +static snd_pcm_sframes_t plug_slave_size(struct snd_pcm_substream *plug,
8697 + snd_pcm_uframes_t clt_frames,
8698 + bool check_size)
8699 {
8700 struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next;
8701 snd_pcm_sframes_t frames;
8702 @@ -252,14 +256,14 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
8703 if (frames < 0)
8704 return frames;
8705 }
8706 - if (frames > plugin->buf_frames)
8707 + if (check_size && frames > plugin->buf_frames)
8708 frames = plugin->buf_frames;
8709 plugin = plugin_next;
8710 }
8711 } else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
8712 plugin = snd_pcm_plug_last(plug);
8713 while (plugin) {
8714 - if (frames > plugin->buf_frames)
8715 + if (check_size && frames > plugin->buf_frames)
8716 frames = plugin->buf_frames;
8717 plugin_prev = plugin->prev;
8718 if (plugin->src_frames) {
8719 @@ -274,6 +278,18 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
8720 return frames;
8721 }
8722
8723 +snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug,
8724 + snd_pcm_uframes_t drv_frames)
8725 +{
8726 + return plug_client_size(plug, drv_frames, false);
8727 +}
8728 +
8729 +snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug,
8730 + snd_pcm_uframes_t clt_frames)
8731 +{
8732 + return plug_slave_size(plug, clt_frames, false);
8733 +}
8734 +
8735 static int snd_pcm_plug_formats(const struct snd_mask *mask,
8736 snd_pcm_format_t format)
8737 {
8738 @@ -630,7 +646,7 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
8739 src_channels = dst_channels;
8740 plugin = next;
8741 }
8742 - return snd_pcm_plug_client_size(plug, frames);
8743 + return plug_client_size(plug, frames, true);
8744 }
8745
8746 snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, struct snd_pcm_plugin_channel *dst_channels_final, snd_pcm_uframes_t size)
8747 @@ -640,7 +656,7 @@ snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, str
8748 snd_pcm_sframes_t frames = size;
8749 int err;
8750
8751 - frames = snd_pcm_plug_slave_size(plug, frames);
8752 + frames = plug_slave_size(plug, frames, true);
8753 if (frames < 0)
8754 return frames;
8755
8756 diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
8757 index b7d9160ed868..c6e1e03a5e4d 100644
8758 --- a/sound/pci/hda/hda_beep.c
8759 +++ b/sound/pci/hda/hda_beep.c
8760 @@ -290,8 +290,12 @@ int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol,
8761 {
8762 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
8763 struct hda_beep *beep = codec->beep;
8764 + int chs = get_amp_channels(kcontrol);
8765 +
8766 if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) {
8767 - ucontrol->value.integer.value[0] =
8768 + if (chs & 1)
8769 + ucontrol->value.integer.value[0] = beep->enabled;
8770 + if (chs & 2)
8771 ucontrol->value.integer.value[1] = beep->enabled;
8772 return 0;
8773 }
8774 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
8775 index 85beb172d810..1db9d0579c72 100644
8776 --- a/sound/pci/hda/hda_intel.c
8777 +++ b/sound/pci/hda/hda_intel.c
8778 @@ -2024,6 +2024,17 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
8779 #endif
8780 }
8781
8782 +/* Blacklist for skipping the whole probe:
8783 + * some HD-audio PCI entries are exposed without any codecs, and such devices
8784 + * should be ignored from the beginning.
8785 + */
8786 +static const struct snd_pci_quirk driver_blacklist[] = {
8787 + SND_PCI_QUIRK(0x1043, 0x874f, "ASUS ROG Zenith II / Strix", 0),
8788 + SND_PCI_QUIRK(0x1462, 0xcb59, "MSI TRX40 Creator", 0),
8789 + SND_PCI_QUIRK(0x1462, 0xcb60, "MSI TRX40", 0),
8790 + {}
8791 +};
8792 +
8793 static const struct hda_controller_ops pci_hda_ops = {
8794 .disable_msi_reset_irq = disable_msi_reset_irq,
8795 .pcm_mmap_prepare = pcm_mmap_prepare,
8796 @@ -2059,6 +2070,11 @@ static int azx_probe(struct pci_dev *pci,
8797 bool schedule_probe;
8798 int err;
8799
8800 + if (snd_pci_quirk_lookup(pci, driver_blacklist)) {
8801 + dev_info(&pci->dev, "Skipping the blacklisted device\n");
8802 + return -ENODEV;
8803 + }
8804 +
8805 if (dev >= SNDRV_CARDS)
8806 return -ENODEV;
8807 if (!enable[dev]) {
8808 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
8809 index 128db2e6bc64..0fcbb6ab2d58 100644
8810 --- a/sound/pci/hda/patch_realtek.c
8811 +++ b/sound/pci/hda/patch_realtek.c
8812 @@ -107,6 +107,7 @@ struct alc_spec {
8813 unsigned int done_hp_init:1;
8814 unsigned int no_shutup_pins:1;
8815 unsigned int ultra_low_power:1;
8816 + unsigned int has_hs_key:1;
8817
8818 /* for PLL fix */
8819 hda_nid_t pll_nid;
8820 @@ -367,7 +368,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
8821 case 0x10ec0215:
8822 case 0x10ec0233:
8823 case 0x10ec0235:
8824 + case 0x10ec0236:
8825 case 0x10ec0255:
8826 + case 0x10ec0256:
8827 case 0x10ec0257:
8828 case 0x10ec0282:
8829 case 0x10ec0283:
8830 @@ -379,11 +382,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
8831 case 0x10ec0300:
8832 alc_update_coef_idx(codec, 0x10, 1<<9, 0);
8833 break;
8834 - case 0x10ec0236:
8835 - case 0x10ec0256:
8836 - alc_write_coef_idx(codec, 0x36, 0x5757);
8837 - alc_update_coef_idx(codec, 0x10, 1<<9, 0);
8838 - break;
8839 case 0x10ec0275:
8840 alc_update_coef_idx(codec, 0xe, 0, 1<<0);
8841 break;
8842 @@ -2449,6 +2447,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
8843 SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
8844 SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
8845 SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
8846 + SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
8847 SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
8848 SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
8849 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
8850 @@ -2982,6 +2981,107 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
8851 return alc_parse_auto_config(codec, alc269_ignore, ssids);
8852 }
8853
8854 +static const struct hda_jack_keymap alc_headset_btn_keymap[] = {
8855 + { SND_JACK_BTN_0, KEY_PLAYPAUSE },
8856 + { SND_JACK_BTN_1, KEY_VOICECOMMAND },
8857 + { SND_JACK_BTN_2, KEY_VOLUMEUP },
8858 + { SND_JACK_BTN_3, KEY_VOLUMEDOWN },
8859 + {}
8860 +};
8861 +
8862 +static void alc_headset_btn_callback(struct hda_codec *codec,
8863 + struct hda_jack_callback *jack)
8864 +{
8865 + int report = 0;
8866 +
8867 + if (jack->unsol_res & (7 << 13))
8868 + report |= SND_JACK_BTN_0;
8869 +
8870 + if (jack->unsol_res & (1 << 16 | 3 << 8))
8871 + report |= SND_JACK_BTN_1;
8872 +
8873 + /* Volume up key */
8874 + if (jack->unsol_res & (7 << 23))
8875 + report |= SND_JACK_BTN_2;
8876 +
8877 + /* Volume down key */
8878 + if (jack->unsol_res & (7 << 10))
8879 + report |= SND_JACK_BTN_3;
8880 +
8881 + jack->jack->button_state = report;
8882 +}
8883 +
8884 +static void alc_disable_headset_jack_key(struct hda_codec *codec)
8885 +{
8886 + struct alc_spec *spec = codec->spec;
8887 +
8888 + if (!spec->has_hs_key)
8889 + return;
8890 +
8891 + switch (codec->core.vendor_id) {
8892 + case 0x10ec0215:
8893 + case 0x10ec0225:
8894 + case 0x10ec0285:
8895 + case 0x10ec0295:
8896 + case 0x10ec0289:
8897 + case 0x10ec0299:
8898 + alc_write_coef_idx(codec, 0x48, 0x0);
8899 + alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
8900 + alc_update_coef_idx(codec, 0x44, 0x0045 << 8, 0x0);
8901 + break;
8902 + case 0x10ec0236:
8903 + case 0x10ec0256:
8904 + alc_write_coef_idx(codec, 0x48, 0x0);
8905 + alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
8906 + break;
8907 + }
8908 +}
8909 +
8910 +static void alc_enable_headset_jack_key(struct hda_codec *codec)
8911 +{
8912 + struct alc_spec *spec = codec->spec;
8913 +
8914 + if (!spec->has_hs_key)
8915 + return;
8916 +
8917 + switch (codec->core.vendor_id) {
8918 + case 0x10ec0215:
8919 + case 0x10ec0225:
8920 + case 0x10ec0285:
8921 + case 0x10ec0295:
8922 + case 0x10ec0289:
8923 + case 0x10ec0299:
8924 + alc_write_coef_idx(codec, 0x48, 0xd011);
8925 + alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
8926 + alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8);
8927 + break;
8928 + case 0x10ec0236:
8929 + case 0x10ec0256:
8930 + alc_write_coef_idx(codec, 0x48, 0xd011);
8931 + alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
8932 + break;
8933 + }
8934 +}
8935 +
8936 +static void alc_fixup_headset_jack(struct hda_codec *codec,
8937 + const struct hda_fixup *fix, int action)
8938 +{
8939 + struct alc_spec *spec = codec->spec;
8940 +
8941 + switch (action) {
8942 + case HDA_FIXUP_ACT_PRE_PROBE:
8943 + spec->has_hs_key = 1;
8944 + snd_hda_jack_detect_enable_callback(codec, 0x55,
8945 + alc_headset_btn_callback);
8946 + snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
8947 + SND_JACK_HEADSET, alc_headset_btn_keymap);
8948 + break;
8949 + case HDA_FIXUP_ACT_INIT:
8950 + alc_enable_headset_jack_key(codec);
8951 + break;
8952 + }
8953 +}
8954 +
8955 static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
8956 {
8957 alc_update_coef_idx(codec, 0x04, 1 << 11, power_up ? (1 << 11) : 0);
8958 @@ -3269,7 +3369,13 @@ static void alc256_init(struct hda_codec *codec)
8959 alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* High power */
8960 alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */
8961 alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15);
8962 - alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
8963 + /*
8964 + * Expose headphone mic (or possibly Line In on some machines) instead
8965 + * of PC Beep on 1Ah, and disable 1Ah loopback for all outputs. See
8966 + * Documentation/sound/hd-audio/realtek-pc-beep.rst for details of
8967 + * this register.
8968 + */
8969 + alc_write_coef_idx(codec, 0x36, 0x5757);
8970 }
8971
8972 static void alc256_shutup(struct hda_codec *codec)
8973 @@ -3372,6 +3478,8 @@ static void alc225_shutup(struct hda_codec *codec)
8974
8975 if (!hp_pin)
8976 hp_pin = 0x21;
8977 +
8978 + alc_disable_headset_jack_key(codec);
8979 /* 3k pull low control for Headset jack. */
8980 alc_update_coef_idx(codec, 0x4a, 0, 3 << 10);
8981
8982 @@ -3411,6 +3519,9 @@ static void alc225_shutup(struct hda_codec *codec)
8983 alc_update_coef_idx(codec, 0x4a, 3<<4, 2<<4);
8984 msleep(30);
8985 }
8986 +
8987 + alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
8988 + alc_enable_headset_jack_key(codec);
8989 }
8990
8991 static void alc_default_init(struct hda_codec *codec)
8992 @@ -4008,6 +4119,12 @@ static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
8993 alc_fixup_hp_gpio_led(codec, action, 0x08, 0x10);
8994 }
8995
8996 +static void alc285_fixup_hp_gpio_led(struct hda_codec *codec,
8997 + const struct hda_fixup *fix, int action)
8998 +{
8999 + alc_fixup_hp_gpio_led(codec, action, 0x04, 0x00);
9000 +}
9001 +
9002 static void alc286_fixup_hp_gpio_led(struct hda_codec *codec,
9003 const struct hda_fixup *fix, int action)
9004 {
9005 @@ -5375,17 +5492,6 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
9006 }
9007 }
9008
9009 -static void alc256_fixup_dell_xps_13_headphone_noise2(struct hda_codec *codec,
9010 - const struct hda_fixup *fix,
9011 - int action)
9012 -{
9013 - if (action != HDA_FIXUP_ACT_PRE_PROBE)
9014 - return;
9015 -
9016 - snd_hda_codec_amp_stereo(codec, 0x1a, HDA_INPUT, 0, HDA_AMP_VOLMASK, 1);
9017 - snd_hda_override_wcaps(codec, 0x1a, get_wcaps(codec, 0x1a) & ~AC_WCAP_IN_AMP);
9018 -}
9019 -
9020 static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
9021 const struct hda_fixup *fix,
9022 int action)
9023 @@ -5662,69 +5768,6 @@ static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
9024 snd_hda_override_wcaps(codec, 0x03, 0);
9025 }
9026
9027 -static const struct hda_jack_keymap alc_headset_btn_keymap[] = {
9028 - { SND_JACK_BTN_0, KEY_PLAYPAUSE },
9029 - { SND_JACK_BTN_1, KEY_VOICECOMMAND },
9030 - { SND_JACK_BTN_2, KEY_VOLUMEUP },
9031 - { SND_JACK_BTN_3, KEY_VOLUMEDOWN },
9032 - {}
9033 -};
9034 -
9035 -static void alc_headset_btn_callback(struct hda_codec *codec,
9036 - struct hda_jack_callback *jack)
9037 -{
9038 - int report = 0;
9039 -
9040 - if (jack->unsol_res & (7 << 13))
9041 - report |= SND_JACK_BTN_0;
9042 -
9043 - if (jack->unsol_res & (1 << 16 | 3 << 8))
9044 - report |= SND_JACK_BTN_1;
9045 -
9046 - /* Volume up key */
9047 - if (jack->unsol_res & (7 << 23))
9048 - report |= SND_JACK_BTN_2;
9049 -
9050 - /* Volume down key */
9051 - if (jack->unsol_res & (7 << 10))
9052 - report |= SND_JACK_BTN_3;
9053 -
9054 - jack->jack->button_state = report;
9055 -}
9056 -
9057 -static void alc_fixup_headset_jack(struct hda_codec *codec,
9058 - const struct hda_fixup *fix, int action)
9059 -{
9060 -
9061 - switch (action) {
9062 - case HDA_FIXUP_ACT_PRE_PROBE:
9063 - snd_hda_jack_detect_enable_callback(codec, 0x55,
9064 - alc_headset_btn_callback);
9065 - snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
9066 - SND_JACK_HEADSET, alc_headset_btn_keymap);
9067 - break;
9068 - case HDA_FIXUP_ACT_INIT:
9069 - switch (codec->core.vendor_id) {
9070 - case 0x10ec0215:
9071 - case 0x10ec0225:
9072 - case 0x10ec0285:
9073 - case 0x10ec0295:
9074 - case 0x10ec0289:
9075 - case 0x10ec0299:
9076 - alc_write_coef_idx(codec, 0x48, 0xd011);
9077 - alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
9078 - alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8);
9079 - break;
9080 - case 0x10ec0236:
9081 - case 0x10ec0256:
9082 - alc_write_coef_idx(codec, 0x48, 0xd011);
9083 - alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
9084 - break;
9085 - }
9086 - break;
9087 - }
9088 -}
9089 -
9090 static void alc295_fixup_chromebook(struct hda_codec *codec,
9091 const struct hda_fixup *fix, int action)
9092 {
9093 @@ -5863,8 +5906,6 @@ enum {
9094 ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
9095 ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
9096 ALC275_FIXUP_DELL_XPS,
9097 - ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
9098 - ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2,
9099 ALC293_FIXUP_LENOVO_SPK_NOISE,
9100 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
9101 ALC255_FIXUP_DELL_SPK_NOISE,
9102 @@ -5923,6 +5964,7 @@ enum {
9103 ALC294_FIXUP_ASUS_DUAL_SPK,
9104 ALC285_FIXUP_THINKPAD_HEADSET_JACK,
9105 ALC294_FIXUP_ASUS_HPE,
9106 + ALC285_FIXUP_HP_GPIO_LED,
9107 };
9108
9109 static const struct hda_fixup alc269_fixups[] = {
9110 @@ -6604,23 +6646,6 @@ static const struct hda_fixup alc269_fixups[] = {
9111 {}
9112 }
9113 },
9114 - [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = {
9115 - .type = HDA_FIXUP_VERBS,
9116 - .v.verbs = (const struct hda_verb[]) {
9117 - /* Disable pass-through path for FRONT 14h */
9118 - {0x20, AC_VERB_SET_COEF_INDEX, 0x36},
9119 - {0x20, AC_VERB_SET_PROC_COEF, 0x1737},
9120 - {}
9121 - },
9122 - .chained = true,
9123 - .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
9124 - },
9125 - [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2] = {
9126 - .type = HDA_FIXUP_FUNC,
9127 - .v.func = alc256_fixup_dell_xps_13_headphone_noise2,
9128 - .chained = true,
9129 - .chain_id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE
9130 - },
9131 [ALC293_FIXUP_LENOVO_SPK_NOISE] = {
9132 .type = HDA_FIXUP_FUNC,
9133 .v.func = alc_fixup_disable_aamix,
9134 @@ -7061,6 +7086,10 @@ static const struct hda_fixup alc269_fixups[] = {
9135 .chained = true,
9136 .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
9137 },
9138 + [ALC285_FIXUP_HP_GPIO_LED] = {
9139 + .type = HDA_FIXUP_FUNC,
9140 + .v.func = alc285_fixup_hp_gpio_led,
9141 + },
9142 };
9143
9144 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9145 @@ -7114,17 +7143,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9146 SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
9147 SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
9148 SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
9149 - SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
9150 SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
9151 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
9152 SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
9153 - SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
9154 SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
9155 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
9156 SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
9157 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
9158 SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
9159 - SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
9160 SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
9161 SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
9162 SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
9163 @@ -7208,6 +7234,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9164 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
9165 SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
9166 SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
9167 + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
9168 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
9169 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
9170 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
9171 @@ -7299,6 +7326,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9172 SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
9173 SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
9174 SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
9175 + SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
9176 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
9177 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
9178 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
9179 @@ -7477,7 +7505,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
9180 {.id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc298-dell1"},
9181 {.id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, .name = "alc298-dell-aio"},
9182 {.id = ALC275_FIXUP_DELL_XPS, .name = "alc275-dell-xps"},
9183 - {.id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, .name = "alc256-dell-xps13"},
9184 {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
9185 {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
9186 {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
9187 diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
9188 index 9d71e9d5c9a0..3cf41c11a405 100644
9189 --- a/sound/pci/ice1712/prodigy_hifi.c
9190 +++ b/sound/pci/ice1712/prodigy_hifi.c
9191 @@ -536,7 +536,7 @@ static int wm_adc_mux_enum_get(struct snd_kcontrol *kcontrol,
9192 struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
9193
9194 mutex_lock(&ice->gpio_mutex);
9195 - ucontrol->value.integer.value[0] = wm_get(ice, WM_ADC_MUX) & 0x1f;
9196 + ucontrol->value.enumerated.item[0] = wm_get(ice, WM_ADC_MUX) & 0x1f;
9197 mutex_unlock(&ice->gpio_mutex);
9198 return 0;
9199 }
9200 @@ -550,7 +550,7 @@ static int wm_adc_mux_enum_put(struct snd_kcontrol *kcontrol,
9201
9202 mutex_lock(&ice->gpio_mutex);
9203 oval = wm_get(ice, WM_ADC_MUX);
9204 - nval = (oval & 0xe0) | ucontrol->value.integer.value[0];
9205 + nval = (oval & 0xe0) | ucontrol->value.enumerated.item[0];
9206 if (nval != oval) {
9207 wm_put(ice, WM_ADC_MUX, nval);
9208 change = 1;
9209 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
9210 index ebd785f9aa46..e0ff40b10d85 100644
9211 --- a/sound/soc/soc-dapm.c
9212 +++ b/sound/soc/soc-dapm.c
9213 @@ -802,7 +802,13 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i,
9214 val = max - val;
9215 p->connect = !!val;
9216 } else {
9217 - p->connect = 0;
9218 + /* Since a virtual mixer has no backing registers to
9219 + * decide which path to connect, it tries to match the
9220 + * initial state. This ensures that the default mixer
9221 + * choice is correctly powered up during
9222 + * initialization.
9223 + */
9224 + p->connect = invert;
9225 }
9226 }
9227
9228 diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
9229 index f4dc3d445aae..95fc24580f85 100644
9230 --- a/sound/soc/soc-ops.c
9231 +++ b/sound/soc/soc-ops.c
9232 @@ -832,7 +832,7 @@ int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol,
9233 unsigned int regbase = mc->regbase;
9234 unsigned int regcount = mc->regcount;
9235 unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
9236 - unsigned int regwmask = (1<<regwshift)-1;
9237 + unsigned int regwmask = (1UL<<regwshift)-1;
9238 unsigned int invert = mc->invert;
9239 unsigned long mask = (1UL<<mc->nbits)-1;
9240 long min = mc->min;
9241 @@ -881,7 +881,7 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
9242 unsigned int regbase = mc->regbase;
9243 unsigned int regcount = mc->regcount;
9244 unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
9245 - unsigned int regwmask = (1<<regwshift)-1;
9246 + unsigned int regwmask = (1UL<<regwshift)-1;
9247 unsigned int invert = mc->invert;
9248 unsigned long mask = (1UL<<mc->nbits)-1;
9249 long max = mc->max;
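
Both hunks widen the register-word mask computation. With 32-bit register
words, regwshift is 32, and shifting a plain int by 32 is undefined
behaviour; promoting the constant to unsigned long keeps the shift valid
on LP64 targets. A self-contained demonstration::

    #include <stdio.h>

    int main(void)
    {
            unsigned int regwshift = 4 /* val_bytes */ * 8 /* BITS_PER_BYTE */;

            /* the fixed expression: well-defined when long is 64 bits */
            unsigned long regwmask = (1UL << regwshift) - 1;
            printf("regwmask = %#lx\n", regwmask);  /* 0xffffffff */

            /* the old expression, (1 << regwshift) - 1, shifts a 32-bit
             * int by 32: undefined behaviour; on x86 the shift count
             * wraps and the mask collapses to 0, so it is left out of
             * this sketch */
            return 0;
    }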
9250 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
9251 index d978df95c5c6..cc4e9aa80fb0 100644
9252 --- a/sound/soc/soc-pcm.c
9253 +++ b/sound/soc/soc-pcm.c
9254 @@ -2222,7 +2222,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
9255 switch (cmd) {
9256 case SNDRV_PCM_TRIGGER_START:
9257 if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
9258 - (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
9259 + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
9260 + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
9261 continue;
9262
9263 ret = dpcm_do_trigger(dpcm, be_substream, cmd);
9264 @@ -2252,7 +2253,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
9265 be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
9266 break;
9267 case SNDRV_PCM_TRIGGER_STOP:
9268 - if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
9269 + if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) &&
9270 + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
9271 continue;
9272
9273 if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
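
The trigger gate is widened so a paused back-end is no longer skipped:
START must be accepted from PAUSED (resume after pause) and STOP from
PAUSED (stop while paused), or the BE drifts out of sync with the
front-end. A stand-alone model of the gate (the enum mirrors the
SND_SOC_DPCM_STATE_* constants but is local to this sketch)::

    #include <stdbool.h>
    #include <stdio.h>

    enum dpcm_state { DPCM_PREPARE, DPCM_START, DPCM_STOP, DPCM_PAUSED };

    static bool be_accepts_start(enum dpcm_state s)
    {
            return s == DPCM_PREPARE || s == DPCM_STOP || s == DPCM_PAUSED;
    }

    static bool be_accepts_stop(enum dpcm_state s)
    {
            return s == DPCM_START || s == DPCM_PAUSED;
    }

    int main(void)
    {
            printf("START from PAUSED: %d\n", be_accepts_start(DPCM_PAUSED));
            printf("STOP from PAUSED:  %d\n", be_accepts_stop(DPCM_PAUSED));
            return 0;
    }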
9274 diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
9275 index b19ecaf0febf..17556a47f727 100644
9276 --- a/sound/soc/soc-topology.c
9277 +++ b/sound/soc/soc-topology.c
9278 @@ -362,7 +362,7 @@ static int soc_tplg_add_kcontrol(struct soc_tplg *tplg,
9279 struct snd_soc_component *comp = tplg->comp;
9280
9281 return soc_tplg_add_dcontrol(comp->card->snd_card,
9282 - comp->dev, k, NULL, comp, kcontrol);
9283 + comp->dev, k, comp->name_prefix, comp, kcontrol);
9284 }
9285
9286 /* remove a mixer kcontrol */
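
Passing comp->name_prefix instead of NULL means controls created from
topology get the same "<prefix> <name>" form as statically registered
ones, so two instances of one component don't collide. A sketch of the
naming effect (add_prefixed_name() is a stand-in, not a kernel helper)::

    #include <stdio.h>

    static void add_prefixed_name(const char *prefix, const char *name,
                                  char *out, size_t len)
    {
            if (prefix)
                    snprintf(out, len, "%s %s", prefix, name);
            else
                    snprintf(out, len, "%s", name);
    }

    int main(void)
    {
            char buf[64];

            add_prefixed_name("Left", "Speaker Volume", buf, sizeof(buf));
            printf("%s\n", buf);    /* "Left Speaker Volume" */
            add_prefixed_name(NULL, "Speaker Volume", buf, sizeof(buf));
            printf("%s\n", buf);    /* unchanged without a prefix */
            return 0;
    }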
9287 diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
9288 index 10eb4b8e8e7e..d3259de43712 100644
9289 --- a/sound/soc/stm/stm32_sai_sub.c
9290 +++ b/sound/soc/stm/stm32_sai_sub.c
9291 @@ -1551,8 +1551,10 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
9292
9293 ret = snd_soc_register_component(&pdev->dev, &stm32_component,
9294 &sai->cpu_dai_drv, 1);
9295 - if (ret)
9296 + if (ret) {
9297 + snd_dmaengine_pcm_unregister(&pdev->dev);
9298 return ret;
9299 + }
9300
9301 if (STM_SAI_PROTOCOL_IS_SPDIF(sai))
9302 conf = &stm32_sai_pcm_config_spdif;
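
The probe error path now releases the DMA PCM registered earlier, the
usual acquire/release pairing for failed probes. The shape of the fix,
reduced to dummy register/unregister pairs (none of these helpers are the
STM32 driver's API)::

    #include <stdio.h>

    static int  register_pcm(void)       { puts("pcm registered");   return 0; }
    static void unregister_pcm(void)     { puts("pcm unregistered"); }
    static int  register_component(void) { puts("component failed"); return -1; }

    static int probe(void)
    {
            int ret = register_pcm();

            if (ret)
                    return ret;

            ret = register_component();
            if (ret) {
                    /* the leak the patch closes: without this call the
                     * PCM stayed registered after a failed probe */
                    unregister_pcm();
                    return ret;
            }
            return 0;
    }

    int main(void)
    {
            return probe() ? 1 : 0;
    }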
9303 diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
9304 index 73baf398c84a..f6a67eecb063 100644
9305 --- a/sound/usb/mixer_maps.c
9306 +++ b/sound/usb/mixer_maps.c
9307 @@ -349,6 +349,14 @@ static const struct usbmix_name_map dell_alc4020_map[] = {
9308 { 0 }
9309 };
9310
9311 +/* Some mobos ship with a dummy HD-audio device that returns an invalid
9312 + * GET_MIN/GET_MAX response for Input Gain Pad (id=19, control=12). Skip it.
9313 + */
9314 +static const struct usbmix_name_map asus_rog_map[] = {
9315 + { 19, NULL, 12 }, /* FU, Input Gain Pad */
9316 + {}
9317 +};
9318 +
9319 /*
9320 * Control map entries
9321 */
9322 @@ -468,6 +476,26 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
9323 .id = USB_ID(0x05a7, 0x1020),
9324 .map = bose_companion5_map,
9325 },
9326 + { /* Gigabyte TRX40 Aorus Pro WiFi */
9327 + .id = USB_ID(0x0414, 0xa002),
9328 + .map = asus_rog_map,
9329 + },
9330 + { /* ASUS ROG Zenith II */
9331 + .id = USB_ID(0x0b05, 0x1916),
9332 + .map = asus_rog_map,
9333 + },
9334 + { /* ASUS ROG Strix */
9335 + .id = USB_ID(0x0b05, 0x1917),
9336 + .map = asus_rog_map,
9337 + },
9338 + { /* MSI TRX40 Creator */
9339 + .id = USB_ID(0x0db0, 0x0d64),
9340 + .map = asus_rog_map,
9341 + },
9342 + { /* MSI TRX40 */
9343 + .id = USB_ID(0x0db0, 0x543d),
9344 + .map = asus_rog_map,
9345 + },
9346 { 0 } /* terminator */
9347 };
9348
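
The new table entries reuse one name map across five boards that share
the same broken USB audio firmware. Entries are matched on a packed
vendor:product ID and the table is scanned until the zero terminator; a
stand-alone sketch of that lookup (struct ctl_map and find_map() are
stand-ins for the kernel's types)::

    #include <stdint.h>
    #include <stdio.h>

    #define USB_ID(vendor, product) (((uint32_t)(vendor) << 16) | (product))

    struct ctl_map { uint32_t id; const char *map; };

    static const struct ctl_map maps[] = {
            { USB_ID(0x0b05, 0x1916), "asus_rog_map" }, /* ASUS ROG Zenith II */
            { USB_ID(0x0db0, 0x543d), "asus_rog_map" }, /* MSI TRX40 */
            { 0, NULL }                                 /* terminator */
    };

    static const char *find_map(uint32_t id)
    {
            const struct ctl_map *m;

            for (m = maps; m->id; m++)
                    if (m->id == id)
                            return m->map;
            return NULL;
    }

    int main(void)
    {
            printf("%s\n", find_map(USB_ID(0x0b05, 0x1916)));
            return 0;
    }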
9349 diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
9350 index 6080de58861f..6289b8d20dff 100644
9351 --- a/tools/gpio/Makefile
9352 +++ b/tools/gpio/Makefile
9353 @@ -35,7 +35,7 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h
9354
9355 prepare: $(OUTPUT)include/linux/gpio.h
9356
9357 -GPIO_UTILS_IN := $(output)gpio-utils-in.o
9358 +GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o
9359 $(GPIO_UTILS_IN): prepare FORCE
9360 $(Q)$(MAKE) $(build)=gpio-utils
9361
9362 diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
9363 index 46f7fba2306c..9832affd5d54 100644
9364 --- a/tools/perf/Makefile.config
9365 +++ b/tools/perf/Makefile.config
9366 @@ -228,8 +228,17 @@ strip-libs = $(filter-out -l%,$(1))
9367
9368 PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
9369
9370 +# Python 3.8 changed the output of `python-config --ldflags` so that it no
9371 +# longer includes the '-lpythonX.Y' flag unless '--embed' is also passed. The
9372 +# feature check for libpython fails if that flag is missing from LDFLAGS.
9373 +ifeq ($(shell $(PYTHON_CONFIG_SQ) --ldflags --embed 2>&1 1>/dev/null; echo $$?), 0)
9374 + PYTHON_CONFIG_LDFLAGS := --ldflags --embed
9375 +else
9376 + PYTHON_CONFIG_LDFLAGS := --ldflags
9377 +endif
9378 +
9379 ifdef PYTHON_CONFIG
9380 - PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
9381 + PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) $(PYTHON_CONFIG_LDFLAGS) 2>/dev/null)
9382 PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
9383 PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
9384 PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
9385 diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
9386 index 397d6b612502..aa6abfe0749c 100644
9387 --- a/tools/testing/radix-tree/Makefile
9388 +++ b/tools/testing/radix-tree/Makefile
9389 @@ -7,8 +7,8 @@ LDLIBS+= -lpthread -lurcu
9390 TARGETS = main idr-test multiorder xarray
9391 CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
9392 OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
9393 - regression4.o \
9394 - tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
9395 + regression4.o tag_check.o multiorder.o idr-test.o iteration_check.o \
9396 + iteration_check_2.o benchmark.o
9397
9398 ifndef SHIFT
9399 SHIFT=3
9400 diff --git a/tools/testing/radix-tree/iteration_check_2.c b/tools/testing/radix-tree/iteration_check_2.c
9401 new file mode 100644
9402 index 000000000000..aac5c50a3674
9403 --- /dev/null
9404 +++ b/tools/testing/radix-tree/iteration_check_2.c
9405 @@ -0,0 +1,87 @@
9406 +// SPDX-License-Identifier: GPL-2.0-or-later
9407 +/*
9408 + * iteration_check_2.c: Check that deleting a tagged entry doesn't cause
9409 + * an RCU walker to finish early.
9410 + * Copyright (c) 2020 Oracle
9411 + * Author: Matthew Wilcox <willy@infradead.org>
9412 + */
9413 +#include <pthread.h>
9414 +#include "test.h"
9415 +
9416 +static volatile bool test_complete;
9417 +
9418 +static void *iterator(void *arg)
9419 +{
9420 + XA_STATE(xas, arg, 0);
9421 + void *entry;
9422 +
9423 + rcu_register_thread();
9424 +
9425 + while (!test_complete) {
9426 + xas_set(&xas, 0);
9427 + rcu_read_lock();
9428 + xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
9429 + ;
9430 + rcu_read_unlock();
9431 + assert(xas.xa_index >= 100);
9432 + }
9433 +
9434 + rcu_unregister_thread();
9435 + return NULL;
9436 +}
9437 +
9438 +static void *throbber(void *arg)
9439 +{
9440 + struct xarray *xa = arg;
9441 +
9442 + rcu_register_thread();
9443 +
9444 + while (!test_complete) {
9445 + int i;
9446 +
9447 + for (i = 0; i < 100; i++) {
9448 + xa_store(xa, i, xa_mk_value(i), GFP_KERNEL);
9449 + xa_set_mark(xa, i, XA_MARK_0);
9450 + }
9451 + for (i = 0; i < 100; i++)
9452 + xa_erase(xa, i);
9453 + }
9454 +
9455 + rcu_unregister_thread();
9456 + return NULL;
9457 +}
9458 +
9459 +void iteration_test2(unsigned test_duration)
9460 +{
9461 + pthread_t threads[2];
9462 + DEFINE_XARRAY(array);
9463 + int i;
9464 +
9465 + printv(1, "Running iteration test 2 for %d seconds\n", test_duration);
9466 +
9467 + test_complete = false;
9468 +
9469 + xa_store(&array, 100, xa_mk_value(100), GFP_KERNEL);
9470 + xa_set_mark(&array, 100, XA_MARK_0);
9471 +
9472 + if (pthread_create(&threads[0], NULL, iterator, &array)) {
9473 + perror("create iterator thread");
9474 + exit(1);
9475 + }
9476 + if (pthread_create(&threads[1], NULL, throbber, &array)) {
9477 + perror("create throbber thread");
9478 + exit(1);
9479 + }
9480 +
9481 + sleep(test_duration);
9482 + test_complete = true;
9483 +
9484 + for (i = 0; i < 2; i++) {
9485 + if (pthread_join(threads[i], NULL)) {
9486 + perror("pthread_join");
9487 + exit(1);
9488 + }
9489 + }
9490 +
9491 + xa_destroy(&array);
9492 +}
9493 diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
9494 index 7a22d6e3732e..f2cbc8e5b97c 100644
9495 --- a/tools/testing/radix-tree/main.c
9496 +++ b/tools/testing/radix-tree/main.c
9497 @@ -311,6 +311,7 @@ int main(int argc, char **argv)
9498 regression4_test();
9499 iteration_test(0, 10 + 90 * long_run);
9500 iteration_test(7, 10 + 90 * long_run);
9501 + iteration_test2(10 + 90 * long_run);
9502 single_thread_tests(long_run);
9503
9504 /* Free any remaining preallocated nodes */
9505 diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
9506 index 1ee4b2c0ad10..34dab4d18744 100644
9507 --- a/tools/testing/radix-tree/test.h
9508 +++ b/tools/testing/radix-tree/test.h
9509 @@ -34,6 +34,7 @@ void xarray_tests(void);
9510 void tag_check(void);
9511 void multiorder_checks(void);
9512 void iteration_test(unsigned order, unsigned duration);
9513 +void iteration_test2(unsigned duration);
9514 void benchmark(void);
9515 void idr_checks(void);
9516 void ida_tests(void);
9517 diff --git a/tools/testing/selftests/net/reuseport_addr_any.c b/tools/testing/selftests/net/reuseport_addr_any.c
9518 index c6233935fed1..b8475cb29be7 100644
9519 --- a/tools/testing/selftests/net/reuseport_addr_any.c
9520 +++ b/tools/testing/selftests/net/reuseport_addr_any.c
9521 @@ -21,6 +21,10 @@
9522 #include <sys/socket.h>
9523 #include <unistd.h>
9524
9525 +#ifndef SOL_DCCP
9526 +#define SOL_DCCP 269
9527 +#endif
9528 +
9529 static const char *IP4_ADDR = "127.0.0.1";
9530 static const char *IP6_ADDR = "::1";
9531 static const char *IP4_MAPPED6 = "::ffff:127.0.0.1";
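
The fallback define keeps the selftest building against older userspace
headers that lack SOL_DCCP; the value must match the kernel's constant
(269, from the UAPI socket-level numbering). The pattern in isolation::

    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SOL_DCCP
    #define SOL_DCCP 269    /* must match the kernel UAPI value */
    #endif

    int main(void)
    {
            printf("SOL_DCCP = %d\n", SOL_DCCP);
            return 0;
    }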
9532 diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
9533 index 7101ffd08d66..d021172fa2eb 100644
9534 --- a/tools/testing/selftests/powerpc/mm/.gitignore
9535 +++ b/tools/testing/selftests/powerpc/mm/.gitignore
9536 @@ -5,3 +5,4 @@ prot_sao
9537 segv_errors
9538 wild_bctr
9539 large_vm_fork_separation
9540 +tlbie_test
9541 diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c
9542 index 5a2d7b8efc40..6af951900aa3 100644
9543 --- a/tools/testing/selftests/vm/map_hugetlb.c
9544 +++ b/tools/testing/selftests/vm/map_hugetlb.c
9545 @@ -45,20 +45,20 @@ static void check_bytes(char *addr)
9546 printf("First hex is %x\n", *((unsigned int *)addr));
9547 }
9548
9549 -static void write_bytes(char *addr)
9550 +static void write_bytes(char *addr, size_t length)
9551 {
9552 unsigned long i;
9553
9554 - for (i = 0; i < LENGTH; i++)
9555 + for (i = 0; i < length; i++)
9556 *(addr + i) = (char)i;
9557 }
9558
9559 -static int read_bytes(char *addr)
9560 +static int read_bytes(char *addr, size_t length)
9561 {
9562 unsigned long i;
9563
9564 check_bytes(addr);
9565 - for (i = 0; i < LENGTH; i++)
9566 + for (i = 0; i < length; i++)
9567 if (*(addr + i) != (char)i) {
9568 printf("Mismatch at %lu\n", i);
9569 return 1;
9570 @@ -96,11 +96,11 @@ int main(int argc, char **argv)
9571
9572 printf("Returned address is %p\n", addr);
9573 check_bytes(addr);
9574 - write_bytes(addr);
9575 - ret = read_bytes(addr);
9576 + write_bytes(addr, length);
9577 + ret = read_bytes(addr, length);
9578
9579 /* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
9580 - if (munmap(addr, LENGTH)) {
9581 + if (munmap(addr, length)) {
9582 perror("munmap");
9583 exit(1);
9584 }
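
Threading length through write_bytes()/read_bytes()/munmap() matters
because the test can now map a user-chosen size, and munmap() of
MAP_HUGETLB memory must use a hugepage-aligned length. A minimal
stand-alone version of the constraint (2 MiB is assumed as the default
hugepage size; hugepages must be reserved for the mmap to succeed)::

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    #define HPAGE_SIZE (2UL * 1024 * 1024)  /* assumption: x86-64 default */

    int main(void)
    {
            size_t length = HPAGE_SIZE;
            void *addr = mmap(NULL, length, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

            if (addr == MAP_FAILED) {
                    perror("mmap");  /* e.g. no hugepages reserved */
                    return 1;
            }
            /* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
            if (munmap(addr, length)) {
                    perror("munmap");
                    return 1;
            }
            puts("mapped and unmapped one huge page");
            return 0;
    }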
9585 diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c
9586 index 637b6d0ac0d0..11b2301f3aa3 100644
9587 --- a/tools/testing/selftests/vm/mlock2-tests.c
9588 +++ b/tools/testing/selftests/vm/mlock2-tests.c
9589 @@ -67,59 +67,6 @@ out:
9590 return ret;
9591 }
9592
9593 -static uint64_t get_pageflags(unsigned long addr)
9594 -{
9595 - FILE *file;
9596 - uint64_t pfn;
9597 - unsigned long offset;
9598 -
9599 - file = fopen("/proc/self/pagemap", "r");
9600 - if (!file) {
9601 - perror("fopen pagemap");
9602 - _exit(1);
9603 - }
9604 -
9605 - offset = addr / getpagesize() * sizeof(pfn);
9606 -
9607 - if (fseek(file, offset, SEEK_SET)) {
9608 - perror("fseek pagemap");
9609 - _exit(1);
9610 - }
9611 -
9612 - if (fread(&pfn, sizeof(pfn), 1, file) != 1) {
9613 - perror("fread pagemap");
9614 - _exit(1);
9615 - }
9616 -
9617 - fclose(file);
9618 - return pfn;
9619 -}
9620 -
9621 -static uint64_t get_kpageflags(unsigned long pfn)
9622 -{
9623 - uint64_t flags;
9624 - FILE *file;
9625 -
9626 - file = fopen("/proc/kpageflags", "r");
9627 - if (!file) {
9628 - perror("fopen kpageflags");
9629 - _exit(1);
9630 - }
9631 -
9632 - if (fseek(file, pfn * sizeof(flags), SEEK_SET)) {
9633 - perror("fseek kpageflags");
9634 - _exit(1);
9635 - }
9636 -
9637 - if (fread(&flags, sizeof(flags), 1, file) != 1) {
9638 - perror("fread kpageflags");
9639 - _exit(1);
9640 - }
9641 -
9642 - fclose(file);
9643 - return flags;
9644 -}
9645 -
9646 #define VMFLAGS "VmFlags:"
9647
9648 static bool is_vmflag_set(unsigned long addr, const char *vmflag)
9649 @@ -159,19 +106,13 @@ out:
9650 #define RSS "Rss:"
9651 #define LOCKED "lo"
9652
9653 -static bool is_vma_lock_on_fault(unsigned long addr)
9654 +static unsigned long get_value_for_name(unsigned long addr, const char *name)
9655 {
9656 - bool ret = false;
9657 - bool locked;
9658 - FILE *smaps = NULL;
9659 - unsigned long vma_size, vma_rss;
9660 char *line = NULL;
9661 - char *value;
9662 size_t size = 0;
9663 -
9664 - locked = is_vmflag_set(addr, LOCKED);
9665 - if (!locked)
9666 - goto out;
9667 + char *value_ptr;
9668 + FILE *smaps = NULL;
9669 + unsigned long value = -1UL;
9670
9671 smaps = seek_to_smaps_entry(addr);
9672 if (!smaps) {
9673 @@ -180,112 +121,70 @@ static bool is_vma_lock_on_fault(unsigned long addr)
9674 }
9675
9676 while (getline(&line, &size, smaps) > 0) {
9677 - if (!strstr(line, SIZE)) {
9678 + if (!strstr(line, name)) {
9679 free(line);
9680 line = NULL;
9681 size = 0;
9682 continue;
9683 }
9684
9685 - value = line + strlen(SIZE);
9686 - if (sscanf(value, "%lu kB", &vma_size) < 1) {
9687 + value_ptr = line + strlen(name);
9688 + if (sscanf(value_ptr, "%lu kB", &value) < 1) {
9689 printf("Unable to parse smaps entry for Size\n");
9690 goto out;
9691 }
9692 break;
9693 }
9694
9695 - while (getline(&line, &size, smaps) > 0) {
9696 - if (!strstr(line, RSS)) {
9697 - free(line);
9698 - line = NULL;
9699 - size = 0;
9700 - continue;
9701 - }
9702 -
9703 - value = line + strlen(RSS);
9704 - if (sscanf(value, "%lu kB", &vma_rss) < 1) {
9705 - printf("Unable to parse smaps entry for Rss\n");
9706 - goto out;
9707 - }
9708 - break;
9709 - }
9710 -
9711 - ret = locked && (vma_rss < vma_size);
9712 out:
9713 - free(line);
9714 if (smaps)
9715 fclose(smaps);
9716 - return ret;
9717 + free(line);
9718 + return value;
9719 }
9720
9721 -#define PRESENT_BIT 0x8000000000000000ULL
9722 -#define PFN_MASK 0x007FFFFFFFFFFFFFULL
9723 -#define UNEVICTABLE_BIT (1UL << 18)
9724 -
9725 -static int lock_check(char *map)
9726 +static bool is_vma_lock_on_fault(unsigned long addr)
9727 {
9728 - unsigned long page_size = getpagesize();
9729 - uint64_t page1_flags, page2_flags;
9730 + bool locked;
9731 + unsigned long vma_size, vma_rss;
9732
9733 - page1_flags = get_pageflags((unsigned long)map);
9734 - page2_flags = get_pageflags((unsigned long)map + page_size);
9735 + locked = is_vmflag_set(addr, LOCKED);
9736 + if (!locked)
9737 + return false;
9738
9739 - /* Both pages should be present */
9740 - if (((page1_flags & PRESENT_BIT) == 0) ||
9741 - ((page2_flags & PRESENT_BIT) == 0)) {
9742 - printf("Failed to make both pages present\n");
9743 - return 1;
9744 - }
9745 + vma_size = get_value_for_name(addr, SIZE);
9746 + vma_rss = get_value_for_name(addr, RSS);
9747
9748 - page1_flags = get_kpageflags(page1_flags & PFN_MASK);
9749 - page2_flags = get_kpageflags(page2_flags & PFN_MASK);
9750 + /* only one page is faulted in */
9751 + return (vma_rss < vma_size);
9752 +}
9753
9754 - /* Both pages should be unevictable */
9755 - if (((page1_flags & UNEVICTABLE_BIT) == 0) ||
9756 - ((page2_flags & UNEVICTABLE_BIT) == 0)) {
9757 - printf("Failed to make both pages unevictable\n");
9758 - return 1;
9759 - }
9760 +#define PRESENT_BIT 0x8000000000000000ULL
9761 +#define PFN_MASK 0x007FFFFFFFFFFFFFULL
9762 +#define UNEVICTABLE_BIT (1UL << 18)
9763
9764 - if (!is_vmflag_set((unsigned long)map, LOCKED)) {
9765 - printf("VMA flag %s is missing on page 1\n", LOCKED);
9766 - return 1;
9767 - }
9768 +static int lock_check(unsigned long addr)
9769 +{
9770 + bool locked;
9771 + unsigned long vma_size, vma_rss;
9772
9773 - if (!is_vmflag_set((unsigned long)map + page_size, LOCKED)) {
9774 - printf("VMA flag %s is missing on page 2\n", LOCKED);
9775 - return 1;
9776 - }
9777 + locked = is_vmflag_set(addr, LOCKED);
9778 + if (!locked)
9779 + return false;
9780
9781 - return 0;
9782 + vma_size = get_value_for_name(addr, SIZE);
9783 + vma_rss = get_value_for_name(addr, RSS);
9784 +
9785 + return (vma_rss == vma_size);
9786 }
9787
9788 static int unlock_lock_check(char *map)
9789 {
9790 - unsigned long page_size = getpagesize();
9791 - uint64_t page1_flags, page2_flags;
9792 -
9793 - page1_flags = get_pageflags((unsigned long)map);
9794 - page2_flags = get_pageflags((unsigned long)map + page_size);
9795 - page1_flags = get_kpageflags(page1_flags & PFN_MASK);
9796 - page2_flags = get_kpageflags(page2_flags & PFN_MASK);
9797 -
9798 - if ((page1_flags & UNEVICTABLE_BIT) || (page2_flags & UNEVICTABLE_BIT)) {
9799 - printf("A page is still marked unevictable after unlock\n");
9800 - return 1;
9801 - }
9802 -
9803 if (is_vmflag_set((unsigned long)map, LOCKED)) {
9804 printf("VMA flag %s is present on page 1 after unlock\n", LOCKED);
9805 return 1;
9806 }
9807
9808 - if (is_vmflag_set((unsigned long)map + page_size, LOCKED)) {
9809 - printf("VMA flag %s is present on page 2 after unlock\n", LOCKED);
9810 - return 1;
9811 - }
9812 -
9813 return 0;
9814 }
9815
9816 @@ -311,7 +210,7 @@ static int test_mlock_lock()
9817 goto unmap;
9818 }
9819
9820 - if (lock_check(map))
9821 + if (!lock_check((unsigned long)map))
9822 goto unmap;
9823
9824 /* Now unlock and recheck attributes */
9825 @@ -330,64 +229,18 @@ out:
9826
9827 static int onfault_check(char *map)
9828 {
9829 - unsigned long page_size = getpagesize();
9830 - uint64_t page1_flags, page2_flags;
9831 -
9832 - page1_flags = get_pageflags((unsigned long)map);
9833 - page2_flags = get_pageflags((unsigned long)map + page_size);
9834 -
9835 - /* Neither page should be present */
9836 - if ((page1_flags & PRESENT_BIT) || (page2_flags & PRESENT_BIT)) {
9837 - printf("Pages were made present by MLOCK_ONFAULT\n");
9838 - return 1;
9839 - }
9840 -
9841 *map = 'a';
9842 - page1_flags = get_pageflags((unsigned long)map);
9843 - page2_flags = get_pageflags((unsigned long)map + page_size);
9844 -
9845 - /* Only page 1 should be present */
9846 - if ((page1_flags & PRESENT_BIT) == 0) {
9847 - printf("Page 1 is not present after fault\n");
9848 - return 1;
9849 - } else if (page2_flags & PRESENT_BIT) {
9850 - printf("Page 2 was made present\n");
9851 - return 1;
9852 - }
9853 -
9854 - page1_flags = get_kpageflags(page1_flags & PFN_MASK);
9855 -
9856 - /* Page 1 should be unevictable */
9857 - if ((page1_flags & UNEVICTABLE_BIT) == 0) {
9858 - printf("Failed to make faulted page unevictable\n");
9859 - return 1;
9860 - }
9861 -
9862 if (!is_vma_lock_on_fault((unsigned long)map)) {
9863 printf("VMA is not marked for lock on fault\n");
9864 return 1;
9865 }
9866
9867 - if (!is_vma_lock_on_fault((unsigned long)map + page_size)) {
9868 - printf("VMA is not marked for lock on fault\n");
9869 - return 1;
9870 - }
9871 -
9872 return 0;
9873 }
9874
9875 static int unlock_onfault_check(char *map)
9876 {
9877 unsigned long page_size = getpagesize();
9878 - uint64_t page1_flags;
9879 -
9880 - page1_flags = get_pageflags((unsigned long)map);
9881 - page1_flags = get_kpageflags(page1_flags & PFN_MASK);
9882 -
9883 - if (page1_flags & UNEVICTABLE_BIT) {
9884 - printf("Page 1 is still marked unevictable after unlock\n");
9885 - return 1;
9886 - }
9887
9888 if (is_vma_lock_on_fault((unsigned long)map) ||
9889 is_vma_lock_on_fault((unsigned long)map + page_size)) {
9890 @@ -445,7 +298,6 @@ static int test_lock_onfault_of_present()
9891 char *map;
9892 int ret = 1;
9893 unsigned long page_size = getpagesize();
9894 - uint64_t page1_flags, page2_flags;
9895
9896 map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
9897 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
9898 @@ -465,17 +317,6 @@ static int test_lock_onfault_of_present()
9899 goto unmap;
9900 }
9901
9902 - page1_flags = get_pageflags((unsigned long)map);
9903 - page2_flags = get_pageflags((unsigned long)map + page_size);
9904 - page1_flags = get_kpageflags(page1_flags & PFN_MASK);
9905 - page2_flags = get_kpageflags(page2_flags & PFN_MASK);
9906 -
9907 - /* Page 1 should be unevictable */
9908 - if ((page1_flags & UNEVICTABLE_BIT) == 0) {
9909 - printf("Failed to make present page unevictable\n");
9910 - goto unmap;
9911 - }
9912 -
9913 if (!is_vma_lock_on_fault((unsigned long)map) ||
9914 !is_vma_lock_on_fault((unsigned long)map + page_size)) {
9915 printf("VMA with present pages is not marked lock on fault\n");
9916 @@ -507,7 +348,7 @@ static int test_munlockall()
9917 goto out;
9918 }
9919
9920 - if (lock_check(map))
9921 + if (!lock_check((unsigned long)map))
9922 goto unmap;
9923
9924 if (munlockall()) {
9925 @@ -549,7 +390,7 @@ static int test_munlockall()
9926 goto out;
9927 }
9928
9929 - if (lock_check(map))
9930 + if (!lock_check((unsigned long)map))
9931 goto unmap;
9932
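
The rewrite drops the pagemap/kpageflags probes (which need privileged
access on modern kernels) and infers lock state from smaps alone: a fully
locked VMA has Rss == Size, while a lock-on-fault VMA that has been
touched only partially has Rss < Size. A stand-alone sketch of the
"value for name" parsing, run here against /proc/self/status totals
rather than a single VMA's smaps entry::

    #include <stdio.h>
    #include <string.h>

    static long value_for_name(const char *path, const char *name)
    {
            char line[256];
            long value = -1;
            FILE *f = fopen(path, "r");

            if (!f)
                    return -1;
            while (fgets(line, sizeof(line), f)) {
                    if (strncmp(line, name, strlen(name)))
                            continue;
                    sscanf(line + strlen(name), "%ld kB", &value);
                    break;
            }
            fclose(f);
            return value;
    }

    int main(void)
    {
            printf("VmSize: %ld kB\n",
                   value_for_name("/proc/self/status", "VmSize:"));
            printf("VmRSS:  %ld kB\n",
                   value_for_name("/proc/self/status", "VmRSS:"));
            return 0;
    }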
9933 if (munlockall()) {
9934 diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
9935 index 6f22238f3217..12aaa063196e 100644
9936 --- a/tools/testing/selftests/x86/ptrace_syscall.c
9937 +++ b/tools/testing/selftests/x86/ptrace_syscall.c
9938 @@ -414,8 +414,12 @@ int main()
9939
9940 #if defined(__i386__) && (!defined(__GLIBC__) || __GLIBC__ > 2 || __GLIBC_MINOR__ >= 16)
9941 vsyscall32 = (void *)getauxval(AT_SYSINFO);
9942 - printf("[RUN]\tCheck AT_SYSINFO return regs\n");
9943 - test_sys32_regs(do_full_vsyscall32);
9944 + if (vsyscall32) {
9945 + printf("[RUN]\tCheck AT_SYSINFO return regs\n");
9946 + test_sys32_regs(do_full_vsyscall32);
9947 + } else {
9948 + printf("[SKIP]\tAT_SYSINFO is not available\n");
9949 + }
9950 #endif
9951
9952 test_ptrace_syscall_restart();
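
The guard matters because getauxval(AT_SYSINFO) returns 0 when the vDSO
entry point is absent (for example with the vdso disabled), and the test
previously jumped through that null pointer. The check in isolation
(glibc 2.16 or later provides <sys/auxv.h>)::

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
            unsigned long sysinfo = getauxval(AT_SYSINFO);

            if (sysinfo)
                    printf("[RUN]\tAT_SYSINFO = %#lx\n", sysinfo);
            else
                    printf("[SKIP]\tAT_SYSINFO is not available\n");
            return 0;
    }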