Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0134-5.4.35-all-fixes.patch

Revision 3515
Mon May 11 14:36:40 2020 UTC by niro
File size: 159503 bytes
-linux-5.4.35
1 niro 3515 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
2     index b5c933fa971f..94fa4a8de2ca 100644
3     --- a/Documentation/admin-guide/kernel-parameters.txt
4     +++ b/Documentation/admin-guide/kernel-parameters.txt
5     @@ -2741,7 +2741,7 @@
6     <name>,<region-number>[,<base>,<size>,<buswidth>,<altbuswidth>]
7    
8     mtdparts= [MTD]
9     - See drivers/mtd/cmdlinepart.c.
10     + See drivers/mtd/parsers/cmdlinepart.c
11    
12     multitce=off [PPC] This parameter disables the use of the pSeries
13     firmware feature for updating multiple TCE entries
14     diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt
15     index b739f92da58e..1f90eb39870b 100644
16     --- a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt
17     +++ b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt
18     @@ -118,7 +118,7 @@ Tegra194:
19     --------
20    
21     pcie@14180000 {
22     - compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
23     + compatible = "nvidia,tegra194-pcie";
24     power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
25     reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */
26     0x00 0x38000000 0x0 0x00040000 /* configuration space (256K) */
27     diff --git a/Makefile b/Makefile
28     index fdbc51db822a..6055a94aa4ce 100644
29     --- a/Makefile
30     +++ b/Makefile
31     @@ -1,7 +1,7 @@
32     # SPDX-License-Identifier: GPL-2.0
33     VERSION = 5
34     PATCHLEVEL = 4
35     -SUBLEVEL = 34
36     +SUBLEVEL = 35
37     EXTRAVERSION =
38     NAME = Kleptomaniac Octopus
39    
40     diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
41     index e6b4b8525f98..bc488df31511 100644
42     --- a/arch/arm/boot/dts/imx6qdl.dtsi
43     +++ b/arch/arm/boot/dts/imx6qdl.dtsi
44     @@ -1039,9 +1039,8 @@
45     compatible = "fsl,imx6q-fec";
46     reg = <0x02188000 0x4000>;
47     interrupt-names = "int0", "pps";
48     - interrupts-extended =
49     - <&intc 0 118 IRQ_TYPE_LEVEL_HIGH>,
50     - <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
51     + interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>,
52     + <0 119 IRQ_TYPE_LEVEL_HIGH>;
53     clocks = <&clks IMX6QDL_CLK_ENET>,
54     <&clks IMX6QDL_CLK_ENET>,
55     <&clks IMX6QDL_CLK_ENET_REF>;
56     diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi
57     index 5f51f8e5c1fa..d91f92f944c5 100644
58     --- a/arch/arm/boot/dts/imx6qp.dtsi
59     +++ b/arch/arm/boot/dts/imx6qp.dtsi
60     @@ -77,7 +77,6 @@
61     };
62    
63     &fec {
64     - /delete-property/interrupts-extended;
65     interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>,
66     <0 119 IRQ_TYPE_LEVEL_HIGH>;
67     };
68     diff --git a/arch/arm/boot/dts/rk3188-bqedison2qc.dts b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
69     index ad1afd403052..66a0ff196eb1 100644
70     --- a/arch/arm/boot/dts/rk3188-bqedison2qc.dts
71     +++ b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
72     @@ -58,20 +58,25 @@
73    
74     lvds-encoder {
75     compatible = "ti,sn75lvds83", "lvds-encoder";
76     - #address-cells = <1>;
77     - #size-cells = <0>;
78    
79     - port@0 {
80     - reg = <0>;
81     - lvds_in_vop0: endpoint {
82     - remote-endpoint = <&vop0_out_lvds>;
83     + ports {
84     + #address-cells = <1>;
85     + #size-cells = <0>;
86     +
87     + port@0 {
88     + reg = <0>;
89     +
90     + lvds_in_vop0: endpoint {
91     + remote-endpoint = <&vop0_out_lvds>;
92     + };
93     };
94     - };
95    
96     - port@1 {
97     - reg = <1>;
98     - lvds_out_panel: endpoint {
99     - remote-endpoint = <&panel_in_lvds>;
100     + port@1 {
101     + reg = <1>;
102     +
103     + lvds_out_panel: endpoint {
104     + remote-endpoint = <&panel_in_lvds>;
105     + };
106     };
107     };
108     };
109     @@ -465,7 +470,7 @@
110     non-removable;
111     pinctrl-names = "default";
112     pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_bus4>;
113     - vmmcq-supply = <&vccio_wl>;
114     + vqmmc-supply = <&vccio_wl>;
115     #address-cells = <1>;
116     #size-cells = <0>;
117     status = "okay";
118     diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
119     index 74bb053cf23c..4e485e45fbc5 100644
120     --- a/arch/arm/boot/dts/sun8i-a83t.dtsi
121     +++ b/arch/arm/boot/dts/sun8i-a83t.dtsi
122     @@ -313,7 +313,7 @@
123    
124     display_clocks: clock@1000000 {
125     compatible = "allwinner,sun8i-a83t-de2-clk";
126     - reg = <0x01000000 0x100000>;
127     + reg = <0x01000000 0x10000>;
128     clocks = <&ccu CLK_BUS_DE>,
129     <&ccu CLK_PLL_DE>;
130     clock-names = "bus",
131     diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
132     index 80f4dc34df34..339402601990 100644
133     --- a/arch/arm/boot/dts/sun8i-r40.dtsi
134     +++ b/arch/arm/boot/dts/sun8i-r40.dtsi
135     @@ -118,7 +118,7 @@
136     display_clocks: clock@1000000 {
137     compatible = "allwinner,sun8i-r40-de2-clk",
138     "allwinner,sun8i-h3-de2-clk";
139     - reg = <0x01000000 0x100000>;
140     + reg = <0x01000000 0x10000>;
141     clocks = <&ccu CLK_BUS_DE>,
142     <&ccu CLK_DE>;
143     clock-names = "bus",
144     diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
145     index 23ba56df38f7..2abcba35d27e 100644
146     --- a/arch/arm/boot/dts/sun8i-v3s.dtsi
147     +++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
148     @@ -105,7 +105,7 @@
149    
150     display_clocks: clock@1000000 {
151     compatible = "allwinner,sun8i-v3s-de2-clk";
152     - reg = <0x01000000 0x100000>;
153     + reg = <0x01000000 0x10000>;
154     clocks = <&ccu CLK_BUS_DE>,
155     <&ccu CLK_DE>;
156     clock-names = "bus",
157     diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
158     index 107eeafad20a..b3141c964c3a 100644
159     --- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
160     +++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
161     @@ -113,7 +113,7 @@
162    
163     display_clocks: clock@1000000 {
164     /* compatible is in per SoC .dtsi file */
165     - reg = <0x01000000 0x100000>;
166     + reg = <0x01000000 0x10000>;
167     clocks = <&ccu CLK_BUS_DE>,
168     <&ccu CLK_DE>;
169     clock-names = "bus",
170     diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
171     index 97dc386e3cb8..7216653424fd 100644
172     --- a/arch/arm/net/bpf_jit_32.c
173     +++ b/arch/arm/net/bpf_jit_32.c
174     @@ -929,7 +929,11 @@ static inline void emit_a32_rsh_i64(const s8 dst[],
175     rd = arm_bpf_get_reg64(dst, tmp, ctx);
176    
177     /* Do LSR operation */
178     - if (val < 32) {
179     + if (val == 0) {
180     + /* An immediate value of 0 encodes a shift amount of 32
181     + * for LSR. To shift by 0, don't do anything.
182     + */
183     + } else if (val < 32) {
184     emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
185     emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
186     emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
187     @@ -955,7 +959,11 @@ static inline void emit_a32_arsh_i64(const s8 dst[],
188     rd = arm_bpf_get_reg64(dst, tmp, ctx);
189    
190     /* Do ARSH operation */
191     - if (val < 32) {
192     + if (val == 0) {
193     + /* An immediate value of 0 encodes a shift amount of 32
194     + * for ASR. To shift by 0, don't do anything.
195     + */
196     + } else if (val < 32) {
197     emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
198     emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
199     emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
200     @@ -992,21 +1000,35 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
201     arm_bpf_put_reg32(dst_hi, rd[0], ctx);
202     }
203    
204     +static bool is_ldst_imm(s16 off, const u8 size)
205     +{
206     + s16 off_max = 0;
207     +
208     + switch (size) {
209     + case BPF_B:
210     + case BPF_W:
211     + off_max = 0xfff;
212     + break;
213     + case BPF_H:
214     + off_max = 0xff;
215     + break;
216     + case BPF_DW:
217     + /* Need to make sure off+4 does not overflow. */
218     + off_max = 0xfff - 4;
219     + break;
220     + }
221     + return -off_max <= off && off <= off_max;
222     +}
223     +
224     /* *(size *)(dst + off) = src */
225     static inline void emit_str_r(const s8 dst, const s8 src[],
226     - s32 off, struct jit_ctx *ctx, const u8 sz){
227     + s16 off, struct jit_ctx *ctx, const u8 sz){
228     const s8 *tmp = bpf2a32[TMP_REG_1];
229     - s32 off_max;
230     s8 rd;
231    
232     rd = arm_bpf_get_reg32(dst, tmp[1], ctx);
233    
234     - if (sz == BPF_H)
235     - off_max = 0xff;
236     - else
237     - off_max = 0xfff;
238     -
239     - if (off < 0 || off > off_max) {
240     + if (!is_ldst_imm(off, sz)) {
241     emit_a32_mov_i(tmp[0], off, ctx);
242     emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
243     rd = tmp[0];
244     @@ -1035,18 +1057,12 @@ static inline void emit_str_r(const s8 dst, const s8 src[],
245    
246     /* dst = *(size*)(src + off) */
247     static inline void emit_ldx_r(const s8 dst[], const s8 src,
248     - s32 off, struct jit_ctx *ctx, const u8 sz){
249     + s16 off, struct jit_ctx *ctx, const u8 sz){
250     const s8 *tmp = bpf2a32[TMP_REG_1];
251     const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
252     s8 rm = src;
253     - s32 off_max;
254     -
255     - if (sz == BPF_H)
256     - off_max = 0xff;
257     - else
258     - off_max = 0xfff;
259    
260     - if (off < 0 || off > off_max) {
261     + if (!is_ldst_imm(off, sz)) {
262     emit_a32_mov_i(tmp[0], off, ctx);
263     emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
264     rm = tmp[0];
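
The two bpf_jit_32.c hunks above rest on two ARM encoding facts: an immediate shift amount of #0 actually encodes a shift of 32 for LSR/ASR, so a BPF shift by zero must emit no instruction at all, and ARM load/store instructions only accept small immediate offsets (12 bits for byte/word, 8 bits for halfword, with the doubleword case needing room for the off + 4 access). The standalone sketch below mirrors the new is_ldst_imm() range check; the BPF size constants copy the kernel's encodings, while the function name and main() are local to this example and not part of the JIT.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* BPF size-field encodings, as used by the check in the hunk above. */
    #define BPF_W  0x00
    #define BPF_H  0x08
    #define BPF_B  0x10
    #define BPF_DW 0x18

    /* Can this offset be folded into the ARM load/store immediate, or does
     * the JIT have to materialize it in a temporary register first?
     */
    static bool fits_arm_ldst_imm(int16_t off, uint8_t size)
    {
        int16_t off_max = 0;

        switch (size) {
        case BPF_B:
        case BPF_W:
            off_max = 0xfff;       /* LDR/STR take a 12-bit immediate offset */
            break;
        case BPF_H:
            off_max = 0xff;        /* LDRH/STRH take an 8-bit immediate offset */
            break;
        case BPF_DW:
            off_max = 0xfff - 4;   /* the high word is accessed at off + 4 */
            break;
        }
        return -off_max <= off && off <= off_max;
    }

    int main(void)
    {
        printf("%d %d\n",
               fits_arm_ldst_imm(0xffb, BPF_DW),   /* 1: fits in the immediate */
               fits_arm_ldst_imm(0xffd, BPF_DW));  /* 0: JIT must use a temp reg */
        return 0;
    }
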
265     diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
266     index ba41c1b85887..367699c8c902 100644
267     --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
268     +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
269     @@ -227,7 +227,7 @@
270    
271     display_clocks: clock@0 {
272     compatible = "allwinner,sun50i-a64-de2-clk";
273     - reg = <0x0 0x100000>;
274     + reg = <0x0 0x10000>;
275     clocks = <&ccu CLK_BUS_DE>,
276     <&ccu CLK_DE>;
277     clock-names = "bus",
278     diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
279     index a211a046b2f2..b90d78a5724b 100644
280     --- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
281     +++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
282     @@ -367,6 +367,7 @@
283     pinctrl-0 = <&cp0_copper_eth_phy_reset>;
284     reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>;
285     reset-assert-us = <10000>;
286     + reset-deassert-us = <10000>;
287     };
288    
289     switch0: switch0@4 {
290     diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
291     index 3c0cf54f0aab..457b815d57f4 100644
292     --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
293     +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
294     @@ -1151,7 +1151,7 @@
295     };
296    
297     pcie@14100000 {
298     - compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
299     + compatible = "nvidia,tegra194-pcie";
300     power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX1A>;
301     reg = <0x00 0x14100000 0x0 0x00020000 /* appl registers (128K) */
302     0x00 0x30000000 0x0 0x00040000 /* configuration space (256K) */
303     @@ -1197,7 +1197,7 @@
304     };
305    
306     pcie@14120000 {
307     - compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
308     + compatible = "nvidia,tegra194-pcie";
309     power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX1A>;
310     reg = <0x00 0x14120000 0x0 0x00020000 /* appl registers (128K) */
311     0x00 0x32000000 0x0 0x00040000 /* configuration space (256K) */
312     @@ -1243,7 +1243,7 @@
313     };
314    
315     pcie@14140000 {
316     - compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
317     + compatible = "nvidia,tegra194-pcie";
318     power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX1A>;
319     reg = <0x00 0x14140000 0x0 0x00020000 /* appl registers (128K) */
320     0x00 0x34000000 0x0 0x00040000 /* configuration space (256K) */
321     @@ -1289,7 +1289,7 @@
322     };
323    
324     pcie@14160000 {
325     - compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
326     + compatible = "nvidia,tegra194-pcie";
327     power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>;
328     reg = <0x00 0x14160000 0x0 0x00020000 /* appl registers (128K) */
329     0x00 0x36000000 0x0 0x00040000 /* configuration space (256K) */
330     @@ -1335,7 +1335,7 @@
331     };
332    
333     pcie@14180000 {
334     - compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
335     + compatible = "nvidia,tegra194-pcie";
336     power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
337     reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */
338     0x00 0x38000000 0x0 0x00040000 /* configuration space (256K) */
339     @@ -1381,7 +1381,7 @@
340     };
341    
342     pcie@141a0000 {
343     - compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
344     + compatible = "nvidia,tegra194-pcie";
345     power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
346     reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */
347     0x00 0x3a000000 0x0 0x00040000 /* configuration space (256K) */
348     @@ -1430,6 +1430,105 @@
349     0x82000000 0x0 0x40000000 0x1f 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */
350     };
351    
352     + pcie_ep@14160000 {
353     + compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
354     + power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>;
355     + reg = <0x00 0x14160000 0x0 0x00020000 /* appl registers (128K) */
356     + 0x00 0x36040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
357     + 0x00 0x36080000 0x0 0x00040000 /* DBI reg space (256K) */
358     + 0x14 0x00000000 0x4 0x00000000>; /* Address Space (16G) */
359     + reg-names = "appl", "atu_dma", "dbi", "addr_space";
360     +
361     + status = "disabled";
362     +
363     + num-lanes = <4>;
364     + num-ib-windows = <2>;
365     + num-ob-windows = <8>;
366     +
367     + clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_4>;
368     + clock-names = "core";
369     +
370     + resets = <&bpmp TEGRA194_RESET_PEX0_CORE_4_APB>,
371     + <&bpmp TEGRA194_RESET_PEX0_CORE_4>;
372     + reset-names = "apb", "core";
373     +
374     + interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
375     + interrupt-names = "intr";
376     +
377     + nvidia,bpmp = <&bpmp 4>;
378     +
379     + nvidia,aspm-cmrt-us = <60>;
380     + nvidia,aspm-pwr-on-t-us = <20>;
381     + nvidia,aspm-l0s-entrance-latency-us = <3>;
382     + };
383     +
384     + pcie_ep@14180000 {
385     + compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
386     + power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
387     + reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */
388     + 0x00 0x38040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
389     + 0x00 0x38080000 0x0 0x00040000 /* DBI reg space (256K) */
390     + 0x18 0x00000000 0x4 0x00000000>; /* Address Space (16G) */
391     + reg-names = "appl", "atu_dma", "dbi", "addr_space";
392     +
393     + status = "disabled";
394     +
395     + num-lanes = <8>;
396     + num-ib-windows = <2>;
397     + num-ob-windows = <8>;
398     +
399     + clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_0>;
400     + clock-names = "core";
401     +
402     + resets = <&bpmp TEGRA194_RESET_PEX0_CORE_0_APB>,
403     + <&bpmp TEGRA194_RESET_PEX0_CORE_0>;
404     + reset-names = "apb", "core";
405     +
406     + interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
407     + interrupt-names = "intr";
408     +
409     + nvidia,bpmp = <&bpmp 0>;
410     +
411     + nvidia,aspm-cmrt-us = <60>;
412     + nvidia,aspm-pwr-on-t-us = <20>;
413     + nvidia,aspm-l0s-entrance-latency-us = <3>;
414     + };
415     +
416     + pcie_ep@141a0000 {
417     + compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
418     + power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
419     + reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */
420     + 0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
421     + 0x00 0x3a080000 0x0 0x00040000 /* DBI reg space (256K) */
422     + 0x1c 0x00000000 0x4 0x00000000>; /* Address Space (16G) */
423     + reg-names = "appl", "atu_dma", "dbi", "addr_space";
424     +
425     + status = "disabled";
426     +
427     + num-lanes = <8>;
428     + num-ib-windows = <2>;
429     + num-ob-windows = <8>;
430     +
431     + pinctrl-names = "default";
432     + pinctrl-0 = <&clkreq_c5_bi_dir_state>;
433     +
434     + clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>;
435     + clock-names = "core";
436     +
437     + resets = <&bpmp TEGRA194_RESET_PEX1_CORE_5_APB>,
438     + <&bpmp TEGRA194_RESET_PEX1_CORE_5>;
439     + reset-names = "apb", "core";
440     +
441     + interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
442     + interrupt-names = "intr";
443     +
444     + nvidia,bpmp = <&bpmp 5>;
445     +
446     + nvidia,aspm-cmrt-us = <60>;
447     + nvidia,aspm-pwr-on-t-us = <20>;
448     + nvidia,aspm-l0s-entrance-latency-us = <3>;
449     + };
450     +
451     sysram@40000000 {
452     compatible = "nvidia,tegra194-sysram", "mmio-sram";
453     reg = <0x0 0x40000000 0x0 0x50000>;
454     diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h
455     index f35a9f3315ee..5056ebb902d1 100644
456     --- a/arch/csky/abiv1/inc/abi/entry.h
457     +++ b/arch/csky/abiv1/inc/abi/entry.h
458     @@ -172,10 +172,7 @@
459     addi r6, 0xe
460     cpwcr r6, cpcr30
461    
462     - lsri r6, 28
463     - addi r6, 2
464     - lsli r6, 28
465     - addi r6, 0xe
466     + movi r6, 0
467     cpwcr r6, cpcr31
468     .endm
469    
470     diff --git a/arch/csky/abiv2/fpu.c b/arch/csky/abiv2/fpu.c
471     index 86d187d4e5af..5acc5c2e544e 100644
472     --- a/arch/csky/abiv2/fpu.c
473     +++ b/arch/csky/abiv2/fpu.c
474     @@ -10,11 +10,6 @@
475     #define MTCR_DIST 0xC0006420
476     #define MFCR_DIST 0xC0006020
477    
478     -void __init init_fpu(void)
479     -{
480     - mtcr("cr<1, 2>", 0);
481     -}
482     -
483     /*
484     * fpu_libc_helper() is to help libc to excute:
485     * - mfcr %a, cr<1, 2>
486     diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h
487     index 94a7a58765df..111973c6c713 100644
488     --- a/arch/csky/abiv2/inc/abi/entry.h
489     +++ b/arch/csky/abiv2/inc/abi/entry.h
490     @@ -230,11 +230,8 @@
491     addi r6, 0x1ce
492     mtcr r6, cr<30, 15> /* Set MSA0 */
493    
494     - lsri r6, 28
495     - addi r6, 2
496     - lsli r6, 28
497     - addi r6, 0x1ce
498     - mtcr r6, cr<31, 15> /* Set MSA1 */
499     + movi r6, 0
500     + mtcr r6, cr<31, 15> /* Clr MSA1 */
501    
502     /* enable MMU */
503     mfcr r6, cr18
504     diff --git a/arch/csky/abiv2/inc/abi/fpu.h b/arch/csky/abiv2/inc/abi/fpu.h
505     index 22ca3cf2794a..09e2700a3693 100644
506     --- a/arch/csky/abiv2/inc/abi/fpu.h
507     +++ b/arch/csky/abiv2/inc/abi/fpu.h
508     @@ -9,7 +9,8 @@
509    
510     int fpu_libc_helper(struct pt_regs *regs);
511     void fpu_fpe(struct pt_regs *regs);
512     -void __init init_fpu(void);
513     +
514     +static inline void init_fpu(void) { mtcr("cr<1, 2>", 0); }
515    
516     void save_to_user_fp(struct user_fp *user_fp);
517     void restore_from_user_fp(struct user_fp *user_fp);
518     diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
519     index 21e0bd5293dd..c6bcd7f7c720 100644
520     --- a/arch/csky/include/asm/processor.h
521     +++ b/arch/csky/include/asm/processor.h
522     @@ -43,6 +43,7 @@ extern struct cpuinfo_csky cpu_data[];
523     struct thread_struct {
524     unsigned long ksp; /* kernel stack pointer */
525     unsigned long sr; /* saved status register */
526     + unsigned long trap_no; /* saved status register */
527    
528     /* FPU regs */
529     struct user_fp __aligned(16) user_fp;
530     diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S
531     index 61989f9241c0..17ed9d250480 100644
532     --- a/arch/csky/kernel/head.S
533     +++ b/arch/csky/kernel/head.S
534     @@ -21,6 +21,11 @@ END(_start)
535     ENTRY(_start_smp_secondary)
536     SETUP_MMU
537    
538     + /* copy msa1 from CPU0 */
539     + lrw r6, secondary_msa1
540     + ld.w r6, (r6, 0)
541     + mtcr r6, cr<31, 15>
542     +
543     /* set stack point */
544     lrw r6, secondary_stack
545     ld.w r6, (r6, 0)
546     diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
547     index 23ee604aafdb..2c1e253abb74 100644
548     --- a/arch/csky/kernel/setup.c
549     +++ b/arch/csky/kernel/setup.c
550     @@ -24,26 +24,9 @@ struct screen_info screen_info = {
551     };
552     #endif
553    
554     -phys_addr_t __init_memblock memblock_end_of_REG0(void)
555     -{
556     - return (memblock.memory.regions[0].base +
557     - memblock.memory.regions[0].size);
558     -}
559     -
560     -phys_addr_t __init_memblock memblock_start_of_REG1(void)
561     -{
562     - return memblock.memory.regions[1].base;
563     -}
564     -
565     -size_t __init_memblock memblock_size_of_REG1(void)
566     -{
567     - return memblock.memory.regions[1].size;
568     -}
569     -
570     static void __init csky_memblock_init(void)
571     {
572     unsigned long zone_size[MAX_NR_ZONES];
573     - unsigned long zhole_size[MAX_NR_ZONES];
574     signed long size;
575    
576     memblock_reserve(__pa(_stext), _end - _stext);
577     @@ -57,54 +40,36 @@ static void __init csky_memblock_init(void)
578     memblock_dump_all();
579    
580     memset(zone_size, 0, sizeof(zone_size));
581     - memset(zhole_size, 0, sizeof(zhole_size));
582    
583     min_low_pfn = PFN_UP(memblock_start_of_DRAM());
584     - max_pfn = PFN_DOWN(memblock_end_of_DRAM());
585     -
586     - max_low_pfn = PFN_UP(memblock_end_of_REG0());
587     - if (max_low_pfn == 0)
588     - max_low_pfn = max_pfn;
589     + max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM());
590    
591     size = max_pfn - min_low_pfn;
592    
593     - if (memblock.memory.cnt > 1) {
594     - zone_size[ZONE_NORMAL] =
595     - PFN_DOWN(memblock_start_of_REG1()) - min_low_pfn;
596     - zhole_size[ZONE_NORMAL] =
597     - PFN_DOWN(memblock_start_of_REG1()) - max_low_pfn;
598     + if (size <= PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET))
599     + zone_size[ZONE_NORMAL] = size;
600     + else if (size < PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET)) {
601     + zone_size[ZONE_NORMAL] =
602     + PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET);
603     + max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
604     } else {
605     - if (size <= PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET))
606     - zone_size[ZONE_NORMAL] = max_pfn - min_low_pfn;
607     - else {
608     - zone_size[ZONE_NORMAL] =
609     + zone_size[ZONE_NORMAL] =
610     PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
611     - max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
612     - }
613     + max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
614     + write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE);
615     }
616    
617     #ifdef CONFIG_HIGHMEM
618     - size = 0;
619     - if (memblock.memory.cnt > 1) {
620     - size = PFN_DOWN(memblock_size_of_REG1());
621     - highstart_pfn = PFN_DOWN(memblock_start_of_REG1());
622     - } else {
623     - size = max_pfn - min_low_pfn -
624     - PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
625     - highstart_pfn = min_low_pfn +
626     - PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
627     - }
628     -
629     - if (size > 0)
630     - zone_size[ZONE_HIGHMEM] = size;
631     + zone_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
632    
633     - highend_pfn = max_pfn;
634     + highstart_pfn = max_low_pfn;
635     + highend_pfn = max_pfn;
636     #endif
637     memblock_set_current_limit(PFN_PHYS(max_low_pfn));
638    
639     dma_contiguous_reserve(0);
640    
641     - free_area_init_node(0, zone_size, min_low_pfn, zhole_size);
642     + free_area_init_node(0, zone_size, min_low_pfn, NULL);
643     }
644    
645     void __init setup_arch(char **cmdline_p)
646     diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
647     index 0bb0954d5570..b5c5bc3afeb5 100644
648     --- a/arch/csky/kernel/smp.c
649     +++ b/arch/csky/kernel/smp.c
650     @@ -22,6 +22,9 @@
651     #include <asm/sections.h>
652     #include <asm/mmu_context.h>
653     #include <asm/pgalloc.h>
654     +#ifdef CONFIG_CPU_HAS_FPU
655     +#include <abi/fpu.h>
656     +#endif
657    
658     struct ipi_data_struct {
659     unsigned long bits ____cacheline_aligned;
660     @@ -156,6 +159,8 @@ volatile unsigned int secondary_hint;
661     volatile unsigned int secondary_ccr;
662     volatile unsigned int secondary_stack;
663    
664     +unsigned long secondary_msa1;
665     +
666     int __cpu_up(unsigned int cpu, struct task_struct *tidle)
667     {
668     unsigned long mask = 1 << cpu;
669     @@ -164,6 +169,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
670     (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
671     secondary_hint = mfcr("cr31");
672     secondary_ccr = mfcr("cr18");
673     + secondary_msa1 = read_mmu_msa1();
674    
675     /*
676     * Because other CPUs are in reset status, we must flush data
677     diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
678     index b057480e7463..63715cb90ee9 100644
679     --- a/arch/csky/kernel/traps.c
680     +++ b/arch/csky/kernel/traps.c
681     @@ -115,8 +115,9 @@ asmlinkage void trap_c(struct pt_regs *regs)
682     int sig;
683     unsigned long vector;
684     siginfo_t info;
685     + struct task_struct *tsk = current;
686    
687     - vector = (mfcr("psr") >> 16) & 0xff;
688     + vector = (regs->sr >> 16) & 0xff;
689    
690     switch (vector) {
691     case VEC_ZERODIV:
692     @@ -129,6 +130,7 @@ asmlinkage void trap_c(struct pt_regs *regs)
693     sig = SIGTRAP;
694     break;
695     case VEC_ILLEGAL:
696     + tsk->thread.trap_no = vector;
697     die_if_kernel("Kernel mode ILLEGAL", regs, vector);
698     #ifndef CONFIG_CPU_NO_USER_BKPT
699     if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT)
700     @@ -146,16 +148,20 @@ asmlinkage void trap_c(struct pt_regs *regs)
701     sig = SIGTRAP;
702     break;
703     case VEC_ACCESS:
704     + tsk->thread.trap_no = vector;
705     return buserr(regs);
706     #ifdef CONFIG_CPU_NEED_SOFTALIGN
707     case VEC_ALIGN:
708     + tsk->thread.trap_no = vector;
709     return csky_alignment(regs);
710     #endif
711     #ifdef CONFIG_CPU_HAS_FPU
712     case VEC_FPE:
713     + tsk->thread.trap_no = vector;
714     die_if_kernel("Kernel mode FPE", regs, vector);
715     return fpu_fpe(regs);
716     case VEC_PRIV:
717     + tsk->thread.trap_no = vector;
718     die_if_kernel("Kernel mode PRIV", regs, vector);
719     if (fpu_libc_helper(regs))
720     return;
721     @@ -164,5 +170,8 @@ asmlinkage void trap_c(struct pt_regs *regs)
722     sig = SIGSEGV;
723     break;
724     }
725     +
726     + tsk->thread.trap_no = vector;
727     +
728     send_sig(sig, current, 0);
729     }
730     diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
731     index f76618b630f9..562c7f708749 100644
732     --- a/arch/csky/mm/fault.c
733     +++ b/arch/csky/mm/fault.c
734     @@ -179,11 +179,14 @@ bad_area:
735     bad_area_nosemaphore:
736     /* User mode accesses just cause a SIGSEGV */
737     if (user_mode(regs)) {
738     + tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
739     force_sig_fault(SIGSEGV, si_code, (void __user *)address);
740     return;
741     }
742    
743     no_context:
744     + tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
745     +
746     /* Are we prepared to handle this kernel fault? */
747     if (fixup_exception(regs))
748     return;
749     @@ -198,6 +201,8 @@ no_context:
750     die_if_kernel("Oops", regs, write);
751    
752     out_of_memory:
753     + tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
754     +
755     /*
756     * We ran out of memory, call the OOM killer, and return the userspace
757     * (which will retry the fault, or kill us if we got oom-killed).
758     @@ -206,6 +211,8 @@ out_of_memory:
759     return;
760    
761     do_sigbus:
762     + tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
763     +
764     up_read(&mm->mmap_sem);
765    
766     /* Kernel mode? Handle exceptions or die */
767     diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
768     index eba9d4ee4baf..689664cd4e79 100644
769     --- a/arch/powerpc/kernel/prom_init.c
770     +++ b/arch/powerpc/kernel/prom_init.c
771     @@ -1761,6 +1761,9 @@ static void __init prom_rtas_os_term(char *str)
772     if (token == 0)
773     prom_panic("Could not get token for ibm,os-term\n");
774     os_term_args.token = cpu_to_be32(token);
775     + os_term_args.nargs = cpu_to_be32(1);
776     + os_term_args.nret = cpu_to_be32(1);
777     + os_term_args.args[0] = cpu_to_be32(__pa(str));
778     prom_rtas_hcall((uint64_t)&os_term_args);
779     }
780     #endif /* CONFIG_PPC_SVM */
781     diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
782     index 36abbe3c346d..e2183fed947d 100644
783     --- a/arch/powerpc/kvm/book3s_hv.c
784     +++ b/arch/powerpc/kvm/book3s_hv.c
785     @@ -3623,6 +3623,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
786     if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
787     kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
788     kvmppc_nested_cede(vcpu);
789     + kvmppc_set_gpr(vcpu, 3, 0);
790     trap = 0;
791     }
792     } else {
793     diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
794     index 9cd6f3e1000b..09a0594350b6 100644
795     --- a/arch/powerpc/platforms/maple/setup.c
796     +++ b/arch/powerpc/platforms/maple/setup.c
797     @@ -294,23 +294,6 @@ static int __init maple_probe(void)
798     return 1;
799     }
800    
801     -define_machine(maple) {
802     - .name = "Maple",
803     - .probe = maple_probe,
804     - .setup_arch = maple_setup_arch,
805     - .init_IRQ = maple_init_IRQ,
806     - .pci_irq_fixup = maple_pci_irq_fixup,
807     - .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
808     - .restart = maple_restart,
809     - .halt = maple_halt,
810     - .get_boot_time = maple_get_boot_time,
811     - .set_rtc_time = maple_set_rtc_time,
812     - .get_rtc_time = maple_get_rtc_time,
813     - .calibrate_decr = generic_calibrate_decr,
814     - .progress = maple_progress,
815     - .power_save = power4_idle,
816     -};
817     -
818     #ifdef CONFIG_EDAC
819     /*
820     * Register a platform device for CPC925 memory controller on
821     @@ -367,3 +350,20 @@ static int __init maple_cpc925_edac_setup(void)
822     }
823     machine_device_initcall(maple, maple_cpc925_edac_setup);
824     #endif
825     +
826     +define_machine(maple) {
827     + .name = "Maple",
828     + .probe = maple_probe,
829     + .setup_arch = maple_setup_arch,
830     + .init_IRQ = maple_init_IRQ,
831     + .pci_irq_fixup = maple_pci_irq_fixup,
832     + .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
833     + .restart = maple_restart,
834     + .halt = maple_halt,
835     + .get_boot_time = maple_get_boot_time,
836     + .set_rtc_time = maple_set_rtc_time,
837     + .get_rtc_time = maple_get_rtc_time,
838     + .calibrate_decr = generic_calibrate_decr,
839     + .progress = maple_progress,
840     + .power_save = power4_idle,
841     +};
842     diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
843     index fdb8083e7870..229e1e2f8253 100644
844     --- a/arch/s390/kernel/perf_cpum_sf.c
845     +++ b/arch/s390/kernel/perf_cpum_sf.c
846     @@ -1589,6 +1589,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
847     perf_aux_output_end(handle, size);
848     num_sdb = aux->sfb.num_sdb;
849    
850     + num_sdb = aux->sfb.num_sdb;
851     while (!done) {
852     /* Get an output handle */
853     aux = perf_aux_output_begin(handle, cpuhw->event);
854     diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
855     index 6ebc2117c66c..91b9b3f73de6 100644
856     --- a/arch/s390/kernel/processor.c
857     +++ b/arch/s390/kernel/processor.c
858     @@ -165,8 +165,9 @@ static void show_cpu_mhz(struct seq_file *m, unsigned long n)
859     static int show_cpuinfo(struct seq_file *m, void *v)
860     {
861     unsigned long n = (unsigned long) v - 1;
862     + unsigned long first = cpumask_first(cpu_online_mask);
863    
864     - if (!n)
865     + if (n == first)
866     show_cpu_summary(m, v);
867     if (!machine_has_cpu_mhz)
868     return 0;
869     @@ -179,6 +180,8 @@ static inline void *c_update(loff_t *pos)
870     {
871     if (*pos)
872     *pos = cpumask_next(*pos - 1, cpu_online_mask);
873     + else
874     + *pos = cpumask_first(cpu_online_mask);
875     return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
876     }
877    
878     diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
879     index 9d9ab77d02dd..364e3a89c096 100644
880     --- a/arch/s390/mm/gmap.c
881     +++ b/arch/s390/mm/gmap.c
882     @@ -1844,6 +1844,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
883     goto out_free;
884     } else if (*table & _REGION_ENTRY_ORIGIN) {
885     rc = -EAGAIN; /* Race with shadow */
886     + goto out_free;
887     }
888     crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
889     /* mark as invalid as long as the parent table is not protected */
890     diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
891     index 6627d7c30f37..0f5d0a699a49 100644
892     --- a/arch/um/drivers/ubd_kern.c
893     +++ b/arch/um/drivers/ubd_kern.c
894     @@ -1606,7 +1606,9 @@ int io_thread(void *arg)
895     written = 0;
896    
897     do {
898     - res = os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n);
899     + res = os_write_file(kernel_fd,
900     + ((char *) io_req_buffer) + written,
901     + n - written);
902     if (res >= 0) {
903     written += res;
904     }
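
The one-line ubd_kern.c fix above is the classic partial-write bug: the loop advanced the buffer pointer by `written` but kept passing the full length `n`, so after a short write the next call could run past the end of io_req_buffer. A minimal userspace sketch of the corrected loop shape, using plain write(2) as a stand-in for UML's os_write_file(); write_all() is a name local to this example.

    #include <errno.h>
    #include <stddef.h>
    #include <unistd.h>

    /* Write exactly n bytes, retrying after short writes and EINTR. */
    int write_all(int fd, const char *buf, size_t n)
    {
        size_t written = 0;

        while (written < n) {
            /* The fix above: request only the *remaining* bytes each time. */
            ssize_t res = write(fd, buf + written, n - written);

            if (res < 0) {
                if (errno == EINTR)
                    continue;      /* interrupted: retry the same range */
                return -errno;     /* real error */
            }
            written += res;
        }
        return 0;
    }

Before the fix the call corresponded to write(fd, buf + written, n), which on every retry asks for `written` more bytes than remain in the buffer.
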
905     diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
906     index 5133e3afb96f..3996937e2c0d 100644
907     --- a/arch/um/os-Linux/file.c
908     +++ b/arch/um/os-Linux/file.c
909     @@ -8,6 +8,7 @@
910     #include <errno.h>
911     #include <fcntl.h>
912     #include <signal.h>
913     +#include <linux/falloc.h>
914     #include <sys/ioctl.h>
915     #include <sys/mount.h>
916     #include <sys/socket.h>
917     diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
918     index 2db3972c0e0f..79583bac9ac4 100644
919     --- a/arch/x86/hyperv/hv_init.c
920     +++ b/arch/x86/hyperv/hv_init.c
921     @@ -19,6 +19,7 @@
922     #include <linux/mm.h>
923     #include <linux/hyperv.h>
924     #include <linux/slab.h>
925     +#include <linux/kernel.h>
926     #include <linux/cpuhotplug.h>
927     #include <clocksource/hyperv_timer.h>
928    
929     @@ -354,11 +355,14 @@ void hyperv_cleanup(void)
930     }
931     EXPORT_SYMBOL_GPL(hyperv_cleanup);
932    
933     -void hyperv_report_panic(struct pt_regs *regs, long err)
934     +void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
935     {
936     static bool panic_reported;
937     u64 guest_id;
938    
939     + if (in_die && !panic_on_oops)
940     + return;
941     +
942     /*
943     * We prefer to report panic on 'die' chain as we have proper
944     * registers to report, but if we miss it (e.g. on BUG()) we need
945     diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
946     index caf2edccbad2..49ae4e1ac9cd 100644
947     --- a/arch/x86/kernel/acpi/cstate.c
948     +++ b/arch/x86/kernel/acpi/cstate.c
949     @@ -161,7 +161,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
950    
951     /* Make sure we are running on right CPU */
952    
953     - retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx);
954     + retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx,
955     + false);
956     if (retval == 0) {
957     /* Use the hint in CST */
958     percpu_entry->states[cx->index].eax = cx->address;
959     diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
960     index c656d92cd708..fc8814faae62 100644
961     --- a/arch/x86/kernel/cpu/mshyperv.c
962     +++ b/arch/x86/kernel/cpu/mshyperv.c
963     @@ -263,6 +263,16 @@ static void __init ms_hyperv_init_platform(void)
964     cpuid_eax(HYPERV_CPUID_NESTED_FEATURES);
965     }
966    
967     + /*
968     + * Hyper-V expects to get crash register data or kmsg when
969     + * crash enlightment is available and system crashes. Set
970     + * crash_kexec_post_notifiers to be true to make sure that
971     + * calling crash enlightment interface before running kdump
972     + * kernel.
973     + */
974     + if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE)
975     + crash_kexec_post_notifiers = true;
976     +
977     #ifdef CONFIG_X86_LOCAL_APIC
978     if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
979     ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
980     diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
981     index 5611769e1569..12b707a4e52f 100644
982     --- a/block/bfq-cgroup.c
983     +++ b/block/bfq-cgroup.c
984     @@ -697,10 +697,7 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
985    
986     if (entity->sched_data != &bfqg->sched_data) {
987     bic_set_bfqq(bic, NULL, 0);
988     - bfq_log_bfqq(bfqd, async_bfqq,
989     - "bic_change_group: %p %d",
990     - async_bfqq, async_bfqq->ref);
991     - bfq_put_queue(async_bfqq);
992     + bfq_release_process_ref(bfqd, async_bfqq);
993     }
994     }
995    
996     @@ -801,39 +798,53 @@ static void bfq_flush_idle_tree(struct bfq_service_tree *st)
997     /**
998     * bfq_reparent_leaf_entity - move leaf entity to the root_group.
999     * @bfqd: the device data structure with the root group.
1000     - * @entity: the entity to move.
1001     + * @entity: the entity to move, if entity is a leaf; or the parent entity
1002     + * of an active leaf entity to move, if entity is not a leaf.
1003     */
1004     static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
1005     - struct bfq_entity *entity)
1006     + struct bfq_entity *entity,
1007     + int ioprio_class)
1008     {
1009     - struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1010     + struct bfq_queue *bfqq;
1011     + struct bfq_entity *child_entity = entity;
1012     +
1013     + while (child_entity->my_sched_data) { /* leaf not reached yet */
1014     + struct bfq_sched_data *child_sd = child_entity->my_sched_data;
1015     + struct bfq_service_tree *child_st = child_sd->service_tree +
1016     + ioprio_class;
1017     + struct rb_root *child_active = &child_st->active;
1018     +
1019     + child_entity = bfq_entity_of(rb_first(child_active));
1020     +
1021     + if (!child_entity)
1022     + child_entity = child_sd->in_service_entity;
1023     + }
1024    
1025     + bfqq = bfq_entity_to_bfqq(child_entity);
1026     bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
1027     }
1028    
1029     /**
1030     - * bfq_reparent_active_entities - move to the root group all active
1031     - * entities.
1032     + * bfq_reparent_active_queues - move to the root group all active queues.
1033     * @bfqd: the device data structure with the root group.
1034     * @bfqg: the group to move from.
1035     - * @st: the service tree with the entities.
1036     + * @st: the service tree to start the search from.
1037     */
1038     -static void bfq_reparent_active_entities(struct bfq_data *bfqd,
1039     - struct bfq_group *bfqg,
1040     - struct bfq_service_tree *st)
1041     +static void bfq_reparent_active_queues(struct bfq_data *bfqd,
1042     + struct bfq_group *bfqg,
1043     + struct bfq_service_tree *st,
1044     + int ioprio_class)
1045     {
1046     struct rb_root *active = &st->active;
1047     - struct bfq_entity *entity = NULL;
1048     -
1049     - if (!RB_EMPTY_ROOT(&st->active))
1050     - entity = bfq_entity_of(rb_first(active));
1051     + struct bfq_entity *entity;
1052    
1053     - for (; entity ; entity = bfq_entity_of(rb_first(active)))
1054     - bfq_reparent_leaf_entity(bfqd, entity);
1055     + while ((entity = bfq_entity_of(rb_first(active))))
1056     + bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);
1057    
1058     if (bfqg->sched_data.in_service_entity)
1059     bfq_reparent_leaf_entity(bfqd,
1060     - bfqg->sched_data.in_service_entity);
1061     + bfqg->sched_data.in_service_entity,
1062     + ioprio_class);
1063     }
1064    
1065     /**
1066     @@ -865,13 +876,6 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
1067     for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
1068     st = bfqg->sched_data.service_tree + i;
1069    
1070     - /*
1071     - * The idle tree may still contain bfq_queues belonging
1072     - * to exited task because they never migrated to a different
1073     - * cgroup from the one being destroyed now.
1074     - */
1075     - bfq_flush_idle_tree(st);
1076     -
1077     /*
1078     * It may happen that some queues are still active
1079     * (busy) upon group destruction (if the corresponding
1080     @@ -884,7 +888,20 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
1081     * There is no need to put the sync queues, as the
1082     * scheduler has taken no reference.
1083     */
1084     - bfq_reparent_active_entities(bfqd, bfqg, st);
1085     + bfq_reparent_active_queues(bfqd, bfqg, st, i);
1086     +
1087     + /*
1088     + * The idle tree may still contain bfq_queues
1089     + * belonging to exited task because they never
1090     + * migrated to a different cgroup from the one being
1091     + * destroyed now. In addition, even
1092     + * bfq_reparent_active_queues() may happen to add some
1093     + * entities to the idle tree. It happens if, in some
1094     + * of the calls to bfq_bfqq_move() performed by
1095     + * bfq_reparent_active_queues(), the queue to move is
1096     + * empty and gets expired.
1097     + */
1098     + bfq_flush_idle_tree(st);
1099     }
1100    
1101     __bfq_deactivate_entity(entity, false);
1102     diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
1103     index 5a825f9f1ea0..88497bff1135 100644
1104     --- a/block/bfq-iosched.c
1105     +++ b/block/bfq-iosched.c
1106     @@ -2717,8 +2717,6 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
1107     }
1108     }
1109    
1110     -
1111     -static
1112     void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1113     {
1114     /*
1115     diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
1116     index 1553a4e8f7ad..c0232975075d 100644
1117     --- a/block/bfq-iosched.h
1118     +++ b/block/bfq-iosched.h
1119     @@ -950,6 +950,7 @@ void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1120     bool compensate, enum bfqq_expiration reason);
1121     void bfq_put_queue(struct bfq_queue *bfqq);
1122     void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
1123     +void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq);
1124     void bfq_schedule_dispatch(struct bfq_data *bfqd);
1125     void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
1126    
1127     diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
1128     index 7da1864798a0..ecaa28733dc6 100644
1129     --- a/drivers/acpi/acpica/acnamesp.h
1130     +++ b/drivers/acpi/acpica/acnamesp.h
1131     @@ -256,6 +256,8 @@ u32
1132     acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
1133     char *full_path, u32 path_size, u8 no_trailing);
1134    
1135     +void acpi_ns_normalize_pathname(char *original_path);
1136     +
1137     char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
1138     u8 no_trailing);
1139    
1140     diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
1141     index 55a7e10998d8..1ef053585bbb 100644
1142     --- a/drivers/acpi/acpica/dbinput.c
1143     +++ b/drivers/acpi/acpica/dbinput.c
1144     @@ -464,16 +464,14 @@ char *acpi_db_get_next_token(char *string,
1145     return (NULL);
1146     }
1147    
1148     - /* Remove any spaces at the beginning */
1149     + /* Remove any spaces at the beginning, ignore blank lines */
1150    
1151     - if (*string == ' ') {
1152     - while (*string && (*string == ' ')) {
1153     - string++;
1154     - }
1155     + while (*string && isspace(*string)) {
1156     + string++;
1157     + }
1158    
1159     - if (!(*string)) {
1160     - return (NULL);
1161     - }
1162     + if (!(*string)) {
1163     + return (NULL);
1164     }
1165    
1166     switch (*string) {
1167     @@ -551,7 +549,7 @@ char *acpi_db_get_next_token(char *string,
1168    
1169     /* Find end of token */
1170    
1171     - while (*string && (*string != ' ')) {
1172     + while (*string && !isspace(*string)) {
1173     string++;
1174     }
1175     break;
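
The acpi_db_get_next_token() change above widens the old literal-space checks to isspace(), so leading tabs and blank lines are skipped and a token ends at any whitespace character, not just ' '. A tiny standalone tokenizer showing the same behaviour; next_token() and main() are local to this example, not ACPICA functions.

    #include <ctype.h>
    #include <stdio.h>

    /* Return the next whitespace-delimited token in s (modified in place),
     * or NULL for a blank line; *next is left pointing past the token.
     */
    static char *next_token(char *s, char **next)
    {
        while (*s && isspace((unsigned char)*s))   /* skip spaces, tabs, newlines */
            s++;
        if (!*s)
            return NULL;                           /* blank line: no token */

        char *start = s;
        while (*s && !isspace((unsigned char)*s))  /* token ends at any whitespace */
            s++;
        if (*s)
            *s++ = '\0';
        *next = s;
        return start;
    }

    int main(void)
    {
        char line[] = " \t  Methods  \n";
        char *rest;

        printf("[%s]\n", next_token(line, &rest));  /* prints [Methods] */
        return 0;
    }
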
1176     diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
1177     index d75aae304595..a68237b97c4c 100644
1178     --- a/drivers/acpi/acpica/dswexec.c
1179     +++ b/drivers/acpi/acpica/dswexec.c
1180     @@ -16,6 +16,9 @@
1181     #include "acinterp.h"
1182     #include "acnamesp.h"
1183     #include "acdebug.h"
1184     +#ifdef ACPI_EXEC_APP
1185     +#include "aecommon.h"
1186     +#endif
1187    
1188     #define _COMPONENT ACPI_DISPATCHER
1189     ACPI_MODULE_NAME("dswexec")
1190     @@ -329,6 +332,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
1191     u32 op_class;
1192     union acpi_parse_object *next_op;
1193     union acpi_parse_object *first_arg;
1194     +#ifdef ACPI_EXEC_APP
1195     + char *namepath;
1196     + union acpi_operand_object *obj_desc;
1197     +#endif
1198    
1199     ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state);
1200    
1201     @@ -537,6 +544,32 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
1202    
1203     status =
1204     acpi_ds_eval_buffer_field_operands(walk_state, op);
1205     + if (ACPI_FAILURE(status)) {
1206     + break;
1207     + }
1208     +#ifdef ACPI_EXEC_APP
1209     + /*
1210     + * acpi_exec support for namespace initialization file (initialize
1211     + * buffer_fields in this code.)
1212     + */
1213     + namepath =
1214     + acpi_ns_get_external_pathname(op->common.node);
1215     + status = ae_lookup_init_file_entry(namepath, &obj_desc);
1216     + if (ACPI_SUCCESS(status)) {
1217     + status =
1218     + acpi_ex_write_data_to_field(obj_desc,
1219     + op->common.
1220     + node->object,
1221     + NULL);
1222     + if ACPI_FAILURE
1223     + (status) {
1224     + ACPI_EXCEPTION((AE_INFO, status,
1225     + "While writing to buffer field"));
1226     + }
1227     + }
1228     + ACPI_FREE(namepath);
1229     + status = AE_OK;
1230     +#endif
1231     break;
1232    
1233     case AML_TYPE_CREATE_OBJECT:
1234     diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
1235     index 4bcf15bf03de..6cf93fae4d07 100644
1236     --- a/drivers/acpi/acpica/dswload.c
1237     +++ b/drivers/acpi/acpica/dswload.c
1238     @@ -14,7 +14,6 @@
1239     #include "acdispat.h"
1240     #include "acinterp.h"
1241     #include "acnamesp.h"
1242     -
1243     #ifdef ACPI_ASL_COMPILER
1244     #include "acdisasm.h"
1245     #endif
1246     @@ -399,7 +398,6 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
1247     union acpi_parse_object *op;
1248     acpi_object_type object_type;
1249     acpi_status status = AE_OK;
1250     -
1251     #ifdef ACPI_ASL_COMPILER
1252     u8 param_count;
1253     #endif
1254     diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
1255     index 935a8e2623e4..15d92bf15f0b 100644
1256     --- a/drivers/acpi/acpica/dswload2.c
1257     +++ b/drivers/acpi/acpica/dswload2.c
1258     @@ -15,6 +15,9 @@
1259     #include "acinterp.h"
1260     #include "acnamesp.h"
1261     #include "acevents.h"
1262     +#ifdef ACPI_EXEC_APP
1263     +#include "aecommon.h"
1264     +#endif
1265    
1266     #define _COMPONENT ACPI_DISPATCHER
1267     ACPI_MODULE_NAME("dswload2")
1268     @@ -373,6 +376,10 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
1269     struct acpi_namespace_node *new_node;
1270     u32 i;
1271     u8 region_space;
1272     +#ifdef ACPI_EXEC_APP
1273     + union acpi_operand_object *obj_desc;
1274     + char *namepath;
1275     +#endif
1276    
1277     ACPI_FUNCTION_TRACE(ds_load2_end_op);
1278    
1279     @@ -466,6 +473,11 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
1280     * be evaluated later during the execution phase
1281     */
1282     status = acpi_ds_create_buffer_field(op, walk_state);
1283     + if (ACPI_FAILURE(status)) {
1284     + ACPI_EXCEPTION((AE_INFO, status,
1285     + "CreateBufferField failure"));
1286     + goto cleanup;
1287     + }
1288     break;
1289    
1290     case AML_TYPE_NAMED_FIELD:
1291     @@ -604,6 +616,29 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
1292     case AML_NAME_OP:
1293    
1294     status = acpi_ds_create_node(walk_state, node, op);
1295     + if (ACPI_FAILURE(status)) {
1296     + goto cleanup;
1297     + }
1298     +#ifdef ACPI_EXEC_APP
1299     + /*
1300     + * acpi_exec support for namespace initialization file (initialize
1301     + * Name opcodes in this code.)
1302     + */
1303     + namepath = acpi_ns_get_external_pathname(node);
1304     + status = ae_lookup_init_file_entry(namepath, &obj_desc);
1305     + if (ACPI_SUCCESS(status)) {
1306     +
1307     + /* Detach any existing object, attach new object */
1308     +
1309     + if (node->object) {
1310     + acpi_ns_detach_object(node);
1311     + }
1312     + acpi_ns_attach_object(node, obj_desc,
1313     + obj_desc->common.type);
1314     + }
1315     + ACPI_FREE(namepath);
1316     + status = AE_OK;
1317     +#endif
1318     break;
1319    
1320     case AML_METHOD_OP:
1321     diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
1322     index 370bbc867745..c717fff7d9b5 100644
1323     --- a/drivers/acpi/acpica/nsnames.c
1324     +++ b/drivers/acpi/acpica/nsnames.c
1325     @@ -13,9 +13,6 @@
1326     #define _COMPONENT ACPI_NAMESPACE
1327     ACPI_MODULE_NAME("nsnames")
1328    
1329     -/* Local Prototypes */
1330     -static void acpi_ns_normalize_pathname(char *original_path);
1331     -
1332     /*******************************************************************************
1333     *
1334     * FUNCTION: acpi_ns_get_external_pathname
1335     @@ -30,7 +27,6 @@ static void acpi_ns_normalize_pathname(char *original_path);
1336     * for error and debug statements.
1337     *
1338     ******************************************************************************/
1339     -
1340     char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
1341     {
1342     char *name_buffer;
1343     @@ -411,7 +407,7 @@ cleanup:
1344     *
1345     ******************************************************************************/
1346    
1347     -static void acpi_ns_normalize_pathname(char *original_path)
1348     +void acpi_ns_normalize_pathname(char *original_path)
1349     {
1350     char *input_path = original_path;
1351     char *new_path_buffer;
1352     diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
1353     index eee263cb7beb..c365faf4e6cd 100644
1354     --- a/drivers/acpi/acpica/utdelete.c
1355     +++ b/drivers/acpi/acpica/utdelete.c
1356     @@ -452,13 +452,13 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
1357     *
1358     * FUNCTION: acpi_ut_update_object_reference
1359     *
1360     - * PARAMETERS: object - Increment ref count for this object
1361     - * and all sub-objects
1362     + * PARAMETERS: object - Increment or decrement the ref count for
1363     + * this object and all sub-objects
1364     * action - Either REF_INCREMENT or REF_DECREMENT
1365     *
1366     * RETURN: Status
1367     *
1368     - * DESCRIPTION: Increment the object reference count
1369     + * DESCRIPTION: Increment or decrement the object reference count
1370     *
1371     * Object references are incremented when:
1372     * 1) An object is attached to a Node (namespace object)
1373     @@ -492,7 +492,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
1374     }
1375    
1376     /*
1377     - * All sub-objects must have their reference count incremented
1378     + * All sub-objects must have their reference count updated
1379     * also. Different object types have different subobjects.
1380     */
1381     switch (object->common.type) {
1382     @@ -559,6 +559,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
1383     break;
1384     }
1385     }
1386     +
1387     next_object = NULL;
1388     break;
1389    
1390     diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
1391     index 532a1ae3595a..a0bd56ece3ff 100644
1392     --- a/drivers/acpi/processor_throttling.c
1393     +++ b/drivers/acpi/processor_throttling.c
1394     @@ -897,13 +897,6 @@ static long __acpi_processor_get_throttling(void *data)
1395     return pr->throttling.acpi_processor_get_throttling(pr);
1396     }
1397    
1398     -static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
1399     -{
1400     - if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
1401     - return fn(arg);
1402     - return work_on_cpu(cpu, fn, arg);
1403     -}
1404     -
1405     static int acpi_processor_get_throttling(struct acpi_processor *pr)
1406     {
1407     if (!pr)
1408     diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
1409     index a67315786db4..274beda31c35 100644
1410     --- a/drivers/block/rbd.c
1411     +++ b/drivers/block/rbd.c
1412     @@ -4636,6 +4636,10 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev)
1413     cancel_work_sync(&rbd_dev->unlock_work);
1414     }
1415    
1416     +/*
1417     + * header_rwsem must not be held to avoid a deadlock with
1418     + * rbd_dev_refresh() when flushing notifies.
1419     + */
1420     static void rbd_unregister_watch(struct rbd_device *rbd_dev)
1421     {
1422     cancel_tasks_sync(rbd_dev);
1423     @@ -6929,9 +6933,10 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev)
1424    
1425     static void rbd_dev_image_release(struct rbd_device *rbd_dev)
1426     {
1427     - rbd_dev_unprobe(rbd_dev);
1428     if (rbd_dev->opts)
1429     rbd_unregister_watch(rbd_dev);
1430     +
1431     + rbd_dev_unprobe(rbd_dev);
1432     rbd_dev->image_format = 0;
1433     kfree(rbd_dev->spec->image_id);
1434     rbd_dev->spec->image_id = NULL;
1435     @@ -6942,6 +6947,9 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
1436     * device. If this image is the one being mapped (i.e., not a
1437     * parent), initiate a watch on its header object before using that
1438     * object to get detailed information about the rbd image.
1439     + *
1440     + * On success, returns with header_rwsem held for write if called
1441     + * with @depth == 0.
1442     */
1443     static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
1444     {
1445     @@ -6974,9 +6982,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
1446     }
1447     }
1448    
1449     + if (!depth)
1450     + down_write(&rbd_dev->header_rwsem);
1451     +
1452     ret = rbd_dev_header_info(rbd_dev);
1453     if (ret)
1454     - goto err_out_watch;
1455     + goto err_out_probe;
1456    
1457     /*
1458     * If this image is the one being mapped, we have pool name and
1459     @@ -7025,10 +7036,11 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
1460     return 0;
1461    
1462     err_out_probe:
1463     - rbd_dev_unprobe(rbd_dev);
1464     -err_out_watch:
1465     + if (!depth)
1466     + up_write(&rbd_dev->header_rwsem);
1467     if (!depth)
1468     rbd_unregister_watch(rbd_dev);
1469     + rbd_dev_unprobe(rbd_dev);
1470     err_out_format:
1471     rbd_dev->image_format = 0;
1472     kfree(rbd_dev->spec->image_id);
1473     @@ -7085,12 +7097,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
1474     goto err_out_rbd_dev;
1475     }
1476    
1477     - down_write(&rbd_dev->header_rwsem);
1478     rc = rbd_dev_image_probe(rbd_dev, 0);
1479     - if (rc < 0) {
1480     - up_write(&rbd_dev->header_rwsem);
1481     + if (rc < 0)
1482     goto err_out_rbd_dev;
1483     - }
1484    
1485     /* If we are mapping a snapshot it must be marked read-only */
1486     if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
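
The rbd hunks above move the header_rwsem acquisition into rbd_dev_image_probe(), which now returns with the lock held for write on success (when depth == 0) and drops it on every error path, so do_rbd_add() no longer takes the lock itself. A minimal userspace C sketch of that calling convention, assuming nothing about the real rbd code (names and the pthread lock are illustrative only, not part of the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t header_rwsem = PTHREAD_RWLOCK_INITIALIZER;

/* Probe-style helper: on success it returns 0 *with the lock held*;
 * on failure it releases the lock before returning. */
static int image_probe(int fail)
{
    pthread_rwlock_wrlock(&header_rwsem);

    if (fail) {
        pthread_rwlock_unlock(&header_rwsem);  /* error path drops the lock */
        return -1;
    }
    return 0;                                  /* caller now owns the lock */
}

int main(void)
{
    if (image_probe(0) == 0) {
        puts("probe ok, finishing setup under the lock");
        pthread_rwlock_unlock(&header_rwsem);  /* caller releases it later */
    }
    return 0;
}
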
1487     diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
1488     index bda92980e015..c0895c993cce 100644
1489     --- a/drivers/clk/at91/clk-usb.c
1490     +++ b/drivers/clk/at91/clk-usb.c
1491     @@ -75,6 +75,9 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
1492     tmp_parent_rate = req->rate * div;
1493     tmp_parent_rate = clk_hw_round_rate(parent,
1494     tmp_parent_rate);
1495     + if (!tmp_parent_rate)
1496     + continue;
1497     +
1498     tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
1499     if (tmp_rate < req->rate)
1500     tmp_diff = req->rate - tmp_rate;
1501     diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
1502     index 62d0fc486d3a..80b029713722 100644
1503     --- a/drivers/clk/clk.c
1504     +++ b/drivers/clk/clk.c
1505     @@ -2642,12 +2642,14 @@ static int clk_core_get_phase(struct clk_core *core)
1506     {
1507     int ret;
1508    
1509     - clk_prepare_lock();
1510     + lockdep_assert_held(&prepare_lock);
1511     + if (!core->ops->get_phase)
1512     + return 0;
1513     +
1514     /* Always try to update cached phase if possible */
1515     - if (core->ops->get_phase)
1516     - core->phase = core->ops->get_phase(core->hw);
1517     - ret = core->phase;
1518     - clk_prepare_unlock();
1519     + ret = core->ops->get_phase(core->hw);
1520     + if (ret >= 0)
1521     + core->phase = ret;
1522    
1523     return ret;
1524     }
1525     @@ -2661,10 +2663,16 @@ static int clk_core_get_phase(struct clk_core *core)
1526     */
1527     int clk_get_phase(struct clk *clk)
1528     {
1529     + int ret;
1530     +
1531     if (!clk)
1532     return 0;
1533    
1534     - return clk_core_get_phase(clk->core);
1535     + clk_prepare_lock();
1536     + ret = clk_core_get_phase(clk->core);
1537     + clk_prepare_unlock();
1538     +
1539     + return ret;
1540     }
1541     EXPORT_SYMBOL_GPL(clk_get_phase);
1542    
1543     @@ -2878,13 +2886,21 @@ static struct hlist_head *orphan_list[] = {
1544     static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
1545     int level)
1546     {
1547     - seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
1548     + int phase;
1549     +
1550     + seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
1551     level * 3 + 1, "",
1552     30 - level * 3, c->name,
1553     c->enable_count, c->prepare_count, c->protect_count,
1554     - clk_core_get_rate(c), clk_core_get_accuracy(c),
1555     - clk_core_get_phase(c),
1556     - clk_core_get_scaled_duty_cycle(c, 100000));
1557     + clk_core_get_rate(c), clk_core_get_accuracy(c));
1558     +
1559     + phase = clk_core_get_phase(c);
1560     + if (phase >= 0)
1561     + seq_printf(s, "%5d", phase);
1562     + else
1563     + seq_puts(s, "-----");
1564     +
1565     + seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
1566     }
1567    
1568     static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
1569     @@ -2921,6 +2937,7 @@ DEFINE_SHOW_ATTRIBUTE(clk_summary);
1570    
1571     static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
1572     {
1573     + int phase;
1574     unsigned long min_rate, max_rate;
1575    
1576     clk_core_get_boundaries(c, &min_rate, &max_rate);
1577     @@ -2934,7 +2951,9 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
1578     seq_printf(s, "\"min_rate\": %lu,", min_rate);
1579     seq_printf(s, "\"max_rate\": %lu,", max_rate);
1580     seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
1581     - seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
1582     + phase = clk_core_get_phase(c);
1583     + if (phase >= 0)
1584     + seq_printf(s, "\"phase\": %d,", phase);
1585     seq_printf(s, "\"duty_cycle\": %u",
1586     clk_core_get_scaled_duty_cycle(c, 100000));
1587     }
1588     @@ -3375,14 +3394,11 @@ static int __clk_core_init(struct clk_core *core)
1589     core->accuracy = 0;
1590    
1591     /*
1592     - * Set clk's phase.
1593     + * Set clk's phase by clk_core_get_phase() caching the phase.
1594     * Since a phase is by definition relative to its parent, just
1595     * query the current clock phase, or just assume it's in phase.
1596     */
1597     - if (core->ops->get_phase)
1598     - core->phase = core->ops->get_phase(core->hw);
1599     - else
1600     - core->phase = 0;
1601     + clk_core_get_phase(core);
1602    
1603     /*
1604     * Set clk's duty cycle.
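
The clk.c change turns clk_core_get_phase() into a helper that only asserts prepare_lock is held, while the public clk_get_phase() takes and drops the lock around it. A small standalone sketch of that split, using a pthread mutex and a manual "held" flag in place of lockdep (all names are stand-ins, not the clk framework API):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t prepare_lock = PTHREAD_MUTEX_INITIALIZER;
static int prepare_lock_held;          /* crude stand-in for lockdep state */

/* Internal helper: callers must already hold prepare_lock. */
static int core_get_phase(void)
{
    assert(prepare_lock_held);         /* ~ lockdep_assert_held(&prepare_lock) */
    return 90;                         /* pretend hardware reports 90 degrees */
}

/* Public entry point: responsible for the locking. */
static int get_phase(void)
{
    int ret;

    pthread_mutex_lock(&prepare_lock);
    prepare_lock_held = 1;
    ret = core_get_phase();
    prepare_lock_held = 0;
    pthread_mutex_unlock(&prepare_lock);

    return ret;
}

int main(void)
{
    printf("phase: %d\n", get_phase());
    return 0;
}
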
1605     diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c
1606     index bec3e008335f..5e044ba1ae36 100644
1607     --- a/drivers/clk/tegra/clk-tegra-pmc.c
1608     +++ b/drivers/clk/tegra/clk-tegra-pmc.c
1609     @@ -49,16 +49,16 @@ struct pmc_clk_init_data {
1610    
1611     static DEFINE_SPINLOCK(clk_out_lock);
1612    
1613     -static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
1614     - "clk_m_div4", "extern1",
1615     +static const char *clk_out1_parents[] = { "osc", "osc_div2",
1616     + "osc_div4", "extern1",
1617     };
1618    
1619     -static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
1620     - "clk_m_div4", "extern2",
1621     +static const char *clk_out2_parents[] = { "osc", "osc_div2",
1622     + "osc_div4", "extern2",
1623     };
1624    
1625     -static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
1626     - "clk_m_div4", "extern3",
1627     +static const char *clk_out3_parents[] = { "osc", "osc_div2",
1628     + "osc_div4", "extern3",
1629     };
1630    
1631     static struct pmc_clk_init_data pmc_clks[] = {
1632     diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
1633     index 0dc1084b5e82..ad9483b9eea3 100644
1634     --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
1635     +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
1636     @@ -1112,9 +1112,9 @@ kfd_gtt_out:
1637     return 0;
1638    
1639     kfd_gtt_no_free_chunk:
1640     - pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
1641     + pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
1642     mutex_unlock(&kfd->gtt_sa_lock);
1643     - kfree(mem_obj);
1644     + kfree(*mem_obj);
1645     return -ENOMEM;
1646     }
1647    
1648     diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
1649     index 2cd83849600f..b1beed40e746 100644
1650     --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
1651     +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
1652     @@ -618,6 +618,64 @@ nouveau_drm_device_fini(struct drm_device *dev)
1653     kfree(drm);
1654     }
1655    
1656     +/*
1657     + * On some Intel PCIe bridge controllers doing a
1658     + * D0 -> D3hot -> D3cold -> D0 sequence causes Nvidia GPUs to not reappear.
1659     + * Skipping the intermediate D3hot step seems to make it work again. This is
1660     + * probably caused by not meeting the expectation the involved AML code has
1661     + * when the GPU is put into D3hot state before invoking it.
1662     + *
1663     + * This leads to various manifestations of this issue:
1664     + * - AML code execution to power on the GPU hits an infinite loop (as the
1665     + * code waits on device memory to change).
1666     + * - kernel crashes, as all PCI reads return -1, which most code isn't able
1667     + * to handle well enough.
1668     + *
1669     + * In all cases dmesg will contain at least one line like this:
1670     + * 'nouveau 0000:01:00.0: Refused to change power state, currently in D3'
1671     + * followed by a lot of nouveau timeouts.
1672     + *
1673     + * In the \_SB.PCI0.PEG0.PG00._OFF code deeper down writes bit 0x80 to the not
1674     + * documented PCI config space register 0x248 of the Intel PCIe bridge
1675     + * controller (0x1901) in order to change the state of the PCIe link between
1676     + * the PCIe port and the GPU. There are alternative code paths using other
1677     + * registers, which seem to work fine (executed pre Windows 8):
1678     + * - 0xbc bit 0x20 (publicly available documentation claims 'reserved')
1679     + * - 0xb0 bit 0x10 (link disable)
1680     + * Changing the conditions inside the firmware by poking into the relevant
1681     + * addresses does resolve the issue, but it seemed to be ACPI private memory
1682     + * and not any device accessible memory at all, so there is no portable way of
1683     + * changing the conditions.
1684     + * On a XPS 9560 that means bits [0,3] on \CPEX need to be cleared.
1685     + *
1686     + * The only systems where this behavior can be seen are hybrid graphics laptops
1687     + * with a secondary Nvidia Maxwell, Pascal or Turing GPU. It's unclear whether
1688     + * this issue only occurs in combination with listed Intel PCIe bridge
1689     + * controllers and the mentioned GPUs or other devices as well.
1690     + *
1691     + * documentation on the PCIe bridge controller can be found in the
1692     + * "7th Generation Intel® Processor Families for H Platforms Datasheet Volume 2"
1693     + * Section "12 PCI Express* Controller (x16) Registers"
1694     + */
1695     +
1696     +static void quirk_broken_nv_runpm(struct pci_dev *pdev)
1697     +{
1698     + struct drm_device *dev = pci_get_drvdata(pdev);
1699     + struct nouveau_drm *drm = nouveau_drm(dev);
1700     + struct pci_dev *bridge = pci_upstream_bridge(pdev);
1701     +
1702     + if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL)
1703     + return;
1704     +
1705     + switch (bridge->device) {
1706     + case 0x1901:
1707     + drm->old_pm_cap = pdev->pm_cap;
1708     + pdev->pm_cap = 0;
1709     + NV_INFO(drm, "Disabling PCI power management to avoid bug\n");
1710     + break;
1711     + }
1712     +}
1713     +
1714     static int nouveau_drm_probe(struct pci_dev *pdev,
1715     const struct pci_device_id *pent)
1716     {
1717     @@ -699,6 +757,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
1718     if (ret)
1719     goto fail_drm_dev_init;
1720    
1721     + quirk_broken_nv_runpm(pdev);
1722     return 0;
1723    
1724     fail_drm_dev_init:
1725     @@ -736,7 +795,11 @@ static void
1726     nouveau_drm_remove(struct pci_dev *pdev)
1727     {
1728     struct drm_device *dev = pci_get_drvdata(pdev);
1729     + struct nouveau_drm *drm = nouveau_drm(dev);
1730    
1731     + /* revert our workaround */
1732     + if (drm->old_pm_cap)
1733     + pdev->pm_cap = drm->old_pm_cap;
1734     nouveau_drm_device_remove(dev);
1735     }
1736    
1737     diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
1738     index 70f34cacc552..8104e3806499 100644
1739     --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
1740     +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
1741     @@ -138,6 +138,8 @@ struct nouveau_drm {
1742    
1743     struct list_head clients;
1744    
1745     + u8 old_pm_cap;
1746     +
1747     struct {
1748     struct agp_bridge_data *bridge;
1749     u32 base;
1750     diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
1751     index 668d4bd0c118..824654742a60 100644
1752     --- a/drivers/gpu/drm/nouveau/nouveau_svm.c
1753     +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
1754     @@ -173,6 +173,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
1755     mm = get_task_mm(current);
1756     down_read(&mm->mmap_sem);
1757    
1758     + if (!cli->svm.svmm) {
1759     + up_read(&mm->mmap_sem);
1760     + return -EINVAL;
1761     + }
1762     +
1763     for (addr = args->va_start, end = args->va_start + size; addr < end;) {
1764     struct vm_area_struct *vma;
1765     unsigned long next;
1766     @@ -181,6 +186,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
1767     if (!vma)
1768     break;
1769    
1770     + addr = max(addr, vma->vm_start);
1771     next = min(vma->vm_end, end);
1772     /* This is a best effort so we ignore errors */
1773     nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
1774     diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1775     index c578deb5867a..c71606a45d1d 100644
1776     --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1777     +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1778     @@ -1988,8 +1988,34 @@ gf100_gr_init_(struct nvkm_gr *base)
1779     {
1780     struct gf100_gr *gr = gf100_gr(base);
1781     struct nvkm_subdev *subdev = &base->engine.subdev;
1782     + struct nvkm_device *device = subdev->device;
1783     + bool reset = device->chipset == 0x137 || device->chipset == 0x138;
1784     u32 ret;
1785    
1786     + /* On certain GP107/GP108 boards, we trigger a weird issue where
1787     + * GR will stop responding to PRI accesses after we've asked the
1788     + * SEC2 RTOS to boot the GR falcons. This happens with far more
1789     + * frequency when cold-booting a board (ie. returning from D3).
1790     + *
1791     + * The root cause for this is not known and has proven difficult
1792     + * to isolate, with many avenues being dead-ends.
1793     + *
1794     + * A workaround was discovered by Karol, whereby putting GR into
1795     + * reset for an extended period right before initialisation
1796     + * prevents the problem from occurring.
1797     + *
1798     + * XXX: As RM does not require any such workaround, this is more
1799     + * of a hack than a true fix.
1800     + */
1801     + reset = nvkm_boolopt(device->cfgopt, "NvGrResetWar", reset);
1802     + if (reset) {
1803     + nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
1804     + nvkm_rd32(device, 0x000200);
1805     + msleep(50);
1806     + nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
1807     + nvkm_rd32(device, 0x000200);
1808     + }
1809     +
1810     nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
1811    
1812     ret = nvkm_falcon_get(gr->fecs.falcon, subdev);
1813     diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
1814     index f07803699809..abf165b2f64f 100644
1815     --- a/drivers/gpu/drm/ttm/ttm_bo.c
1816     +++ b/drivers/gpu/drm/ttm/ttm_bo.c
1817     @@ -517,8 +517,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
1818    
1819     dma_resv_unlock(bo->base.resv);
1820     }
1821     - if (bo->base.resv != &bo->base._resv)
1822     + if (bo->base.resv != &bo->base._resv) {
1823     + ttm_bo_flush_all_fences(bo);
1824     dma_resv_unlock(&bo->base._resv);
1825     + }
1826    
1827     error:
1828     kref_get(&bo->list_kref);
1829     diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
1830     index 0853b980bcb3..d5f5ba410524 100644
1831     --- a/drivers/gpu/drm/vc4/vc4_hdmi.c
1832     +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
1833     @@ -681,11 +681,23 @@ static enum drm_mode_status
1834     vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc,
1835     const struct drm_display_mode *mode)
1836     {
1837     - /* HSM clock must be 108% of the pixel clock. Additionally,
1838     - * the AXI clock needs to be at least 25% of pixel clock, but
1839     - * HSM ends up being the limiting factor.
1840     + /*
1841     + * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
1842     + * be faster than pixel clock, infinitesimally faster, tested in
1843     + * simulation. Otherwise, exact value is unimportant for HDMI
1844     + * operation." This conflicts with bcm2835's vc4 documentation, which
1845     + * states HSM's clock has to be at least 108% of the pixel clock.
1846     + *
1847     + * Real life tests reveal that vc4's firmware statement holds up, and
1848     + * users are able to use pixel clocks closer to HSM's, namely for
1849     + * 1920x1200@60Hz. So it was decided to leave a 1% margin between
1850     + * both clocks. Which, for RPi0-3 implies a maximum pixel clock of
1851     + * 162MHz.
1852     + *
1853     + * Additionally, the AXI clock needs to be at least 25% of
1854     + * pixel clock, but HSM ends up being the limiting factor.
1855     */
1856     - if (mode->clock > HSM_CLOCK_FREQ / (1000 * 108 / 100))
1857     + if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100))
1858     return MODE_CLOCK_HIGH;
1859    
1860     return MODE_OK;
1861     diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
1862     index 8eb167540b4f..c8296d5e74c3 100644
1863     --- a/drivers/hv/channel_mgmt.c
1864     +++ b/drivers/hv/channel_mgmt.c
1865     @@ -839,6 +839,9 @@ void vmbus_initiate_unload(bool crash)
1866     {
1867     struct vmbus_channel_message_header hdr;
1868    
1869     + if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
1870     + return;
1871     +
1872     /* Pre-Win2012R2 hosts don't support reconnect */
1873     if (vmbus_proto_version < VERSION_WIN8_1)
1874     return;
1875     diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
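
The channel_mgmt.c hunk makes vmbus_initiate_unload() bail out early when an atomic swap shows the connection is already DISCONNECTED, so the unload message is sent at most once even if the panic, kexec and suspend paths race. A tiny C11 sketch of that "swap and bail if already done" guard (illustrative only; the names are hypothetical):

#include <stdatomic.h>
#include <stdio.h>

enum { CONNECTED, DISCONNECTED };
static _Atomic int conn_state = CONNECTED;

static void initiate_unload(void)
{
    /* Swap to DISCONNECTED; if it already was, someone else got here first. */
    if (atomic_exchange(&conn_state, DISCONNECTED) == DISCONNECTED)
        return;

    puts("sending unload message");    /* reached exactly once */
}

int main(void)
{
    initiate_unload();
    initiate_unload();                 /* second call is a no-op */
    return 0;
}
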
1876     index 05ead1735c6e..40f6b73dae94 100644
1877     --- a/drivers/hv/vmbus_drv.c
1878     +++ b/drivers/hv/vmbus_drv.c
1879     @@ -31,6 +31,7 @@
1880     #include <linux/kdebug.h>
1881     #include <linux/efi.h>
1882     #include <linux/random.h>
1883     +#include <linux/kernel.h>
1884     #include <linux/syscore_ops.h>
1885     #include <clocksource/hyperv_timer.h>
1886     #include "hyperv_vmbus.h"
1887     @@ -48,14 +49,35 @@ static int hyperv_cpuhp_online;
1888    
1889     static void *hv_panic_page;
1890    
1891     +/*
1892     + * Boolean to control whether to report panic messages over Hyper-V.
1893     + *
1894     + * It can be set via /proc/sys/kernel/hyperv/record_panic_msg
1895     + */
1896     +static int sysctl_record_panic_msg = 1;
1897     +
1898     +static int hyperv_report_reg(void)
1899     +{
1900     + return !sysctl_record_panic_msg || !hv_panic_page;
1901     +}
1902     +
1903     static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
1904     void *args)
1905     {
1906     struct pt_regs *regs;
1907    
1908     - regs = current_pt_regs();
1909     + vmbus_initiate_unload(true);
1910    
1911     - hyperv_report_panic(regs, val);
1912     + /*
1913     + * Hyper-V should be notified only once about a panic. If we will be
1914     + * doing hyperv_report_panic_msg() later with kmsg data, don't do
1915     + * the notification here.
1916     + */
1917     + if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
1918     + && hyperv_report_reg()) {
1919     + regs = current_pt_regs();
1920     + hyperv_report_panic(regs, val, false);
1921     + }
1922     return NOTIFY_DONE;
1923     }
1924    
1925     @@ -65,7 +87,13 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
1926     struct die_args *die = (struct die_args *)args;
1927     struct pt_regs *regs = die->regs;
1928    
1929     - hyperv_report_panic(regs, val);
1930     + /*
1931     + * Hyper-V should be notified only once about a panic. If we will be
1932     + * doing hyperv_report_panic_msg() later with kmsg data, don't do
1933     + * the notification here.
1934     + */
1935     + if (hyperv_report_reg())
1936     + hyperv_report_panic(regs, val, true);
1937     return NOTIFY_DONE;
1938     }
1939    
1940     @@ -1246,13 +1274,6 @@ static void vmbus_isr(void)
1941     add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
1942     }
1943    
1944     -/*
1945     - * Boolean to control whether to report panic messages over Hyper-V.
1946     - *
1947     - * It can be set via /proc/sys/kernel/hyperv/record_panic_msg
1948     - */
1949     -static int sysctl_record_panic_msg = 1;
1950     -
1951     /*
1952     * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
1953     * buffer and call into Hyper-V to transfer the data.
1954     @@ -1380,19 +1401,29 @@ static int vmbus_bus_init(void)
1955     hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL);
1956     if (hv_panic_page) {
1957     ret = kmsg_dump_register(&hv_kmsg_dumper);
1958     - if (ret)
1959     + if (ret) {
1960     pr_err("Hyper-V: kmsg dump register "
1961     "error 0x%x\n", ret);
1962     + hv_free_hyperv_page(
1963     + (unsigned long)hv_panic_page);
1964     + hv_panic_page = NULL;
1965     + }
1966     } else
1967     pr_err("Hyper-V: panic message page memory "
1968     "allocation failed");
1969     }
1970    
1971     register_die_notifier(&hyperv_die_block);
1972     - atomic_notifier_chain_register(&panic_notifier_list,
1973     - &hyperv_panic_block);
1974     }
1975    
1976     + /*
1977     + * Always register the panic notifier because we need to unload
1978     + * the VMbus channel connection to prevent any VMbus
1979     + * activity after the VM panics.
1980     + */
1981     + atomic_notifier_chain_register(&panic_notifier_list,
1982     + &hyperv_panic_block);
1983     +
1984     vmbus_request_offers();
1985    
1986     return 0;
1987     @@ -1406,7 +1437,6 @@ err_alloc:
1988     hv_remove_vmbus_irq();
1989    
1990     bus_unregister(&hv_bus);
1991     - free_page((unsigned long)hv_panic_page);
1992     unregister_sysctl_table(hv_ctl_table_hdr);
1993     hv_ctl_table_hdr = NULL;
1994     return ret;
1995     @@ -2202,8 +2232,6 @@ static int vmbus_bus_suspend(struct device *dev)
1996    
1997     vmbus_initiate_unload(false);
1998    
1999     - vmbus_connection.conn_state = DISCONNECTED;
2000     -
2001     /* Reset the event for the next resume. */
2002     reinit_completion(&vmbus_connection.ready_for_resume_event);
2003    
2004     @@ -2288,7 +2316,6 @@ static void hv_kexec_handler(void)
2005     {
2006     hv_stimer_global_cleanup();
2007     vmbus_initiate_unload(false);
2008     - vmbus_connection.conn_state = DISCONNECTED;
2009     /* Make sure conn_state is set as hv_synic_cleanup checks for it */
2010     mb();
2011     cpuhp_remove_state(hyperv_cpuhp_online);
2012     @@ -2305,7 +2332,6 @@ static void hv_crash_handler(struct pt_regs *regs)
2013     * doing the cleanup for current CPU only. This should be sufficient
2014     * for kdump.
2015     */
2016     - vmbus_connection.conn_state = DISCONNECTED;
2017     cpu = smp_processor_id();
2018     hv_stimer_cleanup(cpu);
2019     hv_synic_disable_regs(cpu);
2020     diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c
2021     index 015a21f0c2ef..9174ab928880 100644
2022     --- a/drivers/iio/light/si1133.c
2023     +++ b/drivers/iio/light/si1133.c
2024     @@ -102,6 +102,9 @@
2025     #define SI1133_INPUT_FRACTION_LOW 15
2026     #define SI1133_LUX_OUTPUT_FRACTION 12
2027     #define SI1133_LUX_BUFFER_SIZE 9
2028     +#define SI1133_MEASURE_BUFFER_SIZE 3
2029     +
2030     +#define SI1133_SIGN_BIT_INDEX 23
2031    
2032     static const int si1133_scale_available[] = {
2033     1, 2, 4, 8, 16, 32, 64, 128};
2034     @@ -234,13 +237,13 @@ static const struct si1133_lux_coeff lux_coeff = {
2035     }
2036     };
2037    
2038     -static int si1133_calculate_polynomial_inner(u32 input, u8 fraction, u16 mag,
2039     +static int si1133_calculate_polynomial_inner(s32 input, u8 fraction, u16 mag,
2040     s8 shift)
2041     {
2042     return ((input << fraction) / mag) << shift;
2043     }
2044    
2045     -static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order,
2046     +static int si1133_calculate_output(s32 x, s32 y, u8 x_order, u8 y_order,
2047     u8 input_fraction, s8 sign,
2048     const struct si1133_coeff *coeffs)
2049     {
2050     @@ -276,7 +279,7 @@ static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order,
2051     * The algorithm is from:
2052     * https://siliconlabs.github.io/Gecko_SDK_Doc/efm32zg/html/si1133_8c_source.html#l00716
2053     */
2054     -static int si1133_calc_polynomial(u32 x, u32 y, u8 input_fraction, u8 num_coeff,
2055     +static int si1133_calc_polynomial(s32 x, s32 y, u8 input_fraction, u8 num_coeff,
2056     const struct si1133_coeff *coeffs)
2057     {
2058     u8 x_order, y_order;
2059     @@ -614,7 +617,7 @@ static int si1133_measure(struct si1133_data *data,
2060     {
2061     int err;
2062    
2063     - __be16 resp;
2064     + u8 buffer[SI1133_MEASURE_BUFFER_SIZE];
2065    
2066     err = si1133_set_adcmux(data, 0, chan->channel);
2067     if (err)
2068     @@ -625,12 +628,13 @@ static int si1133_measure(struct si1133_data *data,
2069     if (err)
2070     return err;
2071    
2072     - err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(resp),
2073     - (u8 *)&resp);
2074     + err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(buffer),
2075     + buffer);
2076     if (err)
2077     return err;
2078    
2079     - *val = be16_to_cpu(resp);
2080     + *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
2081     + SI1133_SIGN_BIT_INDEX);
2082    
2083     return err;
2084     }
2085     @@ -704,9 +708,9 @@ static int si1133_get_lux(struct si1133_data *data, int *val)
2086     {
2087     int err;
2088     int lux;
2089     - u32 high_vis;
2090     - u32 low_vis;
2091     - u32 ir;
2092     + s32 high_vis;
2093     + s32 low_vis;
2094     + s32 ir;
2095     u8 buffer[SI1133_LUX_BUFFER_SIZE];
2096    
2097     /* Activate lux channels */
2098     @@ -719,9 +723,16 @@ static int si1133_get_lux(struct si1133_data *data, int *val)
2099     if (err)
2100     return err;
2101    
2102     - high_vis = (buffer[0] << 16) | (buffer[1] << 8) | buffer[2];
2103     - low_vis = (buffer[3] << 16) | (buffer[4] << 8) | buffer[5];
2104     - ir = (buffer[6] << 16) | (buffer[7] << 8) | buffer[8];
2105     + high_vis =
2106     + sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
2107     + SI1133_SIGN_BIT_INDEX);
2108     +
2109     + low_vis =
2110     + sign_extend32((buffer[3] << 16) | (buffer[4] << 8) | buffer[5],
2111     + SI1133_SIGN_BIT_INDEX);
2112     +
2113     + ir = sign_extend32((buffer[6] << 16) | (buffer[7] << 8) | buffer[8],
2114     + SI1133_SIGN_BIT_INDEX);
2115    
2116     if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD)
2117     lux = si1133_calc_polynomial(high_vis, ir,
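
The si1133 hunks switch the 24-bit ADC samples from unsigned assembly to sign_extend32(..., 23), so negative readings survive the conversion. A standalone C snippet showing what that sign extension does to a three-byte sample; the helper below is a local re-implementation for illustration, not the kernel function:

#include <stdint.h>
#include <stdio.h>

/* Illustrative userspace equivalent of the kernel's sign_extend32():
 * treat bit 'index' as the sign bit of 'value'. */
static int32_t sign_extend_32(uint32_t value, unsigned int index)
{
    unsigned int shift = 31 - index;

    return (int32_t)(value << shift) >> shift;
}

int main(void)
{
    /* Three HOSTOUT bytes forming a negative 24-bit sample. */
    uint8_t buf[3] = { 0xff, 0xff, 0xfe };
    uint32_t raw = (buf[0] << 16) | (buf[1] << 8) | buf[2];

    printf("unsigned: %u  signed: %d\n", (unsigned)raw,
           sign_extend_32(raw, 23));
    /* prints: unsigned: 16777214  signed: -2 */
    return 0;
}
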
2118     diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
2119     index daeabd98c60e..0679896b9e2e 100644
2120     --- a/drivers/iommu/amd_iommu_types.h
2121     +++ b/drivers/iommu/amd_iommu_types.h
2122     @@ -348,7 +348,7 @@
2123    
2124     #define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
2125     #define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
2126     -#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL)
2127     +#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL)
2128    
2129     #define DTE_GCR3_INDEX_A 0
2130     #define DTE_GCR3_INDEX_B 1
2131     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2132     index 0d922eeae357..773ac2b0d606 100644
2133     --- a/drivers/iommu/intel-iommu.c
2134     +++ b/drivers/iommu/intel-iommu.c
2135     @@ -4335,7 +4335,8 @@ static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
2136     struct dmar_atsr_unit *atsru;
2137     struct acpi_dmar_atsr *tmp;
2138    
2139     - list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
2140     + list_for_each_entry_rcu(atsru, &dmar_atsr_units, list,
2141     + dmar_rcu_check()) {
2142     tmp = (struct acpi_dmar_atsr *)atsru->hdr;
2143     if (atsr->segment != tmp->segment)
2144     continue;
2145     diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
2146     index 518d0b2d12af..1d3816cd65d5 100644
2147     --- a/drivers/iommu/intel-svm.c
2148     +++ b/drivers/iommu/intel-svm.c
2149     @@ -502,7 +502,7 @@ struct page_req_dsc {
2150     u64 priv_data[2];
2151     };
2152    
2153     -#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
2154     +#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
2155    
2156     static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
2157     {
2158     @@ -583,14 +583,15 @@ static irqreturn_t prq_event_thread(int irq, void *d)
2159     * any faults on kernel addresses. */
2160     if (!svm->mm)
2161     goto bad_req;
2162     - /* If the mm is already defunct, don't handle faults. */
2163     - if (!mmget_not_zero(svm->mm))
2164     - goto bad_req;
2165    
2166     /* If address is not canonical, return invalid response */
2167     if (!is_canonical_address(address))
2168     goto bad_req;
2169    
2170     + /* If the mm is already defunct, don't handle faults. */
2171     + if (!mmget_not_zero(svm->mm))
2172     + goto bad_req;
2173     +
2174     down_read(&svm->mm->mmap_sem);
2175     vma = find_extend_vma(svm->mm, address);
2176     if (!vma || address < vma->vm_start)
2177     diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
2178     index 3ea9d7682999..6c340a4f4fd2 100644
2179     --- a/drivers/iommu/virtio-iommu.c
2180     +++ b/drivers/iommu/virtio-iommu.c
2181     @@ -614,18 +614,20 @@ static int viommu_domain_finalise(struct viommu_dev *viommu,
2182     int ret;
2183     struct viommu_domain *vdomain = to_viommu_domain(domain);
2184    
2185     - vdomain->viommu = viommu;
2186     - vdomain->map_flags = viommu->map_flags;
2187     + ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
2188     + viommu->last_domain, GFP_KERNEL);
2189     + if (ret < 0)
2190     + return ret;
2191     +
2192     + vdomain->id = (unsigned int)ret;
2193    
2194     domain->pgsize_bitmap = viommu->pgsize_bitmap;
2195     domain->geometry = viommu->geometry;
2196    
2197     - ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
2198     - viommu->last_domain, GFP_KERNEL);
2199     - if (ret >= 0)
2200     - vdomain->id = (unsigned int)ret;
2201     + vdomain->map_flags = viommu->map_flags;
2202     + vdomain->viommu = viommu;
2203    
2204     - return ret > 0 ? 0 : ret;
2205     + return 0;
2206     }
2207    
2208     static void viommu_domain_free(struct iommu_domain *domain)
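
The virtio-iommu fix reorders viommu_domain_finalise() so the domain ID is allocated first and the domain only points at the viommu once that allocation has succeeded, and the function now returns 0 on success rather than the raw ID. A minimal sketch of that allocate-then-publish ordering, with hypothetical names and a trivial ID allocator standing in for ida_alloc_range():

#include <stdio.h>

struct dom { unsigned int id; void *owner; };

static int next_id = 1;
static int id_alloc(void) { return next_id <= 8 ? next_id++ : -1; }

static int dom_finalise(struct dom *d, void *owner)
{
    int ret = id_alloc();

    if (ret < 0)
        return ret;          /* nothing published on failure */

    d->id = (unsigned int)ret;
    d->owner = owner;        /* publish only after the ID is held */
    return 0;                /* success is 0, not the raw ID */
}

int main(void)
{
    struct dom d = { 0 };

    printf("finalise: %d, id=%u\n", dom_finalise(&d, &d), d.id);
    return 0;
}
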
2209     diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
2210     index 6b566bba263b..ff7627b57772 100644
2211     --- a/drivers/irqchip/irq-mbigen.c
2212     +++ b/drivers/irqchip/irq-mbigen.c
2213     @@ -220,10 +220,16 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain,
2214     return 0;
2215     }
2216    
2217     +static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2218     + unsigned int nr_irqs)
2219     +{
2220     + platform_msi_domain_free(domain, virq, nr_irqs);
2221     +}
2222     +
2223     static const struct irq_domain_ops mbigen_domain_ops = {
2224     .translate = mbigen_domain_translate,
2225     .alloc = mbigen_irq_domain_alloc,
2226     - .free = irq_domain_free_irqs_common,
2227     + .free = mbigen_irq_domain_free,
2228     };
2229    
2230     static int mbigen_of_create_domain(struct platform_device *pdev,
2231     diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
2232     index 647b1263c579..d3e83c33783e 100644
2233     --- a/drivers/leds/led-class.c
2234     +++ b/drivers/leds/led-class.c
2235     @@ -281,7 +281,7 @@ int led_classdev_register_ext(struct device *parent,
2236    
2237     if (ret)
2238     dev_warn(parent, "Led %s renamed to %s due to name collision",
2239     - led_cdev->name, dev_name(led_cdev->dev));
2240     + proposed_name, dev_name(led_cdev->dev));
2241    
2242     if (led_cdev->flags & LED_BRIGHT_HW_CHANGED) {
2243     ret = led_add_brightness_hw_changed(led_cdev);
2244     diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
2245     index 931e5c2481b5..b50ec7ecd10c 100644
2246     --- a/drivers/mtd/devices/phram.c
2247     +++ b/drivers/mtd/devices/phram.c
2248     @@ -243,22 +243,25 @@ static int phram_setup(const char *val)
2249    
2250     ret = parse_num64(&start, token[1]);
2251     if (ret) {
2252     - kfree(name);
2253     parse_err("illegal start address\n");
2254     + goto error;
2255     }
2256    
2257     ret = parse_num64(&len, token[2]);
2258     if (ret) {
2259     - kfree(name);
2260     parse_err("illegal device length\n");
2261     + goto error;
2262     }
2263    
2264     ret = register_device(name, start, len);
2265     - if (!ret)
2266     - pr_info("%s device: %#llx at %#llx\n", name, len, start);
2267     - else
2268     - kfree(name);
2269     + if (ret)
2270     + goto error;
2271     +
2272     + pr_info("%s device: %#llx at %#llx\n", name, len, start);
2273     + return 0;
2274    
2275     +error:
2276     + kfree(name);
2277     return ret;
2278     }
2279    
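
The phram change funnels all parse failures through a single error label so the duplicated kfree(name) calls collapse into one cleanup path. The same shape in plain C, with stand-in parsing helpers rather than the driver's own:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the driver's number parser. */
static int parse_u64(const char *s, unsigned long long *out)
{
    char *end;

    *out = strtoull(s, &end, 0);
    return (*s && *end == '\0') ? 0 : -1;
}

/* Single-exit cleanup: every failure path frees 'name' exactly once. */
static int setup(const char *name_arg, const char *start_arg,
                 const char *len_arg)
{
    unsigned long long start, len;
    char *name = strdup(name_arg);

    if (!name)
        return -1;
    if (parse_u64(start_arg, &start))
        goto error;
    if (parse_u64(len_arg, &len))
        goto error;

    printf("%s device: %#llx at %#llx\n", name, len, start);
    free(name);        /* the real driver hands 'name' to register_device() */
    return 0;

error:
    free(name);
    return -1;
}

int main(void)
{
    return setup("phram0", "0x1000", "0x200000");
}
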
2280     diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
2281     index 1efc643c9871..9341a8a592e8 100644
2282     --- a/drivers/mtd/lpddr/lpddr_cmds.c
2283     +++ b/drivers/mtd/lpddr/lpddr_cmds.c
2284     @@ -68,7 +68,6 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
2285     shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
2286     GFP_KERNEL);
2287     if (!shared) {
2288     - kfree(lpddr);
2289     kfree(mtd);
2290     return NULL;
2291     }
2292     diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
2293     index f64e3b6605c6..47c63968fa45 100644
2294     --- a/drivers/mtd/nand/raw/nand_base.c
2295     +++ b/drivers/mtd/nand/raw/nand_base.c
2296     @@ -5907,6 +5907,8 @@ void nand_cleanup(struct nand_chip *chip)
2297     chip->ecc.algo == NAND_ECC_BCH)
2298     nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
2299    
2300     + nanddev_cleanup(&chip->base);
2301     +
2302     /* Free bad block table memory */
2303     kfree(chip->bbt);
2304     kfree(chip->data_buf);
2305     diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
2306     index 5750c45019d8..8dda51bbdd11 100644
2307     --- a/drivers/mtd/nand/spi/core.c
2308     +++ b/drivers/mtd/nand/spi/core.c
2309     @@ -609,6 +609,7 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
2310     .ooboffs = 0,
2311     .ooblen = sizeof(marker),
2312     .oobbuf.out = marker,
2313     + .mode = MTD_OPS_RAW,
2314     };
2315     int ret;
2316    
2317     diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
2318     index 471837cf0b21..e15d18bb981e 100644
2319     --- a/drivers/net/dsa/bcm_sf2_cfp.c
2320     +++ b/drivers/net/dsa/bcm_sf2_cfp.c
2321     @@ -882,17 +882,14 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
2322     fs->m_ext.data[1]))
2323     return -EINVAL;
2324    
2325     - if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
2326     + if (fs->location != RX_CLS_LOC_ANY &&
2327     + fs->location > bcm_sf2_cfp_rule_size(priv))
2328     return -EINVAL;
2329    
2330     if (fs->location != RX_CLS_LOC_ANY &&
2331     test_bit(fs->location, priv->cfp.used))
2332     return -EBUSY;
2333    
2334     - if (fs->location != RX_CLS_LOC_ANY &&
2335     - fs->location > bcm_sf2_cfp_rule_size(priv))
2336     - return -EINVAL;
2337     -
2338     ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
2339     if (ret == 0)
2340     return -EEXIST;
2341     @@ -973,7 +970,7 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
2342     struct cfp_rule *rule;
2343     int ret;
2344    
2345     - if (loc >= CFP_NUM_RULES)
2346     + if (loc > bcm_sf2_cfp_rule_size(priv))
2347     return -EINVAL;
2348    
2349     /* Refuse deleting unused rules, and those that are not unique since
2350     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2351     index 304ddce6b087..39ee32518b10 100644
2352     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2353     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2354     @@ -1548,6 +1548,10 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
2355     int mode;
2356     int err;
2357    
2358     + if (bitmap_weight((unsigned long *)&fecparam->fec,
2359     + ETHTOOL_FEC_BASER_BIT + 1) > 1)
2360     + return -EOPNOTSUPP;
2361     +
2362     for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
2363     if (!(pplm_fec_2_ethtool[mode] & fecparam->fec))
2364     continue;
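
The mlx5 hunk rejects ethtool FEC requests that set more than one mode bit by counting set bits over the relevant range before the mode loop runs. A short illustration of the same check, with a GCC/Clang popcount builtin standing in for bitmap_weight() and a hypothetical wrapper name:

#include <stdio.h>

/* Accept at most one set bit within the low 'nbits' of the request. */
static int fec_param_valid(unsigned long fec_bits, unsigned int nbits)
{
    unsigned long mask = (nbits >= 8 * sizeof(unsigned long))
                             ? ~0UL : (1UL << nbits) - 1;

    return __builtin_popcountl(fec_bits & mask) <= 1;
}

int main(void)
{
    printf("0x1 -> %d\n", fec_param_valid(0x1, 4)); /* one bit: accepted  */
    printf("0x5 -> %d\n", fec_param_valid(0x5, 4)); /* two bits: rejected */
    return 0;
}
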
2365     diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
2366     index d47412dcdf38..5e5c6aafc070 100644
2367     --- a/drivers/nvdimm/bus.c
2368     +++ b/drivers/nvdimm/bus.c
2369     @@ -1010,8 +1010,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
2370     return -EFAULT;
2371     }
2372    
2373     - if (!desc || (desc->out_num + desc->in_num == 0) ||
2374     - !test_bit(cmd, &cmd_mask))
2375     + if (!desc ||
2376     + (desc->out_num + desc->in_num == 0) ||
2377     + cmd > ND_CMD_CALL ||
2378     + !test_bit(cmd, &cmd_mask))
2379     return -ENOTTY;
2380    
2381     /* fail write commands (when read-only) */
2382     diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
2383     index 9617b7df7c4d..1688f576ee8a 100644
2384     --- a/drivers/of/overlay.c
2385     +++ b/drivers/of/overlay.c
2386     @@ -261,6 +261,8 @@ static struct property *dup_and_fixup_symbol_prop(
2387    
2388     of_property_set_flag(new_prop, OF_DYNAMIC);
2389    
2390     + kfree(target_path);
2391     +
2392     return new_prop;
2393    
2394     err_free_new_prop:
2395     diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
2396     index ca7823eef2b4..5707c309a754 100644
2397     --- a/drivers/of/unittest.c
2398     +++ b/drivers/of/unittest.c
2399     @@ -776,6 +776,10 @@ static void __init of_unittest_changeset(void)
2400     unittest(!of_changeset_revert(&chgset), "revert failed\n");
2401    
2402     of_changeset_destroy(&chgset);
2403     +
2404     + of_node_put(n1);
2405     + of_node_put(n2);
2406     + of_node_put(n21);
2407     #endif
2408     }
2409    
2410     @@ -1061,10 +1065,13 @@ static void __init of_unittest_platform_populate(void)
2411    
2412     of_platform_populate(np, match, NULL, &test_bus->dev);
2413     for_each_child_of_node(np, child) {
2414     - for_each_child_of_node(child, grandchild)
2415     - unittest(of_find_device_by_node(grandchild),
2416     + for_each_child_of_node(child, grandchild) {
2417     + pdev = of_find_device_by_node(grandchild);
2418     + unittest(pdev,
2419     "Could not create device for node '%pOFn'\n",
2420     grandchild);
2421     + of_dev_put(pdev);
2422     + }
2423     }
2424    
2425     of_platform_depopulate(&test_bus->dev);
2426     @@ -2474,8 +2481,11 @@ static __init void of_unittest_overlay_high_level(void)
2427     goto err_unlock;
2428     }
2429     if (__of_add_property(of_symbols, new_prop)) {
2430     + kfree(new_prop->name);
2431     + kfree(new_prop->value);
2432     + kfree(new_prop);
2433     /* "name" auto-generated by unflatten */
2434     - if (!strcmp(new_prop->name, "name"))
2435     + if (!strcmp(prop->name, "name"))
2436     continue;
2437     unittest(0, "duplicate property '%s' in overlay_base node __symbols__",
2438     prop->name);
2439     diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c
2440     index ec231e40ef2a..a7577e316baf 100644
2441     --- a/drivers/phy/socionext/phy-uniphier-usb3ss.c
2442     +++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c
2443     @@ -314,6 +314,10 @@ static const struct of_device_id uniphier_u3ssphy_match[] = {
2444     .compatible = "socionext,uniphier-pro4-usb3-ssphy",
2445     .data = &uniphier_pro4_data,
2446     },
2447     + {
2448     + .compatible = "socionext,uniphier-pro5-usb3-ssphy",
2449     + .data = &uniphier_pro4_data,
2450     + },
2451     {
2452     .compatible = "socionext,uniphier-pxs2-usb3-ssphy",
2453     .data = &uniphier_pxs2_data,
2454     diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
2455     index e1bc4e6e6f30..f40fa0e63b6e 100644
2456     --- a/drivers/power/supply/axp288_fuel_gauge.c
2457     +++ b/drivers/power/supply/axp288_fuel_gauge.c
2458     @@ -706,14 +706,14 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
2459     {
2460     /* Intel Cherry Trail Compute Stick, Windows version */
2461     .matches = {
2462     - DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
2463     + DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
2464     DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"),
2465     },
2466     },
2467     {
2468     /* Intel Cherry Trail Compute Stick, version without an OS */
2469     .matches = {
2470     - DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
2471     + DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
2472     DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"),
2473     },
2474     },
2475     diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
2476     index 195c18c2f426..664e50103eaa 100644
2477     --- a/drivers/power/supply/bq27xxx_battery.c
2478     +++ b/drivers/power/supply/bq27xxx_battery.c
2479     @@ -1885,7 +1885,10 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
2480    
2481     di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg);
2482     if (IS_ERR(di->bat)) {
2483     - dev_err(di->dev, "failed to register battery\n");
2484     + if (PTR_ERR(di->bat) == -EPROBE_DEFER)
2485     + dev_dbg(di->dev, "failed to register battery, deferring probe\n");
2486     + else
2487     + dev_err(di->dev, "failed to register battery\n");
2488     return PTR_ERR(di->bat);
2489     }
2490    
2491     diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
2492     index 4743b16a8d84..1526402e126b 100644
2493     --- a/drivers/rtc/rtc-88pm860x.c
2494     +++ b/drivers/rtc/rtc-88pm860x.c
2495     @@ -336,6 +336,10 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
2496     info->dev = &pdev->dev;
2497     dev_set_drvdata(&pdev->dev, info);
2498    
2499     + info->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
2500     + if (IS_ERR(info->rtc_dev))
2501     + return PTR_ERR(info->rtc_dev);
2502     +
2503     ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
2504     rtc_update_handler, IRQF_ONESHOT, "rtc",
2505     info);
2506     @@ -377,13 +381,11 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
2507     }
2508     }
2509    
2510     - info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm860x-rtc",
2511     - &pm860x_rtc_ops, THIS_MODULE);
2512     - ret = PTR_ERR(info->rtc_dev);
2513     - if (IS_ERR(info->rtc_dev)) {
2514     - dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
2515     + info->rtc_dev->ops = &pm860x_rtc_ops;
2516     +
2517     + ret = rtc_register_device(info->rtc_dev);
2518     + if (ret)
2519     return ret;
2520     - }
2521    
2522     /*
2523     * enable internal XO instead of internal 3.25MHz clock since it can
2524     diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
2525     index cce757506383..94af30f768f7 100644
2526     --- a/drivers/scsi/sg.c
2527     +++ b/drivers/scsi/sg.c
2528     @@ -803,8 +803,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
2529     "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
2530     (int) cmnd[0], (int) hp->cmd_len));
2531    
2532     - if (hp->dxfer_len >= SZ_256M)
2533     + if (hp->dxfer_len >= SZ_256M) {
2534     + sg_remove_request(sfp, srp);
2535     return -EINVAL;
2536     + }
2537    
2538     k = sg_start_req(srp, cmnd);
2539     if (k) {
2540     diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
2541     index 98b9d9a902ae..90a8b2c0676f 100644
2542     --- a/drivers/soc/imx/gpc.c
2543     +++ b/drivers/soc/imx/gpc.c
2544     @@ -87,8 +87,8 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd)
2545     static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
2546     {
2547     struct imx_pm_domain *pd = to_imx_pm_domain(genpd);
2548     - int i, ret, sw, sw2iso;
2549     - u32 val;
2550     + int i, ret;
2551     + u32 val, req;
2552    
2553     if (pd->supply) {
2554     ret = regulator_enable(pd->supply);
2555     @@ -107,17 +107,18 @@ static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
2556     regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS,
2557     0x1, 0x1);
2558    
2559     - /* Read ISO and ISO2SW power up delays */
2560     - regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val);
2561     - sw = val & 0x3f;
2562     - sw2iso = (val >> 8) & 0x3f;
2563     -
2564     /* Request GPC to power up domain */
2565     - val = BIT(pd->cntr_pdn_bit + 1);
2566     - regmap_update_bits(pd->regmap, GPC_CNTR, val, val);
2567     + req = BIT(pd->cntr_pdn_bit + 1);
2568     + regmap_update_bits(pd->regmap, GPC_CNTR, req, req);
2569    
2570     - /* Wait ISO + ISO2SW IPG clock cycles */
2571     - udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz));
2572     + /* Wait for the PGC to handle the request */
2573     + ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req),
2574     + 1, 50);
2575     + if (ret)
2576     + pr_err("powerup request on domain %s timed out\n", genpd->name);
2577     +
2578     + /* Wait for reset to propagate through peripherals */
2579     + usleep_range(5, 10);
2580    
2581     /* Disable reset clocks for all devices in the domain */
2582     for (i = 0; i < pd->num_clks; i++)
2583     @@ -343,6 +344,7 @@ static const struct regmap_config imx_gpc_regmap_config = {
2584     .rd_table = &access_table,
2585     .wr_table = &access_table,
2586     .max_register = 0x2ac,
2587     + .fast_io = true,
2588     };
2589    
2590     static struct generic_pm_domain *imx_gpc_onecell_domains[] = {
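
The imx6 GPC change replaces the fixed ISO delay with regmap_read_poll_timeout(), i.e. re-reading GPC_CNTR until the power-up request bit clears or a 50 µs budget runs out. A userspace approximation of that poll loop against a fake register (illustrative only; the real macro also handles sleep granularity and error propagation for you):

#include <errno.h>
#include <stdio.h>
#include <time.h>

static unsigned int fake_reg = 0x2;    /* request bit, clears after a few reads */

static unsigned int read_status(void)
{
    static int reads;

    if (++reads > 3)
        fake_reg &= ~0x2u;
    return fake_reg;
}

/* Re-read until 'bit' clears or the timeout budget is spent. */
static int poll_bit_cleared(unsigned int bit, long timeout_us)
{
    struct timespec delay = { 0, 1000 };   /* ~1 us between reads */
    long waited = 0;

    while (read_status() & bit) {
        if (waited >= timeout_us)
            return -ETIMEDOUT;
        nanosleep(&delay, NULL);
        waited += 1;
    }
    return 0;
}

int main(void)
{
    printf("poll result: %d\n", poll_bit_cleared(0x2, 50));
    return 0;
}
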
2591     diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
2592     index 769e0a5d1dfc..3c6dd06ec5fb 100644
2593     --- a/drivers/tty/ehv_bytechan.c
2594     +++ b/drivers/tty/ehv_bytechan.c
2595     @@ -136,6 +136,21 @@ static int find_console_handle(void)
2596     return 1;
2597     }
2598    
2599     +static unsigned int local_ev_byte_channel_send(unsigned int handle,
2600     + unsigned int *count,
2601     + const char *p)
2602     +{
2603     + char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
2604     + unsigned int c = *count;
2605     +
2606     + if (c < sizeof(buffer)) {
2607     + memcpy(buffer, p, c);
2608     + memset(&buffer[c], 0, sizeof(buffer) - c);
2609     + p = buffer;
2610     + }
2611     + return ev_byte_channel_send(handle, count, p);
2612     +}
2613     +
2614     /*************************** EARLY CONSOLE DRIVER ***************************/
2615    
2616     #ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC
2617     @@ -154,7 +169,7 @@ static void byte_channel_spin_send(const char data)
2618    
2619     do {
2620     count = 1;
2621     - ret = ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
2622     + ret = local_ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
2623     &count, &data);
2624     } while (ret == EV_EAGAIN);
2625     }
2626     @@ -221,7 +236,7 @@ static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s,
2627     while (count) {
2628     len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES);
2629     do {
2630     - ret = ev_byte_channel_send(handle, &len, s);
2631     + ret = local_ev_byte_channel_send(handle, &len, s);
2632     } while (ret == EV_EAGAIN);
2633     count -= len;
2634     s += len;
2635     @@ -401,7 +416,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc)
2636     CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE),
2637     EV_BYTE_CHANNEL_MAX_BYTES);
2638    
2639     - ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);
2640     + ret = local_ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);
2641    
2642     /* 'len' is valid only if the return code is 0 or EV_EAGAIN */
2643     if (!ret || (ret == EV_EAGAIN))
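
The ehv_bytechan wrapper above copies short writes into a fixed-size, zero-padded bounce buffer before handing them to the hypervisor call, so the backend can always read EV_BYTE_CHANNEL_MAX_BYTES without running past the caller's data. The same shape in standalone C; the backend here is a stub, not the hypercall:

#include <stdio.h>
#include <string.h>

#define CHUNK 16 /* stand-in for EV_BYTE_CHANNEL_MAX_BYTES */

/* Hypothetical backend that always reads CHUNK bytes from 'p'. */
static int backend_send(const char *p, unsigned int *count)
{
    (void)p;
    if (*count > CHUNK)
        *count = CHUNK;
    return 0;
}

/* Short writes are bounced through a zero-padded buffer so the backend
 * never reads past the caller's data. */
static int padded_send(const char *p, unsigned int *count)
{
    char buffer[CHUNK];

    if (*count < sizeof(buffer)) {
        memcpy(buffer, p, *count);
        memset(buffer + *count, 0, sizeof(buffer) - *count);
        p = buffer;
    }
    return backend_send(p, count);
}

int main(void)
{
    unsigned int len = 5;

    return padded_send("hello", &len);
}
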
2644     diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
2645     index e6a1c805064f..e72738371ecb 100644
2646     --- a/drivers/video/fbdev/core/fbmem.c
2647     +++ b/drivers/video/fbdev/core/fbmem.c
2648     @@ -662,20 +662,20 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
2649     fb_logo.depth = 1;
2650    
2651    
2652     - if (fb_logo.depth > 4 && depth > 4) {
2653     - switch (info->fix.visual) {
2654     - case FB_VISUAL_TRUECOLOR:
2655     - fb_logo.needs_truepalette = 1;
2656     - break;
2657     - case FB_VISUAL_DIRECTCOLOR:
2658     - fb_logo.needs_directpalette = 1;
2659     - fb_logo.needs_cmapreset = 1;
2660     - break;
2661     - case FB_VISUAL_PSEUDOCOLOR:
2662     - fb_logo.needs_cmapreset = 1;
2663     - break;
2664     - }
2665     - }
2666     + if (fb_logo.depth > 4 && depth > 4) {
2667     + switch (info->fix.visual) {
2668     + case FB_VISUAL_TRUECOLOR:
2669     + fb_logo.needs_truepalette = 1;
2670     + break;
2671     + case FB_VISUAL_DIRECTCOLOR:
2672     + fb_logo.needs_directpalette = 1;
2673     + fb_logo.needs_cmapreset = 1;
2674     + break;
2675     + case FB_VISUAL_PSEUDOCOLOR:
2676     + fb_logo.needs_cmapreset = 1;
2677     + break;
2678     + }
2679     + }
2680    
2681     height = fb_logo.logo->height;
2682     if (fb_center_logo)
2683     @@ -1060,19 +1060,19 @@ fb_blank(struct fb_info *info, int blank)
2684     struct fb_event event;
2685     int ret = -EINVAL;
2686    
2687     - if (blank > FB_BLANK_POWERDOWN)
2688     - blank = FB_BLANK_POWERDOWN;
2689     + if (blank > FB_BLANK_POWERDOWN)
2690     + blank = FB_BLANK_POWERDOWN;
2691    
2692     event.info = info;
2693     event.data = &blank;
2694    
2695     if (info->fbops->fb_blank)
2696     - ret = info->fbops->fb_blank(blank, info);
2697     + ret = info->fbops->fb_blank(blank, info);
2698    
2699     if (!ret)
2700     fb_notifier_call_chain(FB_EVENT_BLANK, &event);
2701    
2702     - return ret;
2703     + return ret;
2704     }
2705     EXPORT_SYMBOL(fb_blank);
2706    
2707     @@ -1110,7 +1110,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
2708     break;
2709     case FBIOGET_FSCREENINFO:
2710     lock_fb_info(info);
2711     - fix = info->fix;
2712     + memcpy(&fix, &info->fix, sizeof(fix));
2713     if (info->flags & FBINFO_HIDE_SMEM_START)
2714     fix.smem_start = 0;
2715     unlock_fb_info(info);
2716     diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
2717     index 53e04926a7b2..190d26e2e75f 100644
2718     --- a/drivers/watchdog/sp805_wdt.c
2719     +++ b/drivers/watchdog/sp805_wdt.c
2720     @@ -137,10 +137,14 @@ wdt_restart(struct watchdog_device *wdd, unsigned long mode, void *cmd)
2721     {
2722     struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
2723    
2724     + writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
2725     writel_relaxed(0, wdt->base + WDTCONTROL);
2726     writel_relaxed(0, wdt->base + WDTLOAD);
2727     writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
2728    
2729     + /* Flush posted writes. */
2730     + readl_relaxed(wdt->base + WDTLOCK);
2731     +
2732     return 0;
2733     }
2734    
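
The sp805 restart path now unlocks the register file before programming it and ends with a dummy read-back so the relaxed (posted) writes are flushed to the device before the function returns. A sketch of that code shape against a plain volatile array (no real MMIO involved; the register indices and unlock value below are placeholders, not the hardware's):

#include <stdint.h>

static volatile uint32_t regs[4];      /* pretend MMIO window */

enum { WDTLOAD, WDTVALUE, WDTCONTROL, WDTLOCK };
#define UNLOCK_KEY 0xdecafbadu         /* placeholder, not the real value */

static void wdt_restart_shape(void)
{
    regs[WDTLOCK] = UNLOCK_KEY;        /* unlock before touching control */
    regs[WDTCONTROL] = 0;
    regs[WDTLOAD] = 0;
    regs[WDTCONTROL] = 0x3;            /* enable interrupt + reset */

    (void)regs[WDTLOCK];               /* read back: flush posted writes */
}

int main(void)
{
    wdt_restart_shape();
    return regs[WDTCONTROL] == 0x3 ? 0 : 1;
}
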
2735     diff --git a/fs/afs/dir.c b/fs/afs/dir.c
2736     index 5c794f4b051a..d1e1caa23c8b 100644
2737     --- a/fs/afs/dir.c
2738     +++ b/fs/afs/dir.c
2739     @@ -1032,7 +1032,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
2740     struct dentry *parent;
2741     struct inode *inode;
2742     struct key *key;
2743     - afs_dataversion_t dir_version;
2744     + afs_dataversion_t dir_version, invalid_before;
2745     long de_version;
2746     int ret;
2747    
2748     @@ -1084,8 +1084,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
2749     if (de_version == (long)dir_version)
2750     goto out_valid_noupdate;
2751    
2752     - dir_version = dir->invalid_before;
2753     - if (de_version - (long)dir_version >= 0)
2754     + invalid_before = dir->invalid_before;
2755     + if (de_version - (long)invalid_before >= 0)
2756     goto out_valid;
2757    
2758     _debug("dir modified");
2759     @@ -1275,6 +1275,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2760     struct afs_fs_cursor fc;
2761     struct afs_vnode *dvnode = AFS_FS_I(dir);
2762     struct key *key;
2763     + afs_dataversion_t data_version;
2764     int ret;
2765    
2766     mode |= S_IFDIR;
2767     @@ -1295,7 +1296,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2768    
2769     ret = -ERESTARTSYS;
2770     if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
2771     - afs_dataversion_t data_version = dvnode->status.data_version + 1;
2772     + data_version = dvnode->status.data_version + 1;
2773    
2774     while (afs_select_fileserver(&fc)) {
2775     fc.cb_break = afs_calc_vnode_cb_break(dvnode);
2776     @@ -1316,10 +1317,14 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2777     goto error_key;
2778     }
2779    
2780     - if (ret == 0 &&
2781     - test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
2782     - afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
2783     - afs_edit_dir_for_create);
2784     + if (ret == 0) {
2785     + down_write(&dvnode->validate_lock);
2786     + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
2787     + dvnode->status.data_version == data_version)
2788     + afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
2789     + afs_edit_dir_for_create);
2790     + up_write(&dvnode->validate_lock);
2791     + }
2792    
2793     key_put(key);
2794     kfree(scb);
2795     @@ -1360,6 +1365,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
2796     struct afs_fs_cursor fc;
2797     struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL;
2798     struct key *key;
2799     + afs_dataversion_t data_version;
2800     int ret;
2801    
2802     _enter("{%llx:%llu},{%pd}",
2803     @@ -1391,7 +1397,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
2804    
2805     ret = -ERESTARTSYS;
2806     if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
2807     - afs_dataversion_t data_version = dvnode->status.data_version + 1;
2808     + data_version = dvnode->status.data_version + 1;
2809    
2810     while (afs_select_fileserver(&fc)) {
2811     fc.cb_break = afs_calc_vnode_cb_break(dvnode);
2812     @@ -1404,9 +1410,12 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
2813     ret = afs_end_vnode_operation(&fc);
2814     if (ret == 0) {
2815     afs_dir_remove_subdir(dentry);
2816     - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
2817     + down_write(&dvnode->validate_lock);
2818     + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
2819     + dvnode->status.data_version == data_version)
2820     afs_edit_dir_remove(dvnode, &dentry->d_name,
2821     afs_edit_dir_for_rmdir);
2822     + up_write(&dvnode->validate_lock);
2823     }
2824     }
2825    
2826     @@ -1544,10 +1553,15 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
2827     ret = afs_end_vnode_operation(&fc);
2828     if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
2829     ret = afs_dir_remove_link(dvnode, dentry, key);
2830     - if (ret == 0 &&
2831     - test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
2832     - afs_edit_dir_remove(dvnode, &dentry->d_name,
2833     - afs_edit_dir_for_unlink);
2834     +
2835     + if (ret == 0) {
2836     + down_write(&dvnode->validate_lock);
2837     + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
2838     + dvnode->status.data_version == data_version)
2839     + afs_edit_dir_remove(dvnode, &dentry->d_name,
2840     + afs_edit_dir_for_unlink);
2841     + up_write(&dvnode->validate_lock);
2842     + }
2843     }
2844    
2845     if (need_rehash && ret < 0 && ret != -ENOENT)
2846     @@ -1573,6 +1587,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2847     struct afs_status_cb *scb;
2848     struct afs_vnode *dvnode = AFS_FS_I(dir);
2849     struct key *key;
2850     + afs_dataversion_t data_version;
2851     int ret;
2852    
2853     mode |= S_IFREG;
2854     @@ -1597,7 +1612,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2855    
2856     ret = -ERESTARTSYS;
2857     if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
2858     - afs_dataversion_t data_version = dvnode->status.data_version + 1;
2859     + data_version = dvnode->status.data_version + 1;
2860    
2861     while (afs_select_fileserver(&fc)) {
2862     fc.cb_break = afs_calc_vnode_cb_break(dvnode);
2863     @@ -1618,9 +1633,12 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2864     goto error_key;
2865     }
2866    
2867     - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
2868     + down_write(&dvnode->validate_lock);
2869     + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
2870     + dvnode->status.data_version == data_version)
2871     afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
2872     afs_edit_dir_for_create);
2873     + up_write(&dvnode->validate_lock);
2874    
2875     kfree(scb);
2876     key_put(key);
2877     @@ -1648,6 +1666,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
2878     struct afs_vnode *dvnode = AFS_FS_I(dir);
2879     struct afs_vnode *vnode = AFS_FS_I(d_inode(from));
2880     struct key *key;
2881     + afs_dataversion_t data_version;
2882     int ret;
2883    
2884     _enter("{%llx:%llu},{%llx:%llu},{%pd}",
2885     @@ -1672,7 +1691,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
2886    
2887     ret = -ERESTARTSYS;
2888     if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
2889     - afs_dataversion_t data_version = dvnode->status.data_version + 1;
2890     + data_version = dvnode->status.data_version + 1;
2891    
2892     if (mutex_lock_interruptible_nested(&vnode->io_lock, 1) < 0) {
2893     afs_end_vnode_operation(&fc);
2894     @@ -1702,9 +1721,12 @@ static int afs_link(struct dentry *from, struct inode *dir,
2895     goto error_key;
2896     }
2897    
2898     - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
2899     + down_write(&dvnode->validate_lock);
2900     + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
2901     + dvnode->status.data_version == data_version)
2902     afs_edit_dir_add(dvnode, &dentry->d_name, &vnode->fid,
2903     afs_edit_dir_for_link);
2904     + up_write(&dvnode->validate_lock);
2905    
2906     key_put(key);
2907     kfree(scb);
2908     @@ -1732,6 +1754,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
2909     struct afs_status_cb *scb;
2910     struct afs_vnode *dvnode = AFS_FS_I(dir);
2911     struct key *key;
2912     + afs_dataversion_t data_version;
2913     int ret;
2914    
2915     _enter("{%llx:%llu},{%pd},%s",
2916     @@ -1759,7 +1782,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
2917    
2918     ret = -ERESTARTSYS;
2919     if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
2920     - afs_dataversion_t data_version = dvnode->status.data_version + 1;
2921     + data_version = dvnode->status.data_version + 1;
2922    
2923     while (afs_select_fileserver(&fc)) {
2924     fc.cb_break = afs_calc_vnode_cb_break(dvnode);
2925     @@ -1780,9 +1803,12 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
2926     goto error_key;
2927     }
2928    
2929     - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
2930     + down_write(&dvnode->validate_lock);
2931     + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
2932     + dvnode->status.data_version == data_version)
2933     afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
2934     afs_edit_dir_for_symlink);
2935     + up_write(&dvnode->validate_lock);
2936    
2937     key_put(key);
2938     kfree(scb);
2939     @@ -1812,6 +1838,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
2940     struct dentry *tmp = NULL, *rehash = NULL;
2941     struct inode *new_inode;
2942     struct key *key;
2943     + afs_dataversion_t orig_data_version;
2944     + afs_dataversion_t new_data_version;
2945     bool new_negative = d_is_negative(new_dentry);
2946     int ret;
2947    
2948     @@ -1890,10 +1918,6 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
2949    
2950     ret = -ERESTARTSYS;
2951     if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
2952     - afs_dataversion_t orig_data_version;
2953     - afs_dataversion_t new_data_version;
2954     - struct afs_status_cb *new_scb = &scb[1];
2955     -
2956     orig_data_version = orig_dvnode->status.data_version + 1;
2957    
2958     if (orig_dvnode != new_dvnode) {
2959     @@ -1904,7 +1928,6 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
2960     new_data_version = new_dvnode->status.data_version + 1;
2961     } else {
2962     new_data_version = orig_data_version;
2963     - new_scb = &scb[0];
2964     }
2965    
2966     while (afs_select_fileserver(&fc)) {
2967     @@ -1912,7 +1935,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
2968     fc.cb_break_2 = afs_calc_vnode_cb_break(new_dvnode);
2969     afs_fs_rename(&fc, old_dentry->d_name.name,
2970     new_dvnode, new_dentry->d_name.name,
2971     - &scb[0], new_scb);
2972     + &scb[0], &scb[1]);
2973     }
2974    
2975     afs_vnode_commit_status(&fc, orig_dvnode, fc.cb_break,
2976     @@ -1930,18 +1953,25 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
2977     if (ret == 0) {
2978     if (rehash)
2979     d_rehash(rehash);
2980     - if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags))
2981     - afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name,
2982     - afs_edit_dir_for_rename_0);
2983     + down_write(&orig_dvnode->validate_lock);
2984     + if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
2985     + orig_dvnode->status.data_version == orig_data_version)
2986     + afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name,
2987     + afs_edit_dir_for_rename_0);
2988     + if (orig_dvnode != new_dvnode) {
2989     + up_write(&orig_dvnode->validate_lock);
2990    
2991     - if (!new_negative &&
2992     - test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags))
2993     - afs_edit_dir_remove(new_dvnode, &new_dentry->d_name,
2994     - afs_edit_dir_for_rename_1);
2995     + down_write(&new_dvnode->validate_lock);
2996     + }
2997     + if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) &&
2998     + orig_dvnode->status.data_version == new_data_version) {
2999     + if (!new_negative)
3000     + afs_edit_dir_remove(new_dvnode, &new_dentry->d_name,
3001     + afs_edit_dir_for_rename_1);
3002    
3003     - if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags))
3004     afs_edit_dir_add(new_dvnode, &new_dentry->d_name,
3005     &vnode->fid, afs_edit_dir_for_rename_2);
3006     + }
3007    
3008     new_inode = d_inode(new_dentry);
3009     if (new_inode) {
3010     @@ -1957,14 +1987,10 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
3011     * Note that if we ever implement RENAME_EXCHANGE, we'll have
3012     * to update both dentries with opposing dir versions.
3013     */
3014     - if (new_dvnode != orig_dvnode) {
3015     - afs_update_dentry_version(&fc, old_dentry, &scb[1]);
3016     - afs_update_dentry_version(&fc, new_dentry, &scb[1]);
3017     - } else {
3018     - afs_update_dentry_version(&fc, old_dentry, &scb[0]);
3019     - afs_update_dentry_version(&fc, new_dentry, &scb[0]);
3020     - }
3021     + afs_update_dentry_version(&fc, old_dentry, &scb[1]);
3022     + afs_update_dentry_version(&fc, new_dentry, &scb[1]);
3023     d_move(old_dentry, new_dentry);
3024     + up_write(&new_dvnode->validate_lock);
3025     goto error_tmp;
3026     }
3027    
3028     diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
3029     index 361088a5edb9..d94e2b7cddff 100644
3030     --- a/fs/afs/dir_silly.c
3031     +++ b/fs/afs/dir_silly.c
3032     @@ -21,6 +21,7 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
3033     {
3034     struct afs_fs_cursor fc;
3035     struct afs_status_cb *scb;
3036     + afs_dataversion_t dir_data_version;
3037     int ret = -ERESTARTSYS;
3038    
3039     _enter("%pd,%pd", old, new);
3040     @@ -31,7 +32,7 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
3041    
3042     trace_afs_silly_rename(vnode, false);
3043     if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
3044     - afs_dataversion_t dir_data_version = dvnode->status.data_version + 1;
3045     + dir_data_version = dvnode->status.data_version + 1;
3046    
3047     while (afs_select_fileserver(&fc)) {
3048     fc.cb_break = afs_calc_vnode_cb_break(dvnode);
3049     @@ -54,12 +55,15 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
3050     dvnode->silly_key = key_get(key);
3051     }
3052    
3053     - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
3054     + down_write(&dvnode->validate_lock);
3055     + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
3056     + dvnode->status.data_version == dir_data_version) {
3057     afs_edit_dir_remove(dvnode, &old->d_name,
3058     afs_edit_dir_for_silly_0);
3059     - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
3060     afs_edit_dir_add(dvnode, &new->d_name,
3061     &vnode->fid, afs_edit_dir_for_silly_1);
3062     + }
3063     + up_write(&dvnode->validate_lock);
3064     }
3065    
3066     kfree(scb);
3067     @@ -181,10 +185,14 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
3068     clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
3069     }
3070     }
3071     - if (ret == 0 &&
3072     - test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
3073     - afs_edit_dir_remove(dvnode, &dentry->d_name,
3074     - afs_edit_dir_for_unlink);
3075     + if (ret == 0) {
3076     + down_write(&dvnode->validate_lock);
3077     + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
3078     + dvnode->status.data_version == dir_data_version)
3079     + afs_edit_dir_remove(dvnode, &dentry->d_name,
3080     + afs_edit_dir_for_unlink);
3081     + up_write(&dvnode->validate_lock);
3082     + }
3083     }
3084    
3085     kfree(scb);
3086     diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
3087     index 6f84231f11a5..6805a469d13c 100644
3088     --- a/fs/afs/fsclient.c
3089     +++ b/fs/afs/fsclient.c
3090     @@ -65,6 +65,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
3091     bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus);
3092     u64 data_version, size;
3093     u32 type, abort_code;
3094     + int ret;
3095    
3096     abort_code = ntohl(xdr->abort_code);
3097    
3098     @@ -78,7 +79,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
3099     */
3100     status->abort_code = abort_code;
3101     scb->have_error = true;
3102     - return 0;
3103     + goto good;
3104     }
3105    
3106     pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version));
3107     @@ -87,7 +88,8 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
3108    
3109     if (abort_code != 0 && inline_error) {
3110     status->abort_code = abort_code;
3111     - return 0;
3112     + scb->have_error = true;
3113     + goto good;
3114     }
3115    
3116     type = ntohl(xdr->type);
3117     @@ -123,13 +125,16 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
3118     data_version |= (u64)ntohl(xdr->data_version_hi) << 32;
3119     status->data_version = data_version;
3120     scb->have_status = true;
3121     -
3122     +good:
3123     + ret = 0;
3124     +advance:
3125     *_bp = (const void *)*_bp + sizeof(*xdr);
3126     - return 0;
3127     + return ret;
3128    
3129     bad:
3130     xdr_dump_bad(*_bp);
3131     - return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
3132     + ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
3133     + goto advance;
3134     }
3135    
3136     static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
3137     @@ -983,16 +988,16 @@ static int afs_deliver_fs_rename(struct afs_call *call)
3138     if (ret < 0)
3139     return ret;
3140    
3141     - /* unmarshall the reply once we've received all of it */
3142     + /* If the two dirs are the same, we have two copies of the same status
3143     + * report, so we just decode it twice.
3144     + */
3145     bp = call->buffer;
3146     ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
3147     if (ret < 0)
3148     return ret;
3149     - if (call->out_dir_scb != call->out_scb) {
3150     - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
3151     - if (ret < 0)
3152     - return ret;
3153     - }
3154     + ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
3155     + if (ret < 0)
3156     + return ret;
3157     xdr_decode_AFSVolSync(&bp, call->out_volsync);
3158    
3159     _leave(" = 0 [done]");
3160     diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
3161     index 3ee7abf4b2d0..31b236c6b1f7 100644
3162     --- a/fs/afs/yfsclient.c
3163     +++ b/fs/afs/yfsclient.c
3164     @@ -186,13 +186,14 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp,
3165     const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp;
3166     struct afs_file_status *status = &scb->status;
3167     u32 type;
3168     + int ret;
3169    
3170     status->abort_code = ntohl(xdr->abort_code);
3171     if (status->abort_code != 0) {
3172     if (status->abort_code == VNOVNODE)
3173     status->nlink = 0;
3174     scb->have_error = true;
3175     - return 0;
3176     + goto good;
3177     }
3178    
3179     type = ntohl(xdr->type);
3180     @@ -220,13 +221,16 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp,
3181     status->size = xdr_to_u64(xdr->size);
3182     status->data_version = xdr_to_u64(xdr->data_version);
3183     scb->have_status = true;
3184     -
3185     +good:
3186     + ret = 0;
3187     +advance:
3188     *_bp += xdr_size(xdr);
3189     - return 0;
3190     + return ret;
3191    
3192     bad:
3193     xdr_dump_bad(*_bp);
3194     - return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
3195     + ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
3196     + goto advance;
3197     }
3198    
3199     /*
3200     @@ -1154,11 +1158,9 @@ static int yfs_deliver_fs_rename(struct afs_call *call)
3201     ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
3202     if (ret < 0)
3203     return ret;
3204     - if (call->out_dir_scb != call->out_scb) {
3205     - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
3206     - if (ret < 0)
3207     - return ret;
3208     - }
3209     + ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
3210     + if (ret < 0)
3211     + return ret;
3212    
3213     xdr_decode_YFSVolSync(&bp, call->out_volsync);
3214     _leave(" = 0 [done]");
3215     diff --git a/fs/block_dev.c b/fs/block_dev.c
3216     index d612468ee66b..34644ce4b502 100644
3217     --- a/fs/block_dev.c
3218     +++ b/fs/block_dev.c
3219     @@ -34,6 +34,7 @@
3220     #include <linux/task_io_accounting_ops.h>
3221     #include <linux/falloc.h>
3222     #include <linux/uaccess.h>
3223     +#include <linux/suspend.h>
3224     #include "internal.h"
3225    
3226     struct bdev_inode {
3227     @@ -1975,7 +1976,8 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
3228     if (bdev_read_only(I_BDEV(bd_inode)))
3229     return -EPERM;
3230    
3231     - if (IS_SWAPFILE(bd_inode))
3232     + /* uswsusp needs write permission to the swap */
3233     + if (IS_SWAPFILE(bd_inode) && !hibernation_available())
3234     return -ETXTBSY;
3235    
3236     if (!iov_iter_count(from))
3237     diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
3238     index 7dcfa7d7632a..95330f40f998 100644
3239     --- a/fs/btrfs/block-group.c
3240     +++ b/fs/btrfs/block-group.c
3241     @@ -1829,6 +1829,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
3242     }
3243     }
3244    
3245     + rcu_read_lock();
3246     list_for_each_entry_rcu(space_info, &info->space_info, list) {
3247     if (!(btrfs_get_alloc_profile(info, space_info->flags) &
3248     (BTRFS_BLOCK_GROUP_RAID10 |
3249     @@ -1849,6 +1850,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
3250     list)
3251     inc_block_group_ro(cache, 1);
3252     }
3253     + rcu_read_unlock();
3254    
3255     btrfs_init_global_block_rsv(info);
3256     ret = check_chunk_block_group_mappings(info);
3257     diff --git a/fs/buffer.c b/fs/buffer.c
3258     index 91ceca52d14f..79c9562434a8 100644
3259     --- a/fs/buffer.c
3260     +++ b/fs/buffer.c
3261     @@ -1337,6 +1337,17 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
3262     }
3263     EXPORT_SYMBOL(__breadahead);
3264    
3265     +void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
3266     + gfp_t gfp)
3267     +{
3268     + struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
3269     + if (likely(bh)) {
3270     + ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
3271     + brelse(bh);
3272     + }
3273     +}
3274     +EXPORT_SYMBOL(__breadahead_gfp);
3275     +
3276     /**
3277     * __bread_gfp() - reads a specified block and returns the bh
3278     * @bdev: the block_device to read from
3279     diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
3280     index e67a43fd037c..fe1552cc8a0a 100644
3281     --- a/fs/cifs/transport.c
3282     +++ b/fs/cifs/transport.c
3283     @@ -466,7 +466,7 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
3284     struct smb_rqst *rqst, int flags)
3285     {
3286     struct kvec iov;
3287     - struct smb2_transform_hdr tr_hdr;
3288     + struct smb2_transform_hdr *tr_hdr;
3289     struct smb_rqst cur_rqst[MAX_COMPOUND];
3290     int rc;
3291    
3292     @@ -476,28 +476,34 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
3293     if (num_rqst > MAX_COMPOUND - 1)
3294     return -ENOMEM;
3295    
3296     - memset(&cur_rqst[0], 0, sizeof(cur_rqst));
3297     - memset(&iov, 0, sizeof(iov));
3298     - memset(&tr_hdr, 0, sizeof(tr_hdr));
3299     -
3300     - iov.iov_base = &tr_hdr;
3301     - iov.iov_len = sizeof(tr_hdr);
3302     - cur_rqst[0].rq_iov = &iov;
3303     - cur_rqst[0].rq_nvec = 1;
3304     -
3305     if (!server->ops->init_transform_rq) {
3306     cifs_server_dbg(VFS, "Encryption requested but transform "
3307     "callback is missing\n");
3308     return -EIO;
3309     }
3310    
3311     + tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
3312     + if (!tr_hdr)
3313     + return -ENOMEM;
3314     +
3315     + memset(&cur_rqst[0], 0, sizeof(cur_rqst));
3316     + memset(&iov, 0, sizeof(iov));
3317     + memset(tr_hdr, 0, sizeof(*tr_hdr));
3318     +
3319     + iov.iov_base = tr_hdr;
3320     + iov.iov_len = sizeof(*tr_hdr);
3321     + cur_rqst[0].rq_iov = &iov;
3322     + cur_rqst[0].rq_nvec = 1;
3323     +
3324     rc = server->ops->init_transform_rq(server, num_rqst + 1,
3325     &cur_rqst[0], rqst);
3326     if (rc)
3327     - return rc;
3328     + goto out;
3329    
3330     rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
3331     smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
3332     +out:
3333     + kfree(tr_hdr);
3334     return rc;
3335     }
3336    
3337     diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
3338     index 0456bc990b5e..62acbe27d8bf 100644
3339     --- a/fs/ext2/xattr.c
3340     +++ b/fs/ext2/xattr.c
3341     @@ -56,6 +56,7 @@
3342    
3343     #include <linux/buffer_head.h>
3344     #include <linux/init.h>
3345     +#include <linux/printk.h>
3346     #include <linux/slab.h>
3347     #include <linux/mbcache.h>
3348     #include <linux/quotaops.h>
3349     @@ -84,8 +85,8 @@
3350     printk("\n"); \
3351     } while (0)
3352     #else
3353     -# define ea_idebug(f...)
3354     -# define ea_bdebug(f...)
3355     +# define ea_idebug(inode, f...) no_printk(f)
3356     +# define ea_bdebug(bh, f...) no_printk(f)
3357     #endif
3358    
3359     static int ext2_xattr_set2(struct inode *, struct buffer_head *,
3360     @@ -864,8 +865,7 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
3361     true);
3362     if (error) {
3363     if (error == -EBUSY) {
3364     - ea_bdebug(bh, "already in cache (%d cache entries)",
3365     - atomic_read(&ext2_xattr_cache->c_entry_count));
3366     + ea_bdebug(bh, "already in cache");
3367     error = 0;
3368     }
3369     } else
3370     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3371     index d2edd6e9072f..b9473fcc110f 100644
3372     --- a/fs/ext4/inode.c
3373     +++ b/fs/ext4/inode.c
3374     @@ -4680,7 +4680,7 @@ make_io:
3375     if (end > table)
3376     end = table;
3377     while (b <= end)
3378     - sb_breadahead(sb, b++);
3379     + sb_breadahead_unmovable(sb, b++);
3380     }
3381    
3382     /*
3383     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3384     index a1eca6d30643..53d4c67a20df 100644
3385     --- a/fs/ext4/super.c
3386     +++ b/fs/ext4/super.c
3387     @@ -389,7 +389,8 @@ static void save_error_info(struct super_block *sb, const char *func,
3388     unsigned int line)
3389     {
3390     __save_error_info(sb, func, line);
3391     - ext4_commit_super(sb, 1);
3392     + if (!bdev_read_only(sb->s_bdev))
3393     + ext4_commit_super(sb, 1);
3394     }
3395    
3396     /*
3397     @@ -4283,7 +4284,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3398     /* Pre-read the descriptors into the buffer cache */
3399     for (i = 0; i < db_count; i++) {
3400     block = descriptor_loc(sb, logical_sb_block, i);
3401     - sb_breadahead(sb, block);
3402     + sb_breadahead_unmovable(sb, block);
3403     }
3404    
3405     for (i = 0; i < db_count; i++) {
3406     diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
3407     index a0eef95b9e0e..a28ffecc0f95 100644
3408     --- a/fs/f2fs/checkpoint.c
3409     +++ b/fs/f2fs/checkpoint.c
3410     @@ -1250,20 +1250,20 @@ static void unblock_operations(struct f2fs_sb_info *sbi)
3411     f2fs_unlock_all(sbi);
3412     }
3413    
3414     -void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
3415     +void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
3416     {
3417     DEFINE_WAIT(wait);
3418    
3419     for (;;) {
3420     prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
3421    
3422     - if (!get_pages(sbi, F2FS_WB_CP_DATA))
3423     + if (!get_pages(sbi, type))
3424     break;
3425    
3426     if (unlikely(f2fs_cp_error(sbi)))
3427     break;
3428    
3429     - io_schedule_timeout(5*HZ);
3430     + io_schedule_timeout(HZ/50);
3431     }
3432     finish_wait(&sbi->cp_wait, &wait);
3433     }
3434     @@ -1301,10 +1301,14 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3435     else
3436     __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
3437    
3438     - if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
3439     - is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
3440     + if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
3441     __set_ckpt_flags(ckpt, CP_FSCK_FLAG);
3442    
3443     + if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
3444     + __set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);
3445     + else
3446     + __clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);
3447     +
3448     if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
3449     __set_ckpt_flags(ckpt, CP_DISABLED_FLAG);
3450     else
3451     @@ -1384,8 +1388,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3452    
3453     /* Flush all the NAT/SIT pages */
3454     f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
3455     - f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
3456     - !f2fs_cp_error(sbi));
3457    
3458     /*
3459     * modify checkpoint
3460     @@ -1493,11 +1495,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3461    
3462     /* Here, we have one bio having CP pack except cp pack 2 page */
3463     f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
3464     - f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
3465     - !f2fs_cp_error(sbi));
3466     + /* Wait for all dirty meta pages to be submitted for IO */
3467     + f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);
3468    
3469     /* wait for previous submitted meta pages writeback */
3470     - f2fs_wait_on_all_pages_writeback(sbi);
3471     + f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
3472    
3473     /* flush all device cache */
3474     err = f2fs_flush_device_cache(sbi);
3475     @@ -1506,7 +1508,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3476    
3477     /* barrier and flush checkpoint cp pack 2 page if it can */
3478     commit_checkpoint(sbi, ckpt, start_blk);
3479     - f2fs_wait_on_all_pages_writeback(sbi);
3480     + f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
3481    
3482     /*
3483     * invalidate intermediate page cache borrowed from meta inode
3484     diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
3485     index 9046432b87c2..3edde3d6d089 100644
3486     --- a/fs/f2fs/f2fs.h
3487     +++ b/fs/f2fs/f2fs.h
3488     @@ -100,6 +100,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
3489     #define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000
3490     #define F2FS_MOUNT_RESERVE_ROOT 0x01000000
3491     #define F2FS_MOUNT_DISABLE_CHECKPOINT 0x02000000
3492     +#define F2FS_MOUNT_NORECOVERY 0x04000000
3493    
3494     #define F2FS_OPTION(sbi) ((sbi)->mount_opt)
3495     #define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
3496     @@ -3185,7 +3186,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
3497     void f2fs_update_dirty_page(struct inode *inode, struct page *page);
3498     void f2fs_remove_dirty_inode(struct inode *inode);
3499     int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
3500     -void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi);
3501     +void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
3502     int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3503     void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
3504     int __init f2fs_create_checkpoint_caches(void);
3505     diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
3506     index 5877bd729689..e611d768efde 100644
3507     --- a/fs/f2fs/gc.c
3508     +++ b/fs/f2fs/gc.c
3509     @@ -1532,11 +1532,17 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
3510     goto out;
3511     }
3512    
3513     + mutex_lock(&sbi->cp_mutex);
3514     update_fs_metadata(sbi, -secs);
3515     clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
3516     + set_sbi_flag(sbi, SBI_IS_DIRTY);
3517     + mutex_unlock(&sbi->cp_mutex);
3518     +
3519     err = f2fs_sync_fs(sbi->sb, 1);
3520     if (err) {
3521     + mutex_lock(&sbi->cp_mutex);
3522     update_fs_metadata(sbi, secs);
3523     + mutex_unlock(&sbi->cp_mutex);
3524     update_sb_metadata(sbi, secs);
3525     f2fs_commit_super(sbi, false);
3526     }
3527     diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
3528     index 8b66bc4c004b..f14401a77d60 100644
3529     --- a/fs/f2fs/node.c
3530     +++ b/fs/f2fs/node.c
3531     @@ -1562,15 +1562,16 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
3532     if (atomic && !test_opt(sbi, NOBARRIER))
3533     fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
3534    
3535     - set_page_writeback(page);
3536     - ClearPageError(page);
3537     -
3538     + /* should add to global list before clearing PAGECACHE status */
3539     if (f2fs_in_warm_node_list(sbi, page)) {
3540     seq = f2fs_add_fsync_node_entry(sbi, page);
3541     if (seq_id)
3542     *seq_id = seq;
3543     }
3544    
3545     + set_page_writeback(page);
3546     + ClearPageError(page);
3547     +
3548     fio.old_blkaddr = ni.blk_addr;
3549     f2fs_do_write_node_page(nid, &fio);
3550     set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
3551     diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
3552     index ea8dbf1458c9..5e1d4d9243a9 100644
3553     --- a/fs/f2fs/super.c
3554     +++ b/fs/f2fs/super.c
3555     @@ -439,7 +439,7 @@ static int parse_options(struct super_block *sb, char *options)
3556     break;
3557     case Opt_norecovery:
3558     /* this option mounts f2fs with ro */
3559     - set_opt(sbi, DISABLE_ROLL_FORWARD);
3560     + set_opt(sbi, NORECOVERY);
3561     if (!f2fs_readonly(sb))
3562     return -EINVAL;
3563     break;
3564     @@ -1105,7 +1105,7 @@ static void f2fs_put_super(struct super_block *sb)
3565     /* our cp_error case, we can wait for any writeback page */
3566     f2fs_flush_merged_writes(sbi);
3567    
3568     - f2fs_wait_on_all_pages_writeback(sbi);
3569     + f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
3570    
3571     f2fs_bug_on(sbi, sbi->fsync_node_num);
3572    
3573     @@ -1348,6 +1348,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
3574     }
3575     if (test_opt(sbi, DISABLE_ROLL_FORWARD))
3576     seq_puts(seq, ",disable_roll_forward");
3577     + if (test_opt(sbi, NORECOVERY))
3578     + seq_puts(seq, ",norecovery");
3579     if (test_opt(sbi, DISCARD))
3580     seq_puts(seq, ",discard");
3581     else
3582     @@ -1824,6 +1826,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
3583     int offset = off & (sb->s_blocksize - 1);
3584     size_t towrite = len;
3585     struct page *page;
3586     + void *fsdata = NULL;
3587     char *kaddr;
3588     int err = 0;
3589     int tocopy;
3590     @@ -1833,7 +1836,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
3591     towrite);
3592     retry:
3593     err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
3594     - &page, NULL);
3595     + &page, &fsdata);
3596     if (unlikely(err)) {
3597     if (err == -ENOMEM) {
3598     congestion_wait(BLK_RW_ASYNC, HZ/50);
3599     @@ -1849,7 +1852,7 @@ retry:
3600     flush_dcache_page(page);
3601    
3602     a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
3603     - page, NULL);
3604     + page, fsdata);
3605     offset = 0;
3606     towrite -= tocopy;
3607     off += tocopy;
3608     @@ -3488,7 +3491,8 @@ try_onemore:
3609     goto reset_checkpoint;
3610    
3611     /* recover fsynced data */
3612     - if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
3613     + if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
3614     + !test_opt(sbi, NORECOVERY)) {
3615     /*
3616     * mount should be failed, when device has readonly mode, and
3617     * previous checkpoint was not done by clean system shutdown.
3618     diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
3619     index f39924ba050b..fc775b0b5194 100644
3620     --- a/fs/nfs/callback_proc.c
3621     +++ b/fs/nfs/callback_proc.c
3622     @@ -130,6 +130,8 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
3623    
3624     list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
3625     list_for_each_entry(lo, &server->layouts, plh_layouts) {
3626     + if (!pnfs_layout_is_valid(lo))
3627     + continue;
3628     if (stateid != NULL &&
3629     !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
3630     continue;
3631     diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
3632     index 29f00da8a0b7..6b0bf4ebd812 100644
3633     --- a/fs/nfs/direct.c
3634     +++ b/fs/nfs/direct.c
3635     @@ -571,6 +571,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
3636     l_ctx = nfs_get_lock_context(dreq->ctx);
3637     if (IS_ERR(l_ctx)) {
3638     result = PTR_ERR(l_ctx);
3639     + nfs_direct_req_release(dreq);
3640     goto out_release;
3641     }
3642     dreq->l_ctx = l_ctx;
3643     @@ -989,6 +990,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
3644     l_ctx = nfs_get_lock_context(dreq->ctx);
3645     if (IS_ERR(l_ctx)) {
3646     result = PTR_ERR(l_ctx);
3647     + nfs_direct_req_release(dreq);
3648     goto out_release;
3649     }
3650     dreq->l_ctx = l_ctx;
3651     diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
3652     index 2a03bfeec10a..3802c88e8372 100644
3653     --- a/fs/nfs/inode.c
3654     +++ b/fs/nfs/inode.c
3655     @@ -959,16 +959,16 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry,
3656     struct file *filp)
3657     {
3658     struct nfs_open_context *ctx;
3659     - const struct cred *cred = get_current_cred();
3660    
3661     ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
3662     - if (!ctx) {
3663     - put_cred(cred);
3664     + if (!ctx)
3665     return ERR_PTR(-ENOMEM);
3666     - }
3667     nfs_sb_active(dentry->d_sb);
3668     ctx->dentry = dget(dentry);
3669     - ctx->cred = cred;
3670     + if (filp)
3671     + ctx->cred = get_cred(filp->f_cred);
3672     + else
3673     + ctx->cred = get_current_cred();
3674     ctx->ll_cred = NULL;
3675     ctx->state = NULL;
3676     ctx->mode = f_mode;
3677     diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
3678     index 54f1c1f626fc..fb55c04cdc6b 100644
3679     --- a/fs/nfs/nfs4file.c
3680     +++ b/fs/nfs/nfs4file.c
3681     @@ -210,6 +210,9 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
3682     if (remap_flags & ~REMAP_FILE_ADVISORY)
3683     return -EINVAL;
3684    
3685     + if (IS_SWAPFILE(dst_inode) || IS_SWAPFILE(src_inode))
3686     + return -ETXTBSY;
3687     +
3688     /* check alignment w.r.t. clone_blksize */
3689     ret = -EINVAL;
3690     if (bs) {
3691     diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
3692     index 8b7c525dbbf7..b736912098ee 100644
3693     --- a/fs/nfs/pagelist.c
3694     +++ b/fs/nfs/pagelist.c
3695     @@ -886,15 +886,6 @@ static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
3696     pgio->pg_mirror_count = mirror_count;
3697     }
3698    
3699     -/*
3700     - * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
3701     - */
3702     -void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
3703     -{
3704     - pgio->pg_mirror_count = 1;
3705     - pgio->pg_mirror_idx = 0;
3706     -}
3707     -
3708     static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
3709     {
3710     pgio->pg_mirror_count = 1;
3711     @@ -1320,6 +1311,14 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
3712     }
3713     }
3714    
3715     +/*
3716     + * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
3717     + */
3718     +void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
3719     +{
3720     + nfs_pageio_complete(pgio);
3721     +}
3722     +
3723     int __init nfs_init_nfspagecache(void)
3724     {
3725     nfs_page_cachep = kmem_cache_create("nfs_page",
3726     diff --git a/include/acpi/processor.h b/include/acpi/processor.h
3727     index 47805172e73d..683e124ad517 100644
3728     --- a/include/acpi/processor.h
3729     +++ b/include/acpi/processor.h
3730     @@ -297,6 +297,14 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
3731     }
3732     #endif
3733    
3734     +static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg,
3735     + bool direct)
3736     +{
3737     + if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
3738     + return fn(arg);
3739     + return work_on_cpu(cpu, fn, arg);
3740     +}
3741     +
3742     /* in processor_perflib.c */
3743    
3744     #ifdef CONFIG_CPU_FREQ
3745     diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
3746     index 18d8e2d8210f..53759d2b9c26 100644
3747     --- a/include/asm-generic/mshyperv.h
3748     +++ b/include/asm-generic/mshyperv.h
3749     @@ -163,7 +163,7 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
3750     return nr_bank;
3751     }
3752    
3753     -void hyperv_report_panic(struct pt_regs *regs, long err);
3754     +void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
3755     void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
3756     bool hv_is_hyperv_initialized(void);
3757     void hyperv_cleanup(void);
3758     diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h
3759     index f6a7ba4dccd4..3fee04f81439 100644
3760     --- a/include/keys/big_key-type.h
3761     +++ b/include/keys/big_key-type.h
3762     @@ -17,6 +17,6 @@ extern void big_key_free_preparse(struct key_preparsed_payload *prep);
3763     extern void big_key_revoke(struct key *key);
3764     extern void big_key_destroy(struct key *key);
3765     extern void big_key_describe(const struct key *big_key, struct seq_file *m);
3766     -extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen);
3767     +extern long big_key_read(const struct key *key, char *buffer, size_t buflen);
3768    
3769     #endif /* _KEYS_BIG_KEY_TYPE_H */
3770     diff --git a/include/keys/user-type.h b/include/keys/user-type.h
3771     index d5e73266a81a..be61fcddc02a 100644
3772     --- a/include/keys/user-type.h
3773     +++ b/include/keys/user-type.h
3774     @@ -41,8 +41,7 @@ extern int user_update(struct key *key, struct key_preparsed_payload *prep);
3775     extern void user_revoke(struct key *key);
3776     extern void user_destroy(struct key *key);
3777     extern void user_describe(const struct key *user, struct seq_file *m);
3778     -extern long user_read(const struct key *key,
3779     - char __user *buffer, size_t buflen);
3780     +extern long user_read(const struct key *key, char *buffer, size_t buflen);
3781    
3782     static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key)
3783     {
3784     diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
3785     index 7b73ef7f902d..b56cc825f64d 100644
3786     --- a/include/linux/buffer_head.h
3787     +++ b/include/linux/buffer_head.h
3788     @@ -189,6 +189,8 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
3789     void __brelse(struct buffer_head *);
3790     void __bforget(struct buffer_head *);
3791     void __breadahead(struct block_device *, sector_t block, unsigned int size);
3792     +void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
3793     + gfp_t gfp);
3794     struct buffer_head *__bread_gfp(struct block_device *,
3795     sector_t block, unsigned size, gfp_t gfp);
3796     void invalidate_bh_lrus(void);
3797     @@ -319,6 +321,12 @@ sb_breadahead(struct super_block *sb, sector_t block)
3798     __breadahead(sb->s_bdev, block, sb->s_blocksize);
3799     }
3800    
3801     +static inline void
3802     +sb_breadahead_unmovable(struct super_block *sb, sector_t block)
3803     +{
3804     + __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
3805     +}
3806     +
3807     static inline struct buffer_head *
3808     sb_getblk(struct super_block *sb, sector_t block)
3809     {
3810     diff --git a/include/linux/compiler.h b/include/linux/compiler.h
3811     index 5e88e7e33abe..034b0a644efc 100644
3812     --- a/include/linux/compiler.h
3813     +++ b/include/linux/compiler.h
3814     @@ -347,7 +347,7 @@ static inline void *offset_to_ptr(const int *off)
3815     * compiler has support to do so.
3816     */
3817     #define compiletime_assert(condition, msg) \
3818     - _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
3819     + _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
3820    
3821     #define compiletime_assert_atomic_type(t) \
3822     compiletime_assert(__native_word(t), \
3823     diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
3824     index 284738996028..6bb6f718a102 100644
3825     --- a/include/linux/f2fs_fs.h
3826     +++ b/include/linux/f2fs_fs.h
3827     @@ -124,6 +124,7 @@ struct f2fs_super_block {
3828     /*
3829     * For checkpoint
3830     */
3831     +#define CP_RESIZEFS_FLAG 0x00004000
3832     #define CP_DISABLED_QUICK_FLAG 0x00002000
3833     #define CP_DISABLED_FLAG 0x00001000
3834     #define CP_QUOTA_NEED_FSCK_FLAG 0x00000800
3835     diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
3836     index 53fc34f930d0..8a03f392f368 100644
3837     --- a/include/linux/hugetlb.h
3838     +++ b/include/linux/hugetlb.h
3839     @@ -298,7 +298,10 @@ static inline bool is_file_hugepages(struct file *file)
3840     return is_file_shm_hugepages(file);
3841     }
3842    
3843     -
3844     +static inline struct hstate *hstate_inode(struct inode *i)
3845     +{
3846     + return HUGETLBFS_SB(i->i_sb)->hstate;
3847     +}
3848     #else /* !CONFIG_HUGETLBFS */
3849    
3850     #define is_file_hugepages(file) false
3851     @@ -310,6 +313,10 @@ hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
3852     return ERR_PTR(-ENOSYS);
3853     }
3854    
3855     +static inline struct hstate *hstate_inode(struct inode *i)
3856     +{
3857     + return NULL;
3858     +}
3859     #endif /* !CONFIG_HUGETLBFS */
3860    
3861     #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
3862     @@ -379,11 +386,6 @@ extern unsigned int default_hstate_idx;
3863    
3864     #define default_hstate (hstates[default_hstate_idx])
3865    
3866     -static inline struct hstate *hstate_inode(struct inode *i)
3867     -{
3868     - return HUGETLBFS_SB(i->i_sb)->hstate;
3869     -}
3870     -
3871     static inline struct hstate *hstate_file(struct file *f)
3872     {
3873     return hstate_inode(file_inode(f));
3874     @@ -636,11 +638,6 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
3875     return NULL;
3876     }
3877    
3878     -static inline struct hstate *hstate_inode(struct inode *i)
3879     -{
3880     - return NULL;
3881     -}
3882     -
3883     static inline struct hstate *page_hstate(struct page *page)
3884     {
3885     return NULL;
3886     diff --git a/include/linux/key-type.h b/include/linux/key-type.h
3887     index 4ded94bcf274..2ab2d6d6aeab 100644
3888     --- a/include/linux/key-type.h
3889     +++ b/include/linux/key-type.h
3890     @@ -127,7 +127,7 @@ struct key_type {
3891     * much is copied into the buffer
3892     * - shouldn't do the copy if the buffer is NULL
3893     */
3894     - long (*read)(const struct key *key, char __user *buffer, size_t buflen);
3895     + long (*read)(const struct key *key, char *buffer, size_t buflen);
3896    
3897     /* handle request_key() for this type instead of invoking
3898     * /sbin/request-key (optional)
3899     diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
3900     index 4f052496cdfd..0a4f54dd4737 100644
3901     --- a/include/linux/percpu_counter.h
3902     +++ b/include/linux/percpu_counter.h
3903     @@ -78,9 +78,9 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc)
3904     */
3905     static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
3906     {
3907     - s64 ret = fbc->count;
3908     + /* Prevent reloads of fbc->count */
3909     + s64 ret = READ_ONCE(fbc->count);
3910    
3911     - barrier(); /* Prevent reloads of fbc->count */
3912     if (ret >= 0)
3913     return ret;
3914     return 0;
3915     diff --git a/include/linux/swapops.h b/include/linux/swapops.h
3916     index 877fd239b6ff..3208a520d0be 100644
3917     --- a/include/linux/swapops.h
3918     +++ b/include/linux/swapops.h
3919     @@ -348,7 +348,8 @@ static inline void num_poisoned_pages_inc(void)
3920     }
3921     #endif
3922    
3923     -#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
3924     +#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \
3925     + defined(CONFIG_DEVICE_PRIVATE)
3926     static inline int non_swap_entry(swp_entry_t entry)
3927     {
3928     return swp_type(entry) >= MAX_SWAPFILES;
3929     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3930     index 013780ef0bd7..e1a65303cfd7 100644
3931     --- a/kernel/bpf/verifier.c
3932     +++ b/kernel/bpf/verifier.c
3933     @@ -201,8 +201,7 @@ struct bpf_call_arg_meta {
3934     bool pkt_access;
3935     int regno;
3936     int access_size;
3937     - s64 msize_smax_value;
3938     - u64 msize_umax_value;
3939     + u64 msize_max_value;
3940     int ref_obj_id;
3941     int func_id;
3942     };
3943     @@ -3377,8 +3376,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
3944     /* remember the mem_size which may be used later
3945     * to refine return values.
3946     */
3947     - meta->msize_smax_value = reg->smax_value;
3948     - meta->msize_umax_value = reg->umax_value;
3949     + meta->msize_max_value = reg->umax_value;
3950    
3951     /* The register is SCALAR_VALUE; the access check
3952     * happens using its boundaries.
3953     @@ -3866,21 +3864,44 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
3954     return 0;
3955     }
3956    
3957     -static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
3958     - int func_id,
3959     - struct bpf_call_arg_meta *meta)
3960     +static int do_refine_retval_range(struct bpf_verifier_env *env,
3961     + struct bpf_reg_state *regs, int ret_type,
3962     + int func_id, struct bpf_call_arg_meta *meta)
3963     {
3964     struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
3965     + struct bpf_reg_state tmp_reg = *ret_reg;
3966     + bool ret;
3967    
3968     if (ret_type != RET_INTEGER ||
3969     (func_id != BPF_FUNC_get_stack &&
3970     func_id != BPF_FUNC_probe_read_str))
3971     - return;
3972     + return 0;
3973     +
3974     + /* Error case where ret is in interval [S32MIN, -1]. */
3975     + ret_reg->smin_value = S32_MIN;
3976     + ret_reg->smax_value = -1;
3977     +
3978     + __reg_deduce_bounds(ret_reg);
3979     + __reg_bound_offset(ret_reg);
3980     + __update_reg_bounds(ret_reg);
3981     +
3982     + ret = push_stack(env, env->insn_idx + 1, env->insn_idx, false);
3983     + if (!ret)
3984     + return -EFAULT;
3985     +
3986     + *ret_reg = tmp_reg;
3987     +
3988     + /* Success case where ret is in range [0, msize_max_value]. */
3989     + ret_reg->smin_value = 0;
3990     + ret_reg->smax_value = meta->msize_max_value;
3991     + ret_reg->umin_value = ret_reg->smin_value;
3992     + ret_reg->umax_value = ret_reg->smax_value;
3993    
3994     - ret_reg->smax_value = meta->msize_smax_value;
3995     - ret_reg->umax_value = meta->msize_umax_value;
3996     __reg_deduce_bounds(ret_reg);
3997     __reg_bound_offset(ret_reg);
3998     + __update_reg_bounds(ret_reg);
3999     +
4000     + return 0;
4001     }
4002    
4003     static int
4004     @@ -4112,7 +4133,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
4005     regs[BPF_REG_0].ref_obj_id = id;
4006     }
4007    
4008     - do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
4009     + err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta);
4010     + if (err)
4011     + return err;
4012    
4013     err = check_map_func_compatibility(env, meta.map_ptr, func_id);
4014     if (err)
4015     diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
4016     index 551b0eb7028a..2a0c4985f38e 100644
4017     --- a/kernel/dma/coherent.c
4018     +++ b/kernel/dma/coherent.c
4019     @@ -134,7 +134,7 @@ static void *__dma_alloc_from_coherent(struct device *dev,
4020    
4021     spin_lock_irqsave(&mem->spinlock, flags);
4022    
4023     - if (unlikely(size > (mem->size << PAGE_SHIFT)))
4024     + if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
4025     goto err;
4026    
4027     pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
4028     @@ -144,8 +144,9 @@ static void *__dma_alloc_from_coherent(struct device *dev,
4029     /*
4030     * Memory was found in the coherent area.
4031     */
4032     - *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
4033     - ret = mem->virt_base + (pageno << PAGE_SHIFT);
4034     + *dma_handle = dma_get_device_base(dev, mem) +
4035     + ((dma_addr_t)pageno << PAGE_SHIFT);
4036     + ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
4037     spin_unlock_irqrestore(&mem->spinlock, flags);
4038     memset(ret, 0, size);
4039     return ret;
4040     @@ -194,7 +195,7 @@ static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
4041     int order, void *vaddr)
4042     {
4043     if (mem && vaddr >= mem->virt_base && vaddr <
4044     - (mem->virt_base + (mem->size << PAGE_SHIFT))) {
4045     + (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
4046     int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
4047     unsigned long flags;
4048    
4049     @@ -238,10 +239,10 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
4050     struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
4051     {
4052     if (mem && vaddr >= mem->virt_base && vaddr + size <=
4053     - (mem->virt_base + (mem->size << PAGE_SHIFT))) {
4054     + (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
4055     unsigned long off = vma->vm_pgoff;
4056     int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
4057     - int user_count = vma_pages(vma);
4058     + unsigned long user_count = vma_pages(vma);
4059     int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
4060    
4061     *ret = -ENXIO;
4062     diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
4063     index 4ad74f5987ea..cb6425e52bf7 100644
4064     --- a/kernel/dma/debug.c
4065     +++ b/kernel/dma/debug.c
4066     @@ -137,9 +137,12 @@ static const char *const maperr2str[] = {
4067     [MAP_ERR_CHECKED] = "dma map error checked",
4068     };
4069    
4070     -static const char *type2name[5] = { "single", "page",
4071     - "scather-gather", "coherent",
4072     - "resource" };
4073     +static const char *type2name[] = {
4074     + [dma_debug_single] = "single",
4075     + [dma_debug_sg] = "scather-gather",
4076     + [dma_debug_coherent] = "coherent",
4077     + [dma_debug_resource] = "resource",
4078     +};
4079    
4080     static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
4081     "DMA_FROM_DEVICE", "DMA_NONE" };
4082     diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
4083     index c513031cd7e3..e09562818bb7 100644
4084     --- a/kernel/locking/locktorture.c
4085     +++ b/kernel/locking/locktorture.c
4086     @@ -697,10 +697,10 @@ static void __torture_print_stats(char *page,
4087     if (statp[i].n_lock_fail)
4088     fail = true;
4089     sum += statp[i].n_lock_acquired;
4090     - if (max < statp[i].n_lock_fail)
4091     - max = statp[i].n_lock_fail;
4092     - if (min > statp[i].n_lock_fail)
4093     - min = statp[i].n_lock_fail;
4094     + if (max < statp[i].n_lock_acquired)
4095     + max = statp[i].n_lock_acquired;
4096     + if (min > statp[i].n_lock_acquired)
4097     + min = statp[i].n_lock_acquired;
4098     }
4099     page += sprintf(page,
4100     "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
4101     diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
4102     index f61d834e02fe..6118d99117da 100644
4103     --- a/lib/Kconfig.debug
4104     +++ b/lib/Kconfig.debug
4105     @@ -223,6 +223,8 @@ config DEBUG_INFO_DWARF4
4106     config DEBUG_INFO_BTF
4107     bool "Generate BTF typeinfo"
4108     depends on DEBUG_INFO
4109     + depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
4110     + depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
4111     help
4112     Generate deduplicated BTF type information from DWARF debug info.
4113     Turning this on expects presence of pahole tool, which will convert
4114     diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
4115     index 3e1a90669006..ad53eb31d40f 100644
4116     --- a/net/dns_resolver/dns_key.c
4117     +++ b/net/dns_resolver/dns_key.c
4118     @@ -302,7 +302,7 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
4119     * - the key's semaphore is read-locked
4120     */
4121     static long dns_resolver_read(const struct key *key,
4122     - char __user *buffer, size_t buflen)
4123     + char *buffer, size_t buflen)
4124     {
4125     int err = PTR_ERR(key->payload.data[dns_key_error]);
4126    
4127     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4128     index 068daff41f6e..f7129232c825 100644
4129     --- a/net/netfilter/nf_tables_api.c
4130     +++ b/net/netfilter/nf_tables_api.c
4131     @@ -3598,7 +3598,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
4132     NFT_SET_INTERVAL | NFT_SET_TIMEOUT |
4133     NFT_SET_MAP | NFT_SET_EVAL |
4134     NFT_SET_OBJECT))
4135     - return -EINVAL;
4136     + return -EOPNOTSUPP;
4137     /* Only one of these operations is supported */
4138     if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
4139     (NFT_SET_MAP | NFT_SET_OBJECT))
4140     @@ -3636,7 +3636,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
4141     objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE]));
4142     if (objtype == NFT_OBJECT_UNSPEC ||
4143     objtype > NFT_OBJECT_MAX)
4144     - return -EINVAL;
4145     + return -EOPNOTSUPP;
4146     } else if (flags & NFT_SET_OBJECT)
4147     return -EINVAL;
4148     else
4149     diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
4150     index 6c3f35fac42d..0c98313dd7a8 100644
4151     --- a/net/rxrpc/key.c
4152     +++ b/net/rxrpc/key.c
4153     @@ -31,7 +31,7 @@ static void rxrpc_free_preparse_s(struct key_preparsed_payload *);
4154     static void rxrpc_destroy(struct key *);
4155     static void rxrpc_destroy_s(struct key *);
4156     static void rxrpc_describe(const struct key *, struct seq_file *);
4157     -static long rxrpc_read(const struct key *, char __user *, size_t);
4158     +static long rxrpc_read(const struct key *, char *, size_t);
4159    
4160     /*
4161     * rxrpc defined keys take an arbitrary string as the description and an
4162     @@ -1042,12 +1042,12 @@ EXPORT_SYMBOL(rxrpc_get_null_key);
4163     * - this returns the result in XDR form
4164     */
4165     static long rxrpc_read(const struct key *key,
4166     - char __user *buffer, size_t buflen)
4167     + char *buffer, size_t buflen)
4168     {
4169     const struct rxrpc_key_token *token;
4170     const struct krb5_principal *princ;
4171     size_t size;
4172     - __be32 __user *xdr, *oldxdr;
4173     + __be32 *xdr, *oldxdr;
4174     u32 cnlen, toksize, ntoks, tok, zero;
4175     u16 toksizes[AFSTOKEN_MAX];
4176     int loop;
4177     @@ -1124,30 +1124,25 @@ static long rxrpc_read(const struct key *key,
4178     if (!buffer || buflen < size)
4179     return size;
4180    
4181     - xdr = (__be32 __user *) buffer;
4182     + xdr = (__be32 *)buffer;
4183     zero = 0;
4184     #define ENCODE(x) \
4185     do { \
4186     - __be32 y = htonl(x); \
4187     - if (put_user(y, xdr++) < 0) \
4188     - goto fault; \
4189     + *xdr++ = htonl(x); \
4190     } while(0)
4191     #define ENCODE_DATA(l, s) \
4192     do { \
4193     u32 _l = (l); \
4194     ENCODE(l); \
4195     - if (copy_to_user(xdr, (s), _l) != 0) \
4196     - goto fault; \
4197     - if (_l & 3 && \
4198     - copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \
4199     - goto fault; \
4200     + memcpy(xdr, (s), _l); \
4201     + if (_l & 3) \
4202     + memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \
4203     xdr += (_l + 3) >> 2; \
4204     } while(0)
4205     #define ENCODE64(x) \
4206     do { \
4207     __be64 y = cpu_to_be64(x); \
4208     - if (copy_to_user(xdr, &y, 8) != 0) \
4209     - goto fault; \
4210     + memcpy(xdr, &y, 8); \
4211     xdr += 8 >> 2; \
4212     } while(0)
4213     #define ENCODE_STR(s) \
4214     @@ -1238,8 +1233,4 @@ static long rxrpc_read(const struct key *key,
4215     ASSERTCMP((char __user *) xdr - buffer, ==, size);
4216     _leave(" = %zu", size);
4217     return size;
4218     -
4219     -fault:
4220     - _leave(" = -EFAULT");
4221     - return -EFAULT;
4222     }
4223     diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
4224     index d75fddca44c9..ff5fcb3e1208 100644
4225     --- a/net/sunrpc/auth_gss/auth_gss.c
4226     +++ b/net/sunrpc/auth_gss/auth_gss.c
4227     @@ -20,6 +20,7 @@
4228     #include <linux/sunrpc/clnt.h>
4229     #include <linux/sunrpc/auth.h>
4230     #include <linux/sunrpc/auth_gss.h>
4231     +#include <linux/sunrpc/gss_krb5.h>
4232     #include <linux/sunrpc/svcauth_gss.h>
4233     #include <linux/sunrpc/gss_err.h>
4234     #include <linux/workqueue.h>
4235     @@ -1050,7 +1051,7 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
4236     goto err_put_mech;
4237     auth = &gss_auth->rpc_auth;
4238     auth->au_cslack = GSS_CRED_SLACK >> 2;
4239     - auth->au_rslack = GSS_VERF_SLACK >> 2;
4240     + auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2;
4241     auth->au_verfsize = GSS_VERF_SLACK >> 2;
4242     auth->au_ralign = GSS_VERF_SLACK >> 2;
4243     auth->au_flags = 0;
4244     @@ -1934,35 +1935,69 @@ gss_unwrap_resp_auth(struct rpc_cred *cred)
4245     return 0;
4246     }
4247    
4248     +/*
4249     + * RFC 2203, Section 5.3.2.2
4250     + *
4251     + * struct rpc_gss_integ_data {
4252     + * opaque databody_integ<>;
4253     + * opaque checksum<>;
4254     + * };
4255     + *
4256     + * struct rpc_gss_data_t {
4257     + * unsigned int seq_num;
4258     + * proc_req_arg_t arg;
4259     + * };
4260     + */
4261     static int
4262     gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
4263     struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
4264     struct xdr_stream *xdr)
4265     {
4266     - struct xdr_buf integ_buf, *rcv_buf = &rqstp->rq_rcv_buf;
4267     - u32 data_offset, mic_offset, integ_len, maj_stat;
4268     + struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf;
4269     struct rpc_auth *auth = cred->cr_auth;
4270     + u32 len, offset, seqno, maj_stat;
4271     struct xdr_netobj mic;
4272     - __be32 *p;
4273     + int ret;
4274    
4275     - p = xdr_inline_decode(xdr, 2 * sizeof(*p));
4276     - if (unlikely(!p))
4277     + ret = -EIO;
4278     + mic.data = NULL;
4279     +
4280     + /* opaque databody_integ<>; */
4281     + if (xdr_stream_decode_u32(xdr, &len))
4282     goto unwrap_failed;
4283     - integ_len = be32_to_cpup(p++);
4284     - if (integ_len & 3)
4285     + if (len & 3)
4286     goto unwrap_failed;
4287     - data_offset = (u8 *)(p) - (u8 *)rcv_buf->head[0].iov_base;
4288     - mic_offset = integ_len + data_offset;
4289     - if (mic_offset > rcv_buf->len)
4290     + offset = rcv_buf->len - xdr_stream_remaining(xdr);
4291     + if (xdr_stream_decode_u32(xdr, &seqno))
4292     goto unwrap_failed;
4293     - if (be32_to_cpup(p) != rqstp->rq_seqno)
4294     + if (seqno != rqstp->rq_seqno)
4295     goto bad_seqno;
4296     + if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len))
4297     + goto unwrap_failed;
4298    
4299     - if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset, integ_len))
4300     + /*
4301     + * The xdr_stream now points to the beginning of the
4302     + * upper layer payload, to be passed below to
4303     + * rpcauth_unwrap_resp_decode(). The checksum, which
4304     + * follows the upper layer payload in @rcv_buf, is
4305     + * located and parsed without updating the xdr_stream.
4306     + */
4307     +
4308     + /* opaque checksum<>; */
4309     + offset += len;
4310     + if (xdr_decode_word(rcv_buf, offset, &len))
4311     + goto unwrap_failed;
4312     + offset += sizeof(__be32);
4313     + if (offset + len > rcv_buf->len)
4314     goto unwrap_failed;
4315     - if (xdr_buf_read_mic(rcv_buf, &mic, mic_offset))
4316     + mic.len = len;
4317     + mic.data = kmalloc(len, GFP_NOFS);
4318     + if (!mic.data)
4319     + goto unwrap_failed;
4320     + if (read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len))
4321     goto unwrap_failed;
4322     - maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
4323     +
4324     + maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic);
4325     if (maj_stat == GSS_S_CONTEXT_EXPIRED)
4326     clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
4327     if (maj_stat != GSS_S_COMPLETE)
4328     @@ -1970,16 +2005,21 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
4329    
4330     auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len);
4331     auth->au_ralign = auth->au_verfsize + 2;
4332     - return 0;
4333     + ret = 0;
4334     +
4335     +out:
4336     + kfree(mic.data);
4337     + return ret;
4338     +
4339     unwrap_failed:
4340     trace_rpcgss_unwrap_failed(task);
4341     - return -EIO;
4342     + goto out;
4343     bad_seqno:
4344     - trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(p));
4345     - return -EIO;
4346     + trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno);
4347     + goto out;
4348     bad_mic:
4349     trace_rpcgss_verify_mic(task, maj_stat);
4350     - return -EIO;
4351     + goto out;
4352     }
4353    
4354     static int
4355     diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
4356     index 3049af269fbf..c5dba371a765 100644
4357     --- a/net/xdp/xdp_umem.c
4358     +++ b/net/xdp/xdp_umem.c
4359     @@ -343,7 +343,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
4360     u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
4361     unsigned int chunks, chunks_per_page;
4362     u64 addr = mr->addr, size = mr->len;
4363     - int size_chk, err;
4364     + int err;
4365    
4366     if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
4367     /* Strictly speaking we could support this, if:
4368     @@ -382,8 +382,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
4369     return -EINVAL;
4370     }
4371    
4372     - size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
4373     - if (size_chk < 0)
4374     + if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
4375     return -EINVAL;
4376    
4377     umem->address = (unsigned long)addr;
4378     diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
4379     index d426fc01c529..7181a30666b4 100644
4380     --- a/net/xdp/xsk.c
4381     +++ b/net/xdp/xsk.c
4382     @@ -129,8 +129,9 @@ static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
4383     u64 page_start = addr & ~(PAGE_SIZE - 1);
4384     u64 first_len = PAGE_SIZE - (addr - page_start);
4385    
4386     - memcpy(to_buf, from_buf, first_len + metalen);
4387     - memcpy(next_pg_addr, from_buf + first_len, len - first_len);
4388     + memcpy(to_buf, from_buf, first_len);
4389     + memcpy(next_pg_addr, from_buf + first_len,
4390     + len + metalen - first_len);
4391    
4392     return;
4393     }
4394     diff --git a/security/keys/big_key.c b/security/keys/big_key.c
4395     index 001abe530a0d..82008f900930 100644
4396     --- a/security/keys/big_key.c
4397     +++ b/security/keys/big_key.c
4398     @@ -352,7 +352,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)
4399     * read the key data
4400     * - the key's semaphore is read-locked
4401     */
4402     -long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
4403     +long big_key_read(const struct key *key, char *buffer, size_t buflen)
4404     {
4405     size_t datalen = (size_t)key->payload.data[big_key_len];
4406     long ret;
4407     @@ -391,9 +391,8 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
4408    
4409     ret = datalen;
4410    
4411     - /* copy decrypted data to user */
4412     - if (copy_to_user(buffer, buf->virt, datalen) != 0)
4413     - ret = -EFAULT;
4414     + /* copy out decrypted data */
4415     + memcpy(buffer, buf->virt, datalen);
4416    
4417     err_fput:
4418     fput(file);
4419     @@ -401,9 +400,7 @@ error:
4420     big_key_free_buffer(buf);
4421     } else {
4422     ret = datalen;
4423     - if (copy_to_user(buffer, key->payload.data[big_key_data],
4424     - datalen) != 0)
4425     - ret = -EFAULT;
4426     + memcpy(buffer, key->payload.data[big_key_data], datalen);
4427     }
4428    
4429     return ret;
4430     diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
4431     index 60720f58cbe0..f6797ba44bf7 100644
4432     --- a/security/keys/encrypted-keys/encrypted.c
4433     +++ b/security/keys/encrypted-keys/encrypted.c
4434     @@ -902,14 +902,14 @@ out:
4435     }
4436    
4437     /*
4438     - * encrypted_read - format and copy the encrypted data to userspace
4439     + * encrypted_read - format and copy out the encrypted data
4440     *
4441     * The resulting datablob format is:
4442     * <master-key name> <decrypted data length> <encrypted iv> <encrypted data>
4443     *
4444     * On success, return to userspace the encrypted key datablob size.
4445     */
4446     -static long encrypted_read(const struct key *key, char __user *buffer,
4447     +static long encrypted_read(const struct key *key, char *buffer,
4448     size_t buflen)
4449     {
4450     struct encrypted_key_payload *epayload;
4451     @@ -957,8 +957,7 @@ static long encrypted_read(const struct key *key, char __user *buffer,
4452     key_put(mkey);
4453     memzero_explicit(derived_key, sizeof(derived_key));
4454    
4455     - if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0)
4456     - ret = -EFAULT;
4457     + memcpy(buffer, ascii_buf, asciiblob_len);
4458     kzfree(ascii_buf);
4459    
4460     return asciiblob_len;
4461     diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
4462     index d1a3dea58dee..106e16f9006b 100644
4463     --- a/security/keys/keyctl.c
4464     +++ b/security/keys/keyctl.c
4465     @@ -797,6 +797,21 @@ error:
4466     return ret;
4467     }
4468    
4469     +/*
4470     + * Call the read method
4471     + */
4472     +static long __keyctl_read_key(struct key *key, char *buffer, size_t buflen)
4473     +{
4474     + long ret;
4475     +
4476     + down_read(&key->sem);
4477     + ret = key_validate(key);
4478     + if (ret == 0)
4479     + ret = key->type->read(key, buffer, buflen);
4480     + up_read(&key->sem);
4481     + return ret;
4482     +}
4483     +
4484     /*
4485     * Read a key's payload.
4486     *
4487     @@ -812,26 +827,27 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
4488     struct key *key;
4489     key_ref_t key_ref;
4490     long ret;
4491     + char *key_data;
4492    
4493     /* find the key first */
4494     key_ref = lookup_user_key(keyid, 0, 0);
4495     if (IS_ERR(key_ref)) {
4496     ret = -ENOKEY;
4497     - goto error;
4498     + goto out;
4499     }
4500    
4501     key = key_ref_to_ptr(key_ref);
4502    
4503     ret = key_read_state(key);
4504     if (ret < 0)
4505     - goto error2; /* Negatively instantiated */
4506     + goto key_put_out; /* Negatively instantiated */
4507    
4508     /* see if we can read it directly */
4509     ret = key_permission(key_ref, KEY_NEED_READ);
4510     if (ret == 0)
4511     goto can_read_key;
4512     if (ret != -EACCES)
4513     - goto error2;
4514     + goto key_put_out;
4515    
4516     /* we can't; see if it's searchable from this process's keyrings
4517     * - we automatically take account of the fact that it may be
4518     @@ -839,26 +855,51 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
4519     */
4520     if (!is_key_possessed(key_ref)) {
4521     ret = -EACCES;
4522     - goto error2;
4523     + goto key_put_out;
4524     }
4525    
4526     /* the key is probably readable - now try to read it */
4527     can_read_key:
4528     - ret = -EOPNOTSUPP;
4529     - if (key->type->read) {
4530     - /* Read the data with the semaphore held (since we might sleep)
4531     - * to protect against the key being updated or revoked.
4532     - */
4533     - down_read(&key->sem);
4534     - ret = key_validate(key);
4535     - if (ret == 0)
4536     - ret = key->type->read(key, buffer, buflen);
4537     - up_read(&key->sem);
4538     + if (!key->type->read) {
4539     + ret = -EOPNOTSUPP;
4540     + goto key_put_out;
4541     }
4542    
4543     -error2:
4544     + if (!buffer || !buflen) {
4545     + /* Get the key length from the read method */
4546     + ret = __keyctl_read_key(key, NULL, 0);
4547     + goto key_put_out;
4548     + }
4549     +
4550     + /*
4551     + * Read the data with the semaphore held (since we might sleep)
4552     + * to protect against the key being updated or revoked.
4553     + *
4554     + * Allocating a temporary buffer to hold the keys before
4555     + * transferring them to user buffer to avoid potential
4556     + * deadlock involving page fault and mmap_sem.
4557     + */
4558     + key_data = kmalloc(buflen, GFP_KERNEL);
4559     +
4560     + if (!key_data) {
4561     + ret = -ENOMEM;
4562     + goto key_put_out;
4563     + }
4564     + ret = __keyctl_read_key(key, key_data, buflen);
4565     +
4566     + /*
4567     + * Read methods will just return the required length without
4568     + * any copying if the provided length isn't large enough.
4569     + */
4570     + if (ret > 0 && ret <= buflen) {
4571     + if (copy_to_user(buffer, key_data, ret))
4572     + ret = -EFAULT;
4573     + }
4574     + kzfree(key_data);
4575     +
4576     +key_put_out:
4577     key_put(key);
4578     -error:
4579     +out:
4580     return ret;
4581     }
4582    
4583     diff --git a/security/keys/keyring.c b/security/keys/keyring.c
4584     index febf36c6ddc5..5ca620d31cd3 100644
4585     --- a/security/keys/keyring.c
4586     +++ b/security/keys/keyring.c
4587     @@ -459,7 +459,6 @@ static int keyring_read_iterator(const void *object, void *data)
4588     {
4589     struct keyring_read_iterator_context *ctx = data;
4590     const struct key *key = keyring_ptr_to_key(object);
4591     - int ret;
4592    
4593     kenter("{%s,%d},,{%zu/%zu}",
4594     key->type->name, key->serial, ctx->count, ctx->buflen);
4595     @@ -467,10 +466,7 @@ static int keyring_read_iterator(const void *object, void *data)
4596     if (ctx->count >= ctx->buflen)
4597     return 1;
4598    
4599     - ret = put_user(key->serial, ctx->buffer);
4600     - if (ret < 0)
4601     - return ret;
4602     - ctx->buffer++;
4603     + *ctx->buffer++ = key->serial;
4604     ctx->count += sizeof(key->serial);
4605     return 0;
4606     }
4607     diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
4608     index ecba39c93fd9..41e9735006d0 100644
4609     --- a/security/keys/request_key_auth.c
4610     +++ b/security/keys/request_key_auth.c
4611     @@ -22,7 +22,7 @@ static int request_key_auth_instantiate(struct key *,
4612     static void request_key_auth_describe(const struct key *, struct seq_file *);
4613     static void request_key_auth_revoke(struct key *);
4614     static void request_key_auth_destroy(struct key *);
4615     -static long request_key_auth_read(const struct key *, char __user *, size_t);
4616     +static long request_key_auth_read(const struct key *, char *, size_t);
4617    
4618     /*
4619     * The request-key authorisation key type definition.
4620     @@ -80,7 +80,7 @@ static void request_key_auth_describe(const struct key *key,
4621     * - the key's semaphore is read-locked
4622     */
4623     static long request_key_auth_read(const struct key *key,
4624     - char __user *buffer, size_t buflen)
4625     + char *buffer, size_t buflen)
4626     {
4627     struct request_key_auth *rka = dereference_key_locked(key);
4628     size_t datalen;
4629     @@ -97,8 +97,7 @@ static long request_key_auth_read(const struct key *key,
4630     if (buflen > datalen)
4631     buflen = datalen;
4632    
4633     - if (copy_to_user(buffer, rka->callout_info, buflen) != 0)
4634     - ret = -EFAULT;
4635     + memcpy(buffer, rka->callout_info, buflen);
4636     }
4637    
4638     return ret;
4639     diff --git a/security/keys/trusted.c b/security/keys/trusted.c
4640     index 1fbd77816610..36afc29aecc3 100644
4641     --- a/security/keys/trusted.c
4642     +++ b/security/keys/trusted.c
4643     @@ -1144,11 +1144,10 @@ out:
4644     * trusted_read - copy the sealed blob data to userspace in hex.
4645     * On success, return to userspace the trusted key datablob size.
4646     */
4647     -static long trusted_read(const struct key *key, char __user *buffer,
4648     +static long trusted_read(const struct key *key, char *buffer,
4649     size_t buflen)
4650     {
4651     const struct trusted_key_payload *p;
4652     - char *ascii_buf;
4653     char *bufp;
4654     int i;
4655    
4656     @@ -1157,18 +1156,9 @@ static long trusted_read(const struct key *key, char __user *buffer,
4657     return -EINVAL;
4658    
4659     if (buffer && buflen >= 2 * p->blob_len) {
4660     - ascii_buf = kmalloc_array(2, p->blob_len, GFP_KERNEL);
4661     - if (!ascii_buf)
4662     - return -ENOMEM;
4663     -
4664     - bufp = ascii_buf;
4665     + bufp = buffer;
4666     for (i = 0; i < p->blob_len; i++)
4667     bufp = hex_byte_pack(bufp, p->blob[i]);
4668     - if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
4669     - kzfree(ascii_buf);
4670     - return -EFAULT;
4671     - }
4672     - kzfree(ascii_buf);
4673     }
4674     return 2 * p->blob_len;
4675     }
4676     diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
4677     index 6f12de4ce549..07d4287e9084 100644
4678     --- a/security/keys/user_defined.c
4679     +++ b/security/keys/user_defined.c
4680     @@ -168,7 +168,7 @@ EXPORT_SYMBOL_GPL(user_describe);
4681     * read the key data
4682     * - the key's semaphore is read-locked
4683     */
4684     -long user_read(const struct key *key, char __user *buffer, size_t buflen)
4685     +long user_read(const struct key *key, char *buffer, size_t buflen)
4686     {
4687     const struct user_key_payload *upayload;
4688     long ret;
4689     @@ -181,8 +181,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen)
4690     if (buflen > upayload->datalen)
4691     buflen = upayload->datalen;
4692    
4693     - if (copy_to_user(buffer, upayload->data, buflen) != 0)
4694     - ret = -EFAULT;
4695     + memcpy(buffer, upayload->data, buflen);
4696     }
4697    
4698     return ret;
4699     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4700     index 1db9d0579c72..72bbfeddea24 100644
4701     --- a/sound/pci/hda/hda_intel.c
4702     +++ b/sound/pci/hda/hda_intel.c
4703     @@ -1068,6 +1068,8 @@ static int azx_freeze_noirq(struct device *dev)
4704     struct azx *chip = card->private_data;
4705     struct pci_dev *pci = to_pci_dev(dev);
4706    
4707     + if (!azx_is_pm_ready(card))
4708     + return 0;
4709     if (chip->driver_type == AZX_DRIVER_SKL)
4710     pci_set_power_state(pci, PCI_D3hot);
4711    
4712     @@ -1080,6 +1082,8 @@ static int azx_thaw_noirq(struct device *dev)
4713     struct azx *chip = card->private_data;
4714     struct pci_dev *pci = to_pci_dev(dev);
4715    
4716     + if (!azx_is_pm_ready(card))
4717     + return 0;
4718     if (chip->driver_type == AZX_DRIVER_SKL)
4719     pci_set_power_state(pci, PCI_D0);
4720    
4721     @@ -1976,24 +1980,15 @@ static void azx_firmware_cb(const struct firmware *fw, void *context)
4722     {
4723     struct snd_card *card = context;
4724     struct azx *chip = card->private_data;
4725     - struct pci_dev *pci = chip->pci;
4726    
4727     - if (!fw) {
4728     - dev_err(card->dev, "Cannot load firmware, aborting\n");
4729     - goto error;
4730     - }
4731     -
4732     - chip->fw = fw;
4733     + if (fw)
4734     + chip->fw = fw;
4735     + else
4736     + dev_err(card->dev, "Cannot load firmware, continue without patching\n");
4737     if (!chip->disabled) {
4738     /* continue probing */
4739     - if (azx_probe_continue(chip))
4740     - goto error;
4741     + azx_probe_continue(chip);
4742     }
4743     - return; /* OK */
4744     -
4745     - error:
4746     - snd_card_free(card);
4747     - pci_set_drvdata(pci, NULL);
4748     }
4749     #endif
4750    
4751     diff --git a/tools/objtool/check.c b/tools/objtool/check.c
4752     index f53d3c515cdc..9fa4e1a46ca9 100644
4753     --- a/tools/objtool/check.c
4754     +++ b/tools/objtool/check.c
4755     @@ -1010,10 +1010,7 @@ static struct rela *find_jump_table(struct objtool_file *file,
4756     * it.
4757     */
4758     for (;
4759     - &insn->list != &file->insn_list &&
4760     - insn->sec == func->sec &&
4761     - insn->offset >= func->offset;
4762     -
4763     + &insn->list != &file->insn_list && insn->func && insn->func->pfunc == func;
4764     insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
4765    
4766     if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
4767     diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
4768     index eba9a970703b..925722217edf 100644
4769     --- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
4770     +++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
4771     @@ -82,6 +82,7 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
4772     void test_get_stack_raw_tp(void)
4773     {
4774     const char *file = "./test_get_stack_rawtp.o";
4775     + const char *file_err = "./test_get_stack_rawtp_err.o";
4776     const char *prog_name = "raw_tracepoint/sys_enter";
4777     int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
4778     struct perf_buffer_opts pb_opts = {};
4779     @@ -93,6 +94,10 @@ void test_get_stack_raw_tp(void)
4780     struct bpf_map *map;
4781     cpu_set_t cpu_set;
4782    
4783     + err = bpf_prog_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
4784     + if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno))
4785     + return;
4786     +
4787     err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
4788     if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
4789     return;
4790     diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c
4791     new file mode 100644
4792     index 000000000000..8941a41c2a55
4793     --- /dev/null
4794     +++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c
4795     @@ -0,0 +1,26 @@
4796     +// SPDX-License-Identifier: GPL-2.0
4797     +
4798     +#include <linux/bpf.h>
4799     +#include <bpf/bpf_helpers.h>
4800     +
4801     +#define MAX_STACK_RAWTP 10
4802     +
4803     +SEC("raw_tracepoint/sys_enter")
4804     +int bpf_prog2(void *ctx)
4805     +{
4806     + __u64 stack[MAX_STACK_RAWTP];
4807     + int error;
4808     +
4809     + /* set all the flags which should return -EINVAL */
4810     + error = bpf_get_stack(ctx, stack, 0, -1);
4811     + if (error < 0)
4812     + goto loop;
4813     +
4814     + return error;
4815     +loop:
4816     + while (1) {
4817     + error++;
4818     + }
4819     +}
4820     +
4821     +char _license[] SEC("license") = "GPL";
4822     diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
4823     index f24d50f09dbe..371926771db5 100644
4824     --- a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
4825     +++ b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
4826     @@ -9,17 +9,17 @@
4827     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4828     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
4829     BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
4830     - BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
4831     + BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2),
4832     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4833     BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4834     - BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
4835     + BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2),
4836     BPF_MOV64_IMM(BPF_REG_4, 256),
4837     BPF_EMIT_CALL(BPF_FUNC_get_stack),
4838     BPF_MOV64_IMM(BPF_REG_1, 0),
4839     BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4840     BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
4841     BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
4842     - BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
4843     + BPF_JMP_REG(BPF_JSLT, BPF_REG_8, BPF_REG_1, 16),
4844     BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
4845     BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4846     BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
4847     @@ -29,7 +29,7 @@
4848     BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4849     BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
4850     BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4851     - BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
4852     + BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2),
4853     BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
4854     BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
4855     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),