Contents of /trunk/kernel-alx-legacy/patches-4.9/0105-4.9.6-all-fixes.patch
Parent Directory | Revision Log
Revision 3608 -
(show annotations)
(download)
Fri Aug 14 07:34:29 2020 UTC (4 years, 1 month ago) by niro
File size: 149694 byte(s)
-added kernel-alx-legacy pkg
1 | diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.txt b/Documentation/devicetree/bindings/clock/imx31-clock.txt |
2 | index 19df842c694f..8163d565f697 100644 |
3 | --- a/Documentation/devicetree/bindings/clock/imx31-clock.txt |
4 | +++ b/Documentation/devicetree/bindings/clock/imx31-clock.txt |
5 | @@ -77,7 +77,7 @@ Examples: |
6 | clks: ccm@53f80000{ |
7 | compatible = "fsl,imx31-ccm"; |
8 | reg = <0x53f80000 0x4000>; |
9 | - interrupts = <0 31 0x04 0 53 0x04>; |
10 | + interrupts = <31>, <53>; |
11 | #clock-cells = <1>; |
12 | }; |
13 | |
14 | diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt |
15 | index 37babf91f2cb..922dec8fa07e 100644 |
16 | --- a/Documentation/kernel-parameters.txt |
17 | +++ b/Documentation/kernel-parameters.txt |
18 | @@ -3998,10 +3998,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. |
19 | it if 0 is given (See Documentation/cgroup-v1/memory.txt) |
20 | |
21 | swiotlb= [ARM,IA-64,PPC,MIPS,X86] |
22 | - Format: { <int> | force } |
23 | + Format: { <int> | force | noforce } |
24 | <int> -- Number of I/O TLB slabs |
25 | force -- force using of bounce buffers even if they |
26 | wouldn't be automatically used by the kernel |
27 | + noforce -- Never use bounce buffers (for debugging) |
28 | |
29 | switches= [HW,M68k] |
30 | |
31 | diff --git a/Makefile b/Makefile |
32 | index 2a8af8af7b27..ef95231d1625 100644 |
33 | --- a/Makefile |
34 | +++ b/Makefile |
35 | @@ -1,6 +1,6 @@ |
36 | VERSION = 4 |
37 | PATCHLEVEL = 9 |
38 | -SUBLEVEL = 5 |
39 | +SUBLEVEL = 6 |
40 | EXTRAVERSION = |
41 | NAME = Roaring Lionus |
42 | |
43 | diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig |
44 | index bd204bfa29ed..249e10190d20 100644 |
45 | --- a/arch/arc/Kconfig |
46 | +++ b/arch/arc/Kconfig |
47 | @@ -28,7 +28,7 @@ config ARC |
48 | select HAVE_KPROBES |
49 | select HAVE_KRETPROBES |
50 | select HAVE_MEMBLOCK |
51 | - select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND |
52 | + select HAVE_MOD_ARCH_SPECIFIC |
53 | select HAVE_OPROFILE |
54 | select HAVE_PERF_EVENTS |
55 | select HANDLE_DOMAIN_IRQ |
56 | diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h |
57 | index 6e91d8b339c3..567590ea8f6c 100644 |
58 | --- a/arch/arc/include/asm/module.h |
59 | +++ b/arch/arc/include/asm/module.h |
60 | @@ -14,13 +14,13 @@ |
61 | |
62 | #include <asm-generic/module.h> |
63 | |
64 | -#ifdef CONFIG_ARC_DW2_UNWIND |
65 | struct mod_arch_specific { |
66 | +#ifdef CONFIG_ARC_DW2_UNWIND |
67 | void *unw_info; |
68 | int unw_sec_idx; |
69 | +#endif |
70 | const char *secstr; |
71 | }; |
72 | -#endif |
73 | |
74 | #define MODULE_PROC_FAMILY "ARC700" |
75 | |
76 | diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c |
77 | index 42e964db2967..3d99a6091332 100644 |
78 | --- a/arch/arc/kernel/module.c |
79 | +++ b/arch/arc/kernel/module.c |
80 | @@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, |
81 | #ifdef CONFIG_ARC_DW2_UNWIND |
82 | mod->arch.unw_sec_idx = 0; |
83 | mod->arch.unw_info = NULL; |
84 | - mod->arch.secstr = secstr; |
85 | #endif |
86 | + mod->arch.secstr = secstr; |
87 | return 0; |
88 | } |
89 | |
90 | @@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, |
91 | |
92 | } |
93 | |
94 | +#ifdef CONFIG_ARC_DW2_UNWIND |
95 | if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0) |
96 | module->arch.unw_sec_idx = tgtsec; |
97 | +#endif |
98 | |
99 | return 0; |
100 | |
101 | diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile |
102 | index c558ba75cbcc..7037201c5e3a 100644 |
103 | --- a/arch/arm/boot/dts/Makefile |
104 | +++ b/arch/arm/boot/dts/Makefile |
105 | @@ -485,6 +485,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \ |
106 | am3517-evm.dtb \ |
107 | am3517_mt_ventoux.dtb \ |
108 | logicpd-torpedo-37xx-devkit.dtb \ |
109 | + logicpd-som-lv-37xx-devkit.dtb \ |
110 | omap3430-sdp.dtb \ |
111 | omap3-beagle.dtb \ |
112 | omap3-beagle-xm.dtb \ |
113 | diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi |
114 | index 194d884c9de1..795c1467fa50 100644 |
115 | --- a/arch/arm/boot/dts/am33xx.dtsi |
116 | +++ b/arch/arm/boot/dts/am33xx.dtsi |
117 | @@ -16,6 +16,7 @@ |
118 | interrupt-parent = <&intc>; |
119 | #address-cells = <1>; |
120 | #size-cells = <1>; |
121 | + chosen { }; |
122 | |
123 | aliases { |
124 | i2c0 = &i2c0; |
125 | diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi |
126 | index a275fa956813..a20a71d9d22e 100644 |
127 | --- a/arch/arm/boot/dts/am4372.dtsi |
128 | +++ b/arch/arm/boot/dts/am4372.dtsi |
129 | @@ -16,6 +16,7 @@ |
130 | interrupt-parent = <&wakeupgen>; |
131 | #address-cells = <1>; |
132 | #size-cells = <1>; |
133 | + chosen { }; |
134 | |
135 | memory@0 { |
136 | device_type = "memory"; |
137 | diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi |
138 | index 46d46d894a44..74dd21b7373c 100644 |
139 | --- a/arch/arm/boot/dts/bcm283x.dtsi |
140 | +++ b/arch/arm/boot/dts/bcm283x.dtsi |
141 | @@ -104,7 +104,7 @@ |
142 | reg = <0x7e104000 0x10>; |
143 | }; |
144 | |
145 | - mailbox: mailbox@7e00b800 { |
146 | + mailbox: mailbox@7e00b880 { |
147 | compatible = "brcm,bcm2835-mbox"; |
148 | reg = <0x7e00b880 0x40>; |
149 | interrupts = <0 1>; |
150 | diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts |
151 | index 41de15fe15a2..78492a0bbbab 100644 |
152 | --- a/arch/arm/boot/dts/da850-evm.dts |
153 | +++ b/arch/arm/boot/dts/da850-evm.dts |
154 | @@ -99,6 +99,7 @@ |
155 | #size-cells = <1>; |
156 | compatible = "m25p64"; |
157 | spi-max-frequency = <30000000>; |
158 | + m25p,fast-read; |
159 | reg = <0>; |
160 | partition@0 { |
161 | label = "U-Boot-SPL"; |
162 | diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi |
163 | index ff90a6ce6bdc..d87efab24fa2 100644 |
164 | --- a/arch/arm/boot/dts/dm814x.dtsi |
165 | +++ b/arch/arm/boot/dts/dm814x.dtsi |
166 | @@ -12,6 +12,7 @@ |
167 | interrupt-parent = <&intc>; |
168 | #address-cells = <1>; |
169 | #size-cells = <1>; |
170 | + chosen { }; |
171 | |
172 | aliases { |
173 | i2c0 = &i2c1; |
174 | diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi |
175 | index f1e0f771ff29..cbdfbc4e4a26 100644 |
176 | --- a/arch/arm/boot/dts/dm816x.dtsi |
177 | +++ b/arch/arm/boot/dts/dm816x.dtsi |
178 | @@ -12,6 +12,7 @@ |
179 | interrupt-parent = <&intc>; |
180 | #address-cells = <1>; |
181 | #size-cells = <1>; |
182 | + chosen { }; |
183 | |
184 | aliases { |
185 | i2c0 = &i2c1; |
186 | diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi |
187 | index d4fcd68f6349..064d84f87e45 100644 |
188 | --- a/arch/arm/boot/dts/dra7.dtsi |
189 | +++ b/arch/arm/boot/dts/dra7.dtsi |
190 | @@ -18,6 +18,7 @@ |
191 | |
192 | compatible = "ti,dra7xx"; |
193 | interrupt-parent = <&crossbar_mpu>; |
194 | + chosen { }; |
195 | |
196 | aliases { |
197 | i2c0 = &i2c1; |
198 | @@ -1376,6 +1377,7 @@ |
199 | phy-names = "sata-phy"; |
200 | clocks = <&sata_ref_clk>; |
201 | ti,hwmods = "sata"; |
202 | + ports-implemented = <0x1>; |
203 | }; |
204 | |
205 | rtc: rtc@48838000 { |
206 | diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi |
207 | index 1ce7ae94e7ad..11e9e6bd8abb 100644 |
208 | --- a/arch/arm/boot/dts/imx31.dtsi |
209 | +++ b/arch/arm/boot/dts/imx31.dtsi |
210 | @@ -30,11 +30,11 @@ |
211 | }; |
212 | }; |
213 | |
214 | - avic: avic-interrupt-controller@60000000 { |
215 | + avic: interrupt-controller@68000000 { |
216 | compatible = "fsl,imx31-avic", "fsl,avic"; |
217 | interrupt-controller; |
218 | #interrupt-cells = <1>; |
219 | - reg = <0x60000000 0x100000>; |
220 | + reg = <0x68000000 0x100000>; |
221 | }; |
222 | |
223 | soc { |
224 | @@ -118,13 +118,6 @@ |
225 | interrupts = <19>; |
226 | clocks = <&clks 25>; |
227 | }; |
228 | - |
229 | - clks: ccm@53f80000{ |
230 | - compatible = "fsl,imx31-ccm"; |
231 | - reg = <0x53f80000 0x4000>; |
232 | - interrupts = <0 31 0x04 0 53 0x04>; |
233 | - #clock-cells = <1>; |
234 | - }; |
235 | }; |
236 | |
237 | aips@53f00000 { /* AIPS2 */ |
238 | @@ -134,6 +127,13 @@ |
239 | reg = <0x53f00000 0x100000>; |
240 | ranges; |
241 | |
242 | + clks: ccm@53f80000{ |
243 | + compatible = "fsl,imx31-ccm"; |
244 | + reg = <0x53f80000 0x4000>; |
245 | + interrupts = <31>, <53>; |
246 | + #clock-cells = <1>; |
247 | + }; |
248 | + |
249 | gpt: timer@53f90000 { |
250 | compatible = "fsl,imx31-gpt"; |
251 | reg = <0x53f90000 0x4000>; |
252 | diff --git a/arch/arm/boot/dts/imx6q-cm-fx6.dts b/arch/arm/boot/dts/imx6q-cm-fx6.dts |
253 | index 59bc5a4dce17..a150bca84daa 100644 |
254 | --- a/arch/arm/boot/dts/imx6q-cm-fx6.dts |
255 | +++ b/arch/arm/boot/dts/imx6q-cm-fx6.dts |
256 | @@ -183,7 +183,6 @@ |
257 | MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0 |
258 | MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0 |
259 | MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0 |
260 | - MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8 |
261 | >; |
262 | }; |
263 | |
264 | diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi |
265 | index b0b3220a1fd9..01166ba36f27 100644 |
266 | --- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi |
267 | +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi |
268 | @@ -319,8 +319,6 @@ |
269 | compatible = "fsl,imx6q-nitrogen6_max-sgtl5000", |
270 | "fsl,imx-audio-sgtl5000"; |
271 | model = "imx6q-nitrogen6_max-sgtl5000"; |
272 | - pinctrl-names = "default"; |
273 | - pinctrl-0 = <&pinctrl_sgtl5000>; |
274 | ssi-controller = <&ssi1>; |
275 | audio-codec = <&codec>; |
276 | audio-routing = |
277 | @@ -402,6 +400,8 @@ |
278 | |
279 | codec: sgtl5000@0a { |
280 | compatible = "fsl,sgtl5000"; |
281 | + pinctrl-names = "default"; |
282 | + pinctrl-0 = <&pinctrl_sgtl5000>; |
283 | reg = <0x0a>; |
284 | clocks = <&clks IMX6QDL_CLK_CKO>; |
285 | VDDA-supply = <®_2p5v>; |
286 | diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts |
287 | index da8598402ab8..38faa90007d7 100644 |
288 | --- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts |
289 | +++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts |
290 | @@ -158,7 +158,7 @@ |
291 | &mmc1 { |
292 | interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>; |
293 | pinctrl-names = "default"; |
294 | - pinctrl-0 = <&mmc1_pins &mmc1_cd>; |
295 | + pinctrl-0 = <&mmc1_pins>; |
296 | wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */ |
297 | cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */ |
298 | vmmc-supply = <&vmmc1>; |
299 | @@ -193,7 +193,8 @@ |
300 | OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */ |
301 | OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */ |
302 | OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */ |
303 | - OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 sdmmc1_wp*/ |
304 | + OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 */ |
305 | + OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */ |
306 | >; |
307 | }; |
308 | |
309 | @@ -242,12 +243,6 @@ |
310 | OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4) /* sys_boot6.gpio_8 */ |
311 | >; |
312 | }; |
313 | - |
314 | - mmc1_cd: pinmux_mmc1_cd { |
315 | - pinctrl-single,pins = < |
316 | - OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */ |
317 | - >; |
318 | - }; |
319 | }; |
320 | |
321 | |
322 | diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi |
323 | index 4f793a025a72..f1d6de8b3c19 100644 |
324 | --- a/arch/arm/boot/dts/omap2.dtsi |
325 | +++ b/arch/arm/boot/dts/omap2.dtsi |
326 | @@ -17,6 +17,7 @@ |
327 | interrupt-parent = <&intc>; |
328 | #address-cells = <1>; |
329 | #size-cells = <1>; |
330 | + chosen { }; |
331 | |
332 | aliases { |
333 | serial0 = &uart1; |
334 | diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi |
335 | index 353d818ce5a6..2008648b8c9f 100644 |
336 | --- a/arch/arm/boot/dts/omap3.dtsi |
337 | +++ b/arch/arm/boot/dts/omap3.dtsi |
338 | @@ -17,6 +17,7 @@ |
339 | interrupt-parent = <&intc>; |
340 | #address-cells = <1>; |
341 | #size-cells = <1>; |
342 | + chosen { }; |
343 | |
344 | aliases { |
345 | i2c0 = &i2c1; |
346 | diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi |
347 | index 0ced079b7ae3..9c289ddab3df 100644 |
348 | --- a/arch/arm/boot/dts/omap4.dtsi |
349 | +++ b/arch/arm/boot/dts/omap4.dtsi |
350 | @@ -15,6 +15,7 @@ |
351 | interrupt-parent = <&wakeupgen>; |
352 | #address-cells = <1>; |
353 | #size-cells = <1>; |
354 | + chosen { }; |
355 | |
356 | aliases { |
357 | i2c0 = &i2c1; |
358 | diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi |
359 | index 25262118ec3d..1d1d8e90cd80 100644 |
360 | --- a/arch/arm/boot/dts/omap5.dtsi |
361 | +++ b/arch/arm/boot/dts/omap5.dtsi |
362 | @@ -17,6 +17,7 @@ |
363 | |
364 | compatible = "ti,omap5"; |
365 | interrupt-parent = <&wakeupgen>; |
366 | + chosen { }; |
367 | |
368 | aliases { |
369 | i2c0 = &i2c1; |
370 | @@ -985,6 +986,7 @@ |
371 | phy-names = "sata-phy"; |
372 | clocks = <&sata_ref_clk>; |
373 | ti,hwmods = "sata"; |
374 | + ports-implemented = <0x1>; |
375 | }; |
376 | |
377 | dss: dss@58000000 { |
378 | diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi |
379 | index 725ecb3c5fb4..7e860d3737ff 100644 |
380 | --- a/arch/arm/boot/dts/r8a7794.dtsi |
381 | +++ b/arch/arm/boot/dts/r8a7794.dtsi |
382 | @@ -319,7 +319,7 @@ |
383 | "ch12"; |
384 | clocks = <&mstp5_clks R8A7794_CLK_AUDIO_DMAC0>; |
385 | clock-names = "fck"; |
386 | - power-domains = <&cpg_clocks>; |
387 | + power-domains = <&sysc R8A7794_PD_ALWAYS_ON>; |
388 | #dma-cells = <1>; |
389 | dma-channels = <13>; |
390 | }; |
391 | @@ -1025,8 +1025,7 @@ |
392 | clocks = <&extal_clk &usb_extal_clk>; |
393 | #clock-cells = <1>; |
394 | clock-output-names = "main", "pll0", "pll1", "pll3", |
395 | - "lb", "qspi", "sdh", "sd0", "z", |
396 | - "rcan"; |
397 | + "lb", "qspi", "sdh", "sd0", "rcan"; |
398 | #power-domain-cells = <0>; |
399 | }; |
400 | /* Variable factor clocks */ |
401 | @@ -1483,7 +1482,7 @@ |
402 | "mix.0", "mix.1", |
403 | "dvc.0", "dvc.1", |
404 | "clk_a", "clk_b", "clk_c", "clk_i"; |
405 | - power-domains = <&cpg_clocks>; |
406 | + power-domains = <&sysc R8A7794_PD_ALWAYS_ON>; |
407 | |
408 | status = "disabled"; |
409 | |
410 | diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h |
411 | index 522b5feb4eaa..b62eaeb147aa 100644 |
412 | --- a/arch/arm/include/asm/cputype.h |
413 | +++ b/arch/arm/include/asm/cputype.h |
414 | @@ -94,6 +94,9 @@ |
415 | #define ARM_CPU_XSCALE_ARCH_V2 0x4000 |
416 | #define ARM_CPU_XSCALE_ARCH_V3 0x6000 |
417 | |
418 | +/* Qualcomm implemented cores */ |
419 | +#define ARM_CPU_PART_SCORPION 0x510002d0 |
420 | + |
421 | extern unsigned int processor_id; |
422 | |
423 | #ifdef CONFIG_CPU_CP15 |
424 | diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c |
425 | index b8df45883cf7..25538a935874 100644 |
426 | --- a/arch/arm/kernel/hw_breakpoint.c |
427 | +++ b/arch/arm/kernel/hw_breakpoint.c |
428 | @@ -1066,6 +1066,22 @@ static int __init arch_hw_breakpoint_init(void) |
429 | return 0; |
430 | } |
431 | |
432 | + /* |
433 | + * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD |
434 | + * whenever a WFI is issued, even if the core is not powered down, in |
435 | + * violation of the architecture. When DBGPRSR.SPD is set, accesses to |
436 | + * breakpoint and watchpoint registers are treated as undefined, so |
437 | + * this results in boot time and runtime failures when these are |
438 | + * accessed and we unexpectedly take a trap. |
439 | + * |
440 | + * It's not clear if/how this can be worked around, so we blacklist |
441 | + * Scorpion CPUs to avoid these issues. |
442 | + */ |
443 | + if (read_cpuid_part() == ARM_CPU_PART_SCORPION) { |
444 | + pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n"); |
445 | + return 0; |
446 | + } |
447 | + |
448 | has_ossr = core_has_os_save_restore(); |
449 | |
450 | /* Determine how many BRPs/WRPs are available. */ |
451 | diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c |
452 | index 22313cb53362..9af0701f7094 100644 |
453 | --- a/arch/arm/kernel/smp_tlb.c |
454 | +++ b/arch/arm/kernel/smp_tlb.c |
455 | @@ -9,6 +9,7 @@ |
456 | */ |
457 | #include <linux/preempt.h> |
458 | #include <linux/smp.h> |
459 | +#include <linux/uaccess.h> |
460 | |
461 | #include <asm/smp_plat.h> |
462 | #include <asm/tlbflush.h> |
463 | @@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg) |
464 | static inline void ipi_flush_tlb_page(void *arg) |
465 | { |
466 | struct tlb_args *ta = (struct tlb_args *)arg; |
467 | + unsigned int __ua_flags = uaccess_save_and_enable(); |
468 | |
469 | local_flush_tlb_page(ta->ta_vma, ta->ta_start); |
470 | + |
471 | + uaccess_restore(__ua_flags); |
472 | } |
473 | |
474 | static inline void ipi_flush_tlb_kernel_page(void *arg) |
475 | @@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg) |
476 | static inline void ipi_flush_tlb_range(void *arg) |
477 | { |
478 | struct tlb_args *ta = (struct tlb_args *)arg; |
479 | + unsigned int __ua_flags = uaccess_save_and_enable(); |
480 | |
481 | local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); |
482 | + |
483 | + uaccess_restore(__ua_flags); |
484 | } |
485 | |
486 | static inline void ipi_flush_tlb_kernel_range(void *arg) |
487 | diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c |
488 | index 8538910db202..a970e7fcba9e 100644 |
489 | --- a/arch/arm/mach-ux500/pm.c |
490 | +++ b/arch/arm/mach-ux500/pm.c |
491 | @@ -134,8 +134,8 @@ bool prcmu_pending_irq(void) |
492 | */ |
493 | bool prcmu_is_cpu_in_wfi(int cpu) |
494 | { |
495 | - return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : |
496 | - PRCM_ARM_WFI_STANDBY_WFI0; |
497 | + return readl(PRCM_ARM_WFI_STANDBY) & |
498 | + (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0); |
499 | } |
500 | |
501 | /* |
502 | diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h |
503 | index b71086d25195..53211a0acf0f 100644 |
504 | --- a/arch/arm64/include/asm/memory.h |
505 | +++ b/arch/arm64/include/asm/memory.h |
506 | @@ -217,7 +217,7 @@ static inline void *phys_to_virt(phys_addr_t x) |
507 | #define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
508 | #else |
509 | #define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) |
510 | -#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) |
511 | +#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) |
512 | |
513 | #define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET)) |
514 | #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) |
515 | diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h |
516 | index b5c3933ed441..d1ff83dfe5de 100644 |
517 | --- a/arch/arm64/include/uapi/asm/ptrace.h |
518 | +++ b/arch/arm64/include/uapi/asm/ptrace.h |
519 | @@ -77,6 +77,7 @@ struct user_fpsimd_state { |
520 | __uint128_t vregs[32]; |
521 | __u32 fpsr; |
522 | __u32 fpcr; |
523 | + __u32 __reserved[2]; |
524 | }; |
525 | |
526 | struct user_hwdebug_state { |
527 | diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S |
528 | index 223d54a4d66b..79b0fe24d5b7 100644 |
529 | --- a/arch/arm64/kernel/entry.S |
530 | +++ b/arch/arm64/kernel/entry.S |
531 | @@ -624,7 +624,7 @@ el0_inv: |
532 | mov x0, sp |
533 | mov x1, #BAD_SYNC |
534 | mov x2, x25 |
535 | - bl bad_mode |
536 | + bl bad_el0_sync |
537 | b ret_to_user |
538 | ENDPROC(el0_sync) |
539 | |
540 | diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c |
541 | index e0c81da60f76..8eedeef375d6 100644 |
542 | --- a/arch/arm64/kernel/ptrace.c |
543 | +++ b/arch/arm64/kernel/ptrace.c |
544 | @@ -550,6 +550,8 @@ static int hw_break_set(struct task_struct *target, |
545 | /* (address, ctrl) registers */ |
546 | limit = regset->n * regset->size; |
547 | while (count && offset < limit) { |
548 | + if (count < PTRACE_HBP_ADDR_SZ) |
549 | + return -EINVAL; |
550 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, |
551 | offset, offset + PTRACE_HBP_ADDR_SZ); |
552 | if (ret) |
553 | @@ -559,6 +561,8 @@ static int hw_break_set(struct task_struct *target, |
554 | return ret; |
555 | offset += PTRACE_HBP_ADDR_SZ; |
556 | |
557 | + if (!count) |
558 | + break; |
559 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, |
560 | offset, offset + PTRACE_HBP_CTRL_SZ); |
561 | if (ret) |
562 | @@ -595,7 +599,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, |
563 | const void *kbuf, const void __user *ubuf) |
564 | { |
565 | int ret; |
566 | - struct user_pt_regs newregs; |
567 | + struct user_pt_regs newregs = task_pt_regs(target)->user_regs; |
568 | |
569 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); |
570 | if (ret) |
571 | @@ -625,7 +629,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, |
572 | const void *kbuf, const void __user *ubuf) |
573 | { |
574 | int ret; |
575 | - struct user_fpsimd_state newstate; |
576 | + struct user_fpsimd_state newstate = |
577 | + target->thread.fpsimd_state.user_fpsimd; |
578 | |
579 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); |
580 | if (ret) |
581 | @@ -649,7 +654,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, |
582 | const void *kbuf, const void __user *ubuf) |
583 | { |
584 | int ret; |
585 | - unsigned long tls; |
586 | + unsigned long tls = target->thread.tp_value; |
587 | |
588 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); |
589 | if (ret) |
590 | @@ -675,7 +680,8 @@ static int system_call_set(struct task_struct *target, |
591 | unsigned int pos, unsigned int count, |
592 | const void *kbuf, const void __user *ubuf) |
593 | { |
594 | - int syscallno, ret; |
595 | + int syscallno = task_pt_regs(target)->syscallno; |
596 | + int ret; |
597 | |
598 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1); |
599 | if (ret) |
600 | @@ -947,7 +953,7 @@ static int compat_tls_set(struct task_struct *target, |
601 | const void __user *ubuf) |
602 | { |
603 | int ret; |
604 | - compat_ulong_t tls; |
605 | + compat_ulong_t tls = target->thread.tp_value; |
606 | |
607 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); |
608 | if (ret) |
609 | diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c |
610 | index c9986b3e0a96..11e5eae088ab 100644 |
611 | --- a/arch/arm64/kernel/traps.c |
612 | +++ b/arch/arm64/kernel/traps.c |
613 | @@ -596,17 +596,34 @@ const char *esr_get_class_string(u32 esr) |
614 | } |
615 | |
616 | /* |
617 | - * bad_mode handles the impossible case in the exception vector. |
618 | + * bad_mode handles the impossible case in the exception vector. This is always |
619 | + * fatal. |
620 | */ |
621 | asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) |
622 | { |
623 | - siginfo_t info; |
624 | - void __user *pc = (void __user *)instruction_pointer(regs); |
625 | console_verbose(); |
626 | |
627 | pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", |
628 | handler[reason], smp_processor_id(), esr, |
629 | esr_get_class_string(esr)); |
630 | + |
631 | + die("Oops - bad mode", regs, 0); |
632 | + local_irq_disable(); |
633 | + panic("bad mode"); |
634 | +} |
635 | + |
636 | +/* |
637 | + * bad_el0_sync handles unexpected, but potentially recoverable synchronous |
638 | + * exceptions taken from EL0. Unlike bad_mode, this returns. |
639 | + */ |
640 | +asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) |
641 | +{ |
642 | + siginfo_t info; |
643 | + void __user *pc = (void __user *)instruction_pointer(regs); |
644 | + console_verbose(); |
645 | + |
646 | + pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n", |
647 | + smp_processor_id(), esr, esr_get_class_string(esr)); |
648 | __show_regs(regs); |
649 | |
650 | info.si_signo = SIGILL; |
651 | @@ -614,7 +631,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) |
652 | info.si_code = ILL_ILLOPC; |
653 | info.si_addr = pc; |
654 | |
655 | - arm64_notify_die("Oops - bad mode", regs, &info, 0); |
656 | + current->thread.fault_address = 0; |
657 | + current->thread.fault_code = 0; |
658 | + |
659 | + force_sig_info(info.si_signo, &info, current); |
660 | } |
661 | |
662 | void __pte_error(const char *file, int line, unsigned long val) |
663 | diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c |
664 | index 3f74d0d98de6..02265a589ef5 100644 |
665 | --- a/arch/arm64/mm/dma-mapping.c |
666 | +++ b/arch/arm64/mm/dma-mapping.c |
667 | @@ -524,7 +524,8 @@ EXPORT_SYMBOL(dummy_dma_ops); |
668 | |
669 | static int __init arm64_dma_init(void) |
670 | { |
671 | - if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT)) |
672 | + if (swiotlb_force == SWIOTLB_FORCE || |
673 | + max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT)) |
674 | swiotlb = 1; |
675 | |
676 | return atomic_pool_init(); |
677 | diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c |
678 | index 212c4d1e2f26..380ebe705093 100644 |
679 | --- a/arch/arm64/mm/init.c |
680 | +++ b/arch/arm64/mm/init.c |
681 | @@ -401,8 +401,11 @@ static void __init free_unused_memmap(void) |
682 | */ |
683 | void __init mem_init(void) |
684 | { |
685 | - if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT)) |
686 | + if (swiotlb_force == SWIOTLB_FORCE || |
687 | + max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT)) |
688 | swiotlb_init(1); |
689 | + else |
690 | + swiotlb_force = SWIOTLB_NO_FORCE; |
691 | |
692 | set_max_mapnr(pfn_to_page(max_pfn) - mem_map); |
693 | |
694 | diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h |
695 | index c56ea8c84abb..c4ced1d01d57 100644 |
696 | --- a/arch/powerpc/include/asm/ppc-opcode.h |
697 | +++ b/arch/powerpc/include/asm/ppc-opcode.h |
698 | @@ -157,7 +157,7 @@ |
699 | #define PPC_INST_MCRXR 0x7c000400 |
700 | #define PPC_INST_MCRXR_MASK 0xfc0007fe |
701 | #define PPC_INST_MFSPR_PVR 0x7c1f42a6 |
702 | -#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff |
703 | +#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe |
704 | #define PPC_INST_MFTMR 0x7c0002dc |
705 | #define PPC_INST_MSGSND 0x7c00019c |
706 | #define PPC_INST_MSGCLR 0x7c0001dc |
707 | @@ -174,13 +174,13 @@ |
708 | #define PPC_INST_RFDI 0x4c00004e |
709 | #define PPC_INST_RFMCI 0x4c00004c |
710 | #define PPC_INST_MFSPR_DSCR 0x7c1102a6 |
711 | -#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff |
712 | +#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe |
713 | #define PPC_INST_MTSPR_DSCR 0x7c1103a6 |
714 | -#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff |
715 | +#define PPC_INST_MTSPR_DSCR_MASK 0xfc1ffffe |
716 | #define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6 |
717 | -#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff |
718 | +#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe |
719 | #define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6 |
720 | -#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff |
721 | +#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe |
722 | #define PPC_INST_MFVSRD 0x7c000066 |
723 | #define PPC_INST_MTVSRD 0x7c000166 |
724 | #define PPC_INST_SLBFEE 0x7c0007a7 |
725 | diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c |
726 | index b1ec62f2cc31..5c8f12fe9721 100644 |
727 | --- a/arch/powerpc/kernel/ptrace.c |
728 | +++ b/arch/powerpc/kernel/ptrace.c |
729 | @@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, |
730 | |
731 | flush_fp_to_thread(target); |
732 | |
733 | + for (i = 0; i < 32 ; i++) |
734 | + buf[i] = target->thread.TS_FPR(i); |
735 | + buf[32] = target->thread.fp_state.fpscr; |
736 | + |
737 | /* copy to local buffer then write that out */ |
738 | i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); |
739 | if (i) |
740 | @@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset, |
741 | flush_altivec_to_thread(target); |
742 | flush_vsx_to_thread(target); |
743 | |
744 | + for (i = 0; i < 32 ; i++) |
745 | + buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; |
746 | + |
747 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
748 | buf, 0, 32 * sizeof(double)); |
749 | if (!ret) |
750 | @@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target, |
751 | flush_fp_to_thread(target); |
752 | flush_altivec_to_thread(target); |
753 | |
754 | + for (i = 0; i < 32; i++) |
755 | + buf[i] = target->thread.TS_CKFPR(i); |
756 | + buf[32] = target->thread.ckfp_state.fpscr; |
757 | + |
758 | /* copy to local buffer then write that out */ |
759 | i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); |
760 | if (i) |
761 | @@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target, |
762 | flush_altivec_to_thread(target); |
763 | flush_vsx_to_thread(target); |
764 | |
765 | + for (i = 0; i < 32 ; i++) |
766 | + buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET]; |
767 | + |
768 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
769 | buf, 0, 32 * sizeof(double)); |
770 | if (!ret) |
771 | diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h |
772 | index 6447dc1c3d89..929b56d47ad9 100644 |
773 | --- a/arch/powerpc/perf/power9-events-list.h |
774 | +++ b/arch/powerpc/perf/power9-events-list.h |
775 | @@ -16,7 +16,7 @@ EVENT(PM_CYC, 0x0001e) |
776 | EVENT(PM_ICT_NOSLOT_CYC, 0x100f8) |
777 | EVENT(PM_CMPLU_STALL, 0x1e054) |
778 | EVENT(PM_INST_CMPL, 0x00002) |
779 | -EVENT(PM_BRU_CMPL, 0x40060) |
780 | +EVENT(PM_BRU_CMPL, 0x10012) |
781 | EVENT(PM_BR_MPRED_CMPL, 0x400f6) |
782 | |
783 | /* All L1 D cache load references counted at finish, gated by reject */ |
784 | diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c |
785 | index d38e86fd5720..60c57657c772 100644 |
786 | --- a/arch/powerpc/sysdev/xics/icp-opal.c |
787 | +++ b/arch/powerpc/sysdev/xics/icp-opal.c |
788 | @@ -20,6 +20,7 @@ |
789 | #include <asm/xics.h> |
790 | #include <asm/io.h> |
791 | #include <asm/opal.h> |
792 | +#include <asm/kvm_ppc.h> |
793 | |
794 | static void icp_opal_teardown_cpu(void) |
795 | { |
796 | @@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void) |
797 | * Should we be flagging idle loop instead? |
798 | * Or creating some task to be scheduled? |
799 | */ |
800 | - opal_int_eoi((0x00 << 24) | XICS_IPI); |
801 | + if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0) |
802 | + force_external_irq_replay(); |
803 | +} |
804 | + |
805 | +static unsigned int icp_opal_get_xirr(void) |
806 | +{ |
807 | + unsigned int kvm_xirr; |
808 | + __be32 hw_xirr; |
809 | + int64_t rc; |
810 | + |
811 | + /* Handle an interrupt latched by KVM first */ |
812 | + kvm_xirr = kvmppc_get_xics_latch(); |
813 | + if (kvm_xirr) |
814 | + return kvm_xirr; |
815 | + |
816 | + /* Then ask OPAL */ |
817 | + rc = opal_int_get_xirr(&hw_xirr, false); |
818 | + if (rc < 0) |
819 | + return 0; |
820 | + return be32_to_cpu(hw_xirr); |
821 | } |
822 | |
823 | static unsigned int icp_opal_get_irq(void) |
824 | @@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void) |
825 | unsigned int xirr; |
826 | unsigned int vec; |
827 | unsigned int irq; |
828 | - int64_t rc; |
829 | |
830 | - rc = opal_int_get_xirr(&xirr, false); |
831 | - if (rc < 0) |
832 | - return 0; |
833 | - xirr = be32_to_cpu(xirr); |
834 | + xirr = icp_opal_get_xirr(); |
835 | vec = xirr & 0x00ffffff; |
836 | if (vec == XICS_IRQ_SPURIOUS) |
837 | return 0; |
838 | @@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void) |
839 | xics_mask_unknown_vec(vec); |
840 | |
841 | /* We might learn about it later, so EOI it */ |
842 | - opal_int_eoi(xirr); |
843 | + if (opal_int_eoi(xirr) > 0) |
844 | + force_external_irq_replay(); |
845 | |
846 | return 0; |
847 | } |
848 | diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c |
849 | index 9c7a1ecfe6bd..47a1de77b18d 100644 |
850 | --- a/arch/s390/kvm/kvm-s390.c |
851 | +++ b/arch/s390/kvm/kvm-s390.c |
852 | @@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) |
853 | memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, |
854 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
855 | memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, |
856 | - S390_ARCH_FAC_LIST_SIZE_BYTE); |
857 | + sizeof(S390_lowcore.stfle_fac_list)); |
858 | if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) |
859 | ret = -EFAULT; |
860 | kfree(mach); |
861 | @@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) |
862 | |
863 | /* Populate the facility mask initially. */ |
864 | memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list, |
865 | - S390_ARCH_FAC_LIST_SIZE_BYTE); |
866 | + sizeof(S390_lowcore.stfle_fac_list)); |
867 | for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { |
868 | if (i < kvm_s390_fac_list_mask_size()) |
869 | kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i]; |
870 | diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c |
871 | index 48e6d84f173e..3d8ff40ecc6f 100644 |
872 | --- a/arch/x86/kernel/apic/io_apic.c |
873 | +++ b/arch/x86/kernel/apic/io_apic.c |
874 | @@ -1876,6 +1876,7 @@ static struct irq_chip ioapic_chip __read_mostly = { |
875 | .irq_ack = irq_chip_ack_parent, |
876 | .irq_eoi = ioapic_ack_level, |
877 | .irq_set_affinity = ioapic_set_affinity, |
878 | + .irq_retrigger = irq_chip_retrigger_hierarchy, |
879 | .flags = IRQCHIP_SKIP_SET_WAKE, |
880 | }; |
881 | |
882 | @@ -1887,6 +1888,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = { |
883 | .irq_ack = irq_chip_ack_parent, |
884 | .irq_eoi = ioapic_ir_ack_level, |
885 | .irq_set_affinity = ioapic_set_affinity, |
886 | + .irq_retrigger = irq_chip_retrigger_hierarchy, |
887 | .flags = IRQCHIP_SKIP_SET_WAKE, |
888 | }; |
889 | |
890 | diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c |
891 | index b47edb8f5256..8da13d4e77cc 100644 |
892 | --- a/arch/x86/kernel/pci-swiotlb.c |
893 | +++ b/arch/x86/kernel/pci-swiotlb.c |
894 | @@ -70,7 +70,7 @@ int __init pci_swiotlb_detect_override(void) |
895 | { |
896 | int use_swiotlb = swiotlb | swiotlb_force; |
897 | |
898 | - if (swiotlb_force) |
899 | + if (swiotlb_force == SWIOTLB_FORCE) |
900 | swiotlb = 1; |
901 | |
902 | return use_swiotlb; |
903 | diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c |
904 | index 3cd69832d7f4..3961103e9176 100644 |
905 | --- a/arch/x86/pci/acpi.c |
906 | +++ b/arch/x86/pci/acpi.c |
907 | @@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = { |
908 | DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"), |
909 | }, |
910 | }, |
911 | + /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */ |
912 | + { |
913 | + .callback = set_nouse_crs, |
914 | + .ident = "Supermicro X8DTH", |
915 | + .matches = { |
916 | + DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), |
917 | + DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"), |
918 | + DMI_MATCH(DMI_BIOS_VERSION, "2.0a"), |
919 | + }, |
920 | + }, |
921 | |
922 | /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */ |
923 | { |
924 | diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c |
925 | index 0e98e5d241d0..5f8b4b0302b6 100644 |
926 | --- a/arch/x86/xen/pci-swiotlb-xen.c |
927 | +++ b/arch/x86/xen/pci-swiotlb-xen.c |
928 | @@ -49,7 +49,7 @@ int __init pci_xen_swiotlb_detect(void) |
929 | * activate this IOMMU. If running as PV privileged, activate it |
930 | * irregardless. |
931 | */ |
932 | - if ((xen_initial_domain() || swiotlb || swiotlb_force)) |
933 | + if (xen_initial_domain() || swiotlb || swiotlb_force == SWIOTLB_FORCE) |
934 | xen_swiotlb = 1; |
935 | |
936 | /* If we are running under Xen, we MUST disable the native SWIOTLB. |
937 | diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c |
938 | index 8f3488b80896..7f6fed9f0703 100644 |
939 | --- a/drivers/clocksource/exynos_mct.c |
940 | +++ b/drivers/clocksource/exynos_mct.c |
941 | @@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu) |
942 | if (mct_int_type == MCT_INT_SPI) { |
943 | if (evt->irq != -1) |
944 | disable_irq_nosync(evt->irq); |
945 | + exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); |
946 | } else { |
947 | disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); |
948 | } |
949 | diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c |
950 | index bf3ea7603a58..712592cef1a2 100644 |
951 | --- a/drivers/devfreq/devfreq.c |
952 | +++ b/drivers/devfreq/devfreq.c |
953 | @@ -593,11 +593,16 @@ struct devfreq *devfreq_add_device(struct device *dev, |
954 | list_add(&devfreq->node, &devfreq_list); |
955 | |
956 | governor = find_devfreq_governor(devfreq->governor_name); |
957 | - if (!IS_ERR(governor)) |
958 | - devfreq->governor = governor; |
959 | - if (devfreq->governor) |
960 | - err = devfreq->governor->event_handler(devfreq, |
961 | - DEVFREQ_GOV_START, NULL); |
962 | + if (IS_ERR(governor)) { |
963 | + dev_err(dev, "%s: Unable to find governor for the device\n", |
964 | + __func__); |
965 | + err = PTR_ERR(governor); |
966 | + goto err_init; |
967 | + } |
968 | + |
969 | + devfreq->governor = governor; |
970 | + err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START, |
971 | + NULL); |
972 | if (err) { |
973 | dev_err(dev, "%s: Unable to start governor for the device\n", |
974 | __func__); |
975 | diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c |
976 | index 29866f7e6d7e..1b21bb60e797 100644 |
977 | --- a/drivers/devfreq/exynos-bus.c |
978 | +++ b/drivers/devfreq/exynos-bus.c |
979 | @@ -498,7 +498,7 @@ static int exynos_bus_probe(struct platform_device *pdev) |
980 | if (IS_ERR(bus->devfreq)) { |
981 | dev_err(dev, |
982 | "failed to add devfreq dev with passive governor\n"); |
983 | - ret = -EPROBE_DEFER; |
984 | + ret = PTR_ERR(bus->devfreq); |
985 | goto err; |
986 | } |
987 | |
988 | diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c |
989 | index 030fe05ed43b..9f3dbc8c63d2 100644 |
990 | --- a/drivers/dma/pl330.c |
991 | +++ b/drivers/dma/pl330.c |
992 | @@ -448,6 +448,9 @@ struct dma_pl330_chan { |
993 | |
994 | /* for cyclic capability */ |
995 | bool cyclic; |
996 | + |
997 | + /* for runtime pm tracking */ |
998 | + bool active; |
999 | }; |
1000 | |
1001 | struct pl330_dmac { |
1002 | @@ -2031,6 +2034,7 @@ static void pl330_tasklet(unsigned long data) |
1003 | _stop(pch->thread); |
1004 | spin_unlock(&pch->thread->dmac->lock); |
1005 | power_down = true; |
1006 | + pch->active = false; |
1007 | } else { |
1008 | /* Make sure the PL330 Channel thread is active */ |
1009 | spin_lock(&pch->thread->dmac->lock); |
1010 | @@ -2050,6 +2054,7 @@ static void pl330_tasklet(unsigned long data) |
1011 | desc->status = PREP; |
1012 | list_move_tail(&desc->node, &pch->work_list); |
1013 | if (power_down) { |
1014 | + pch->active = true; |
1015 | spin_lock(&pch->thread->dmac->lock); |
1016 | _start(pch->thread); |
1017 | spin_unlock(&pch->thread->dmac->lock); |
1018 | @@ -2164,6 +2169,7 @@ static int pl330_terminate_all(struct dma_chan *chan) |
1019 | unsigned long flags; |
1020 | struct pl330_dmac *pl330 = pch->dmac; |
1021 | LIST_HEAD(list); |
1022 | + bool power_down = false; |
1023 | |
1024 | pm_runtime_get_sync(pl330->ddma.dev); |
1025 | spin_lock_irqsave(&pch->lock, flags); |
1026 | @@ -2174,6 +2180,8 @@ static int pl330_terminate_all(struct dma_chan *chan) |
1027 | pch->thread->req[0].desc = NULL; |
1028 | pch->thread->req[1].desc = NULL; |
1029 | pch->thread->req_running = -1; |
1030 | + power_down = pch->active; |
1031 | + pch->active = false; |
1032 | |
1033 | /* Mark all desc done */ |
1034 | list_for_each_entry(desc, &pch->submitted_list, node) { |
1035 | @@ -2191,6 +2199,8 @@ static int pl330_terminate_all(struct dma_chan *chan) |
1036 | list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); |
1037 | spin_unlock_irqrestore(&pch->lock, flags); |
1038 | pm_runtime_mark_last_busy(pl330->ddma.dev); |
1039 | + if (power_down) |
1040 | + pm_runtime_put_autosuspend(pl330->ddma.dev); |
1041 | pm_runtime_put_autosuspend(pl330->ddma.dev); |
1042 | |
1043 | return 0; |
1044 | @@ -2350,6 +2360,7 @@ static void pl330_issue_pending(struct dma_chan *chan) |
1045 | * updated on work_list emptiness status. |
1046 | */ |
1047 | WARN_ON(list_empty(&pch->submitted_list)); |
1048 | + pch->active = true; |
1049 | pm_runtime_get_sync(pch->dmac->ddma.dev); |
1050 | } |
1051 | list_splice_tail_init(&pch->submitted_list, &pch->work_list); |
1052 | diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c |
1053 | index 2e441d0ccd79..4c357d475465 100644 |
1054 | --- a/drivers/dma/sh/rcar-dmac.c |
1055 | +++ b/drivers/dma/sh/rcar-dmac.c |
1056 | @@ -986,6 +986,7 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan) |
1057 | { |
1058 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); |
1059 | struct rcar_dmac *dmac = to_rcar_dmac(chan->device); |
1060 | + struct rcar_dmac_chan_map *map = &rchan->map; |
1061 | struct rcar_dmac_desc_page *page, *_page; |
1062 | struct rcar_dmac_desc *desc; |
1063 | LIST_HEAD(list); |
1064 | @@ -1019,6 +1020,13 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan) |
1065 | free_page((unsigned long)page); |
1066 | } |
1067 | |
1068 | + /* Remove slave mapping if present. */ |
1069 | + if (map->slave.xfer_size) { |
1070 | + dma_unmap_resource(chan->device->dev, map->addr, |
1071 | + map->slave.xfer_size, map->dir, 0); |
1072 | + map->slave.xfer_size = 0; |
1073 | + } |
1074 | + |
1075 | pm_runtime_put(chan->device->dev); |
1076 | } |
1077 | |
1078 | diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c |
1079 | index 717704e9ae07..c0303f61c26a 100644 |
1080 | --- a/drivers/hid/hid-corsair.c |
1081 | +++ b/drivers/hid/hid-corsair.c |
1082 | @@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev) |
1083 | struct usb_interface *usbif = to_usb_interface(dev->parent); |
1084 | struct usb_device *usbdev = interface_to_usbdev(usbif); |
1085 | int brightness; |
1086 | - char data[8]; |
1087 | + char *data; |
1088 | + |
1089 | + data = kmalloc(8, GFP_KERNEL); |
1090 | + if (!data) |
1091 | + return -ENOMEM; |
1092 | |
1093 | ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), |
1094 | K90_REQUEST_STATUS, |
1095 | USB_DIR_IN | USB_TYPE_VENDOR | |
1096 | USB_RECIP_DEVICE, 0, 0, data, 8, |
1097 | USB_CTRL_SET_TIMEOUT); |
1098 | - if (ret < 0) { |
1099 | + if (ret < 5) { |
1100 | dev_warn(dev, "Failed to get K90 initial state (error %d).\n", |
1101 | ret); |
1102 | - return -EIO; |
1103 | + ret = -EIO; |
1104 | + goto out; |
1105 | } |
1106 | brightness = data[4]; |
1107 | if (brightness < 0 || brightness > 3) { |
1108 | dev_warn(dev, |
1109 | "Read invalid backlight brightness: %02hhx.\n", |
1110 | data[4]); |
1111 | - return -EIO; |
1112 | + ret = -EIO; |
1113 | + goto out; |
1114 | } |
1115 | - return brightness; |
1116 | + ret = brightness; |
1117 | +out: |
1118 | + kfree(data); |
1119 | + |
1120 | + return ret; |
1121 | } |
1122 | |
1123 | static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev) |
1124 | @@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev, |
1125 | struct usb_interface *usbif = to_usb_interface(dev->parent); |
1126 | struct usb_device *usbdev = interface_to_usbdev(usbif); |
1127 | const char *macro_mode; |
1128 | - char data[8]; |
1129 | + char *data; |
1130 | + |
1131 | + data = kmalloc(2, GFP_KERNEL); |
1132 | + if (!data) |
1133 | + return -ENOMEM; |
1134 | |
1135 | ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), |
1136 | K90_REQUEST_GET_MODE, |
1137 | USB_DIR_IN | USB_TYPE_VENDOR | |
1138 | USB_RECIP_DEVICE, 0, 0, data, 2, |
1139 | USB_CTRL_SET_TIMEOUT); |
1140 | - if (ret < 0) { |
1141 | + if (ret < 1) { |
1142 | dev_warn(dev, "Failed to get K90 initial mode (error %d).\n", |
1143 | ret); |
1144 | - return -EIO; |
1145 | + ret = -EIO; |
1146 | + goto out; |
1147 | } |
1148 | |
1149 | switch (data[0]) { |
1150 | @@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev, |
1151 | default: |
1152 | dev_warn(dev, "K90 in unknown mode: %02hhx.\n", |
1153 | data[0]); |
1154 | - return -EIO; |
1155 | + ret = -EIO; |
1156 | + goto out; |
1157 | } |
1158 | |
1159 | - return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode); |
1160 | + ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode); |
1161 | +out: |
1162 | + kfree(data); |
1163 | + |
1164 | + return ret; |
1165 | } |
1166 | |
1167 | static ssize_t k90_store_macro_mode(struct device *dev, |
1168 | @@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev, |
1169 | struct usb_interface *usbif = to_usb_interface(dev->parent); |
1170 | struct usb_device *usbdev = interface_to_usbdev(usbif); |
1171 | int current_profile; |
1172 | - char data[8]; |
1173 | + char *data; |
1174 | + |
1175 | + data = kmalloc(8, GFP_KERNEL); |
1176 | + if (!data) |
1177 | + return -ENOMEM; |
1178 | |
1179 | ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), |
1180 | K90_REQUEST_STATUS, |
1181 | USB_DIR_IN | USB_TYPE_VENDOR | |
1182 | USB_RECIP_DEVICE, 0, 0, data, 8, |
1183 | USB_CTRL_SET_TIMEOUT); |
1184 | - if (ret < 0) { |
1185 | + if (ret < 8) { |
1186 | dev_warn(dev, "Failed to get K90 initial state (error %d).\n", |
1187 | ret); |
1188 | - return -EIO; |
1189 | + ret = -EIO; |
1190 | + goto out; |
1191 | } |
1192 | current_profile = data[7]; |
1193 | if (current_profile < 1 || current_profile > 3) { |
1194 | dev_warn(dev, "Read invalid current profile: %02hhx.\n", |
1195 | data[7]); |
1196 | - return -EIO; |
1197 | + ret = -EIO; |
1198 | + goto out; |
1199 | } |
1200 | |
1201 | - return snprintf(buf, PAGE_SIZE, "%d\n", current_profile); |
1202 | + ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile); |
1203 | +out: |
1204 | + kfree(data); |
1205 | + |
1206 | + return ret; |
1207 | } |
1208 | |
1209 | static ssize_t k90_store_current_profile(struct device *dev, |
1210 | diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c |
1211 | index 1a2984c28b95..ae04826e82fc 100644 |
1212 | --- a/drivers/infiniband/core/cache.c |
1213 | +++ b/drivers/infiniband/core/cache.c |
1214 | @@ -770,12 +770,8 @@ static int _gid_table_setup_one(struct ib_device *ib_dev) |
1215 | int err = 0; |
1216 | |
1217 | table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL); |
1218 | - |
1219 | - if (!table) { |
1220 | - pr_warn("failed to allocate ib gid cache for %s\n", |
1221 | - ib_dev->name); |
1222 | + if (!table) |
1223 | return -ENOMEM; |
1224 | - } |
1225 | |
1226 | for (port = 0; port < ib_dev->phys_port_cnt; port++) { |
1227 | u8 rdma_port = port + rdma_start_port(ib_dev); |
1228 | @@ -1170,14 +1166,13 @@ int ib_cache_setup_one(struct ib_device *device) |
1229 | GFP_KERNEL); |
1230 | if (!device->cache.pkey_cache || |
1231 | !device->cache.lmc_cache) { |
1232 | - pr_warn("Couldn't allocate cache for %s\n", device->name); |
1233 | - return -ENOMEM; |
1234 | + err = -ENOMEM; |
1235 | + goto free; |
1236 | } |
1237 | |
1238 | err = gid_table_setup_one(device); |
1239 | if (err) |
1240 | - /* Allocated memory will be cleaned in the release function */ |
1241 | - return err; |
1242 | + goto free; |
1243 | |
1244 | for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) |
1245 | ib_cache_update(device, p + rdma_start_port(device)); |
1246 | @@ -1192,6 +1187,9 @@ int ib_cache_setup_one(struct ib_device *device) |
1247 | |
1248 | err: |
1249 | gid_table_cleanup_one(device); |
1250 | +free: |
1251 | + kfree(device->cache.pkey_cache); |
1252 | + kfree(device->cache.lmc_cache); |
1253 | return err; |
1254 | } |
1255 | |
1256 | diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c |
1257 | index b9bf0759f10a..8dfc76f8cbb4 100644 |
1258 | --- a/drivers/infiniband/hw/mlx4/ah.c |
1259 | +++ b/drivers/infiniband/hw/mlx4/ah.c |
1260 | @@ -114,7 +114,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr |
1261 | !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support)) |
1262 | --ah->av.eth.stat_rate; |
1263 | } |
1264 | - |
1265 | + ah->av.eth.sl_tclass_flowlabel |= |
1266 | + cpu_to_be32((ah_attr->grh.traffic_class << 20) | |
1267 | + ah_attr->grh.flow_label); |
1268 | /* |
1269 | * HW requires multicast LID so we just choose one. |
1270 | */ |
1271 | @@ -122,7 +124,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr |
1272 | ah->av.ib.dlid = cpu_to_be16(0xc000); |
1273 | |
1274 | memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16); |
1275 | - ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29); |
1276 | + ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29); |
1277 | |
1278 | return &ah->ibah; |
1279 | } |
1280 | diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c |
1281 | index 1672907ff219..18d309e40f1b 100644 |
1282 | --- a/drivers/infiniband/hw/mlx4/mad.c |
1283 | +++ b/drivers/infiniband/hw/mlx4/mad.c |
1284 | @@ -702,10 +702,18 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port, |
1285 | |
1286 | /* If a grh is present, we demux according to it */ |
1287 | if (wc->wc_flags & IB_WC_GRH) { |
1288 | - slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id); |
1289 | - if (slave < 0) { |
1290 | - mlx4_ib_warn(ibdev, "failed matching grh\n"); |
1291 | - return -ENOENT; |
1292 | + if (grh->dgid.global.interface_id == |
1293 | + cpu_to_be64(IB_SA_WELL_KNOWN_GUID) && |
1294 | + grh->dgid.global.subnet_prefix == cpu_to_be64( |
1295 | + atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) { |
1296 | + slave = 0; |
1297 | + } else { |
1298 | + slave = mlx4_ib_find_real_gid(ibdev, port, |
1299 | + grh->dgid.global.interface_id); |
1300 | + if (slave < 0) { |
1301 | + mlx4_ib_warn(ibdev, "failed matching grh\n"); |
1302 | + return -ENOENT; |
1303 | + } |
1304 | } |
1305 | } |
1306 | /* Class-specific handling */ |
1307 | diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c |
1308 | index b597e8227591..46ad99595fd2 100644 |
1309 | --- a/drivers/infiniband/hw/mlx4/main.c |
1310 | +++ b/drivers/infiniband/hw/mlx4/main.c |
1311 | @@ -697,9 +697,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, |
1312 | if (err) |
1313 | goto out; |
1314 | |
1315 | - props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ? |
1316 | - IB_WIDTH_4X : IB_WIDTH_1X; |
1317 | - props->active_speed = IB_SPEED_QDR; |
1318 | + props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) || |
1319 | + (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? |
1320 | + IB_WIDTH_4X : IB_WIDTH_1X; |
1321 | + props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? |
1322 | + IB_SPEED_FDR : IB_SPEED_QDR; |
1323 | props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS; |
1324 | props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; |
1325 | props->max_msg_sz = mdev->dev->caps.max_msg_sz; |
1326 | @@ -2820,14 +2822,19 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) |
1327 | goto err_steer_qp_release; |
1328 | } |
1329 | |
1330 | - bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count); |
1331 | - |
1332 | - err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE( |
1333 | - dev, ibdev->steer_qpn_base, |
1334 | - ibdev->steer_qpn_base + |
1335 | - ibdev->steer_qpn_count - 1); |
1336 | - if (err) |
1337 | - goto err_steer_free_bitmap; |
1338 | + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) { |
1339 | + bitmap_zero(ibdev->ib_uc_qpns_bitmap, |
1340 | + ibdev->steer_qpn_count); |
1341 | + err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE( |
1342 | + dev, ibdev->steer_qpn_base, |
1343 | + ibdev->steer_qpn_base + |
1344 | + ibdev->steer_qpn_count - 1); |
1345 | + if (err) |
1346 | + goto err_steer_free_bitmap; |
1347 | + } else { |
1348 | + bitmap_fill(ibdev->ib_uc_qpns_bitmap, |
1349 | + ibdev->steer_qpn_count); |
1350 | + } |
1351 | } |
1352 | |
1353 | for (j = 1; j <= ibdev->dev->caps.num_ports; j++) |
1354 | diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c |
1355 | index 570bc866b1d6..c22454383976 100644 |
1356 | --- a/drivers/infiniband/hw/mlx4/qp.c |
1357 | +++ b/drivers/infiniband/hw/mlx4/qp.c |
1358 | @@ -1280,7 +1280,8 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp) |
1359 | if (is_qp0(dev, mqp)) |
1360 | mlx4_CLOSE_PORT(dev->dev, mqp->port); |
1361 | |
1362 | - if (dev->qp1_proxy[mqp->port - 1] == mqp) { |
1363 | + if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI && |
1364 | + dev->qp1_proxy[mqp->port - 1] == mqp) { |
1365 | mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]); |
1366 | dev->qp1_proxy[mqp->port - 1] = NULL; |
1367 | mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]); |
1368 | @@ -1764,14 +1765,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, |
1369 | u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 : |
1370 | attr_mask & IB_QP_PORT ? attr->port_num : qp->port; |
1371 | union ib_gid gid; |
1372 | - struct ib_gid_attr gid_attr; |
1373 | + struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_IB}; |
1374 | u16 vlan = 0xffff; |
1375 | u8 smac[ETH_ALEN]; |
1376 | int status = 0; |
1377 | int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) && |
1378 | attr->ah_attr.ah_flags & IB_AH_GRH; |
1379 | |
1380 | - if (is_eth) { |
1381 | + if (is_eth && attr->ah_attr.ah_flags & IB_AH_GRH) { |
1382 | int index = attr->ah_attr.grh.sgid_index; |
1383 | |
1384 | status = ib_get_cached_gid(ibqp->device, port_num, |
1385 | diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c |
1386 | index 32b09f059c84..4cab29ea394c 100644 |
1387 | --- a/drivers/infiniband/hw/mlx5/main.c |
1388 | +++ b/drivers/infiniband/hw/mlx5/main.c |
1389 | @@ -496,6 +496,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, |
1390 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
1391 | struct mlx5_core_dev *mdev = dev->mdev; |
1392 | int err = -ENOMEM; |
1393 | + int max_sq_desc; |
1394 | int max_rq_sg; |
1395 | int max_sq_sg; |
1396 | u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz); |
1397 | @@ -618,9 +619,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, |
1398 | props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); |
1399 | max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) / |
1400 | sizeof(struct mlx5_wqe_data_seg); |
1401 | - max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) - |
1402 | - sizeof(struct mlx5_wqe_ctrl_seg)) / |
1403 | - sizeof(struct mlx5_wqe_data_seg); |
1404 | + max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512); |
1405 | + max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) - |
1406 | + sizeof(struct mlx5_wqe_raddr_seg)) / |
1407 | + sizeof(struct mlx5_wqe_data_seg); |
1408 | props->max_sge = min(max_rq_sg, max_sq_sg); |
1409 | props->max_sge_rd = MLX5_MAX_SGE_RD; |
1410 | props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); |
1411 | diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c |
1412 | index 4e9012463c37..be2d02b6a6aa 100644 |
1413 | --- a/drivers/infiniband/hw/mlx5/mr.c |
1414 | +++ b/drivers/infiniband/hw/mlx5/mr.c |
1415 | @@ -628,7 +628,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) |
1416 | ent->order = i + 2; |
1417 | ent->dev = dev; |
1418 | |
1419 | - if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) |
1420 | + if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) && |
1421 | + (mlx5_core_is_pf(dev->mdev))) |
1422 | limit = dev->mdev->profile->mr_cache[i].limit; |
1423 | else |
1424 | limit = 0; |
1425 | @@ -646,6 +647,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) |
1426 | return 0; |
1427 | } |
1428 | |
1429 | +static void wait_for_async_commands(struct mlx5_ib_dev *dev) |
1430 | +{ |
1431 | + struct mlx5_mr_cache *cache = &dev->cache; |
1432 | + struct mlx5_cache_ent *ent; |
1433 | + int total = 0; |
1434 | + int i; |
1435 | + int j; |
1436 | + |
1437 | + for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { |
1438 | + ent = &cache->ent[i]; |
1439 | + for (j = 0 ; j < 1000; j++) { |
1440 | + if (!ent->pending) |
1441 | + break; |
1442 | + msleep(50); |
1443 | + } |
1444 | + } |
1445 | + for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { |
1446 | + ent = &cache->ent[i]; |
1447 | + total += ent->pending; |
1448 | + } |
1449 | + |
1450 | + if (total) |
1451 | + mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total); |
1452 | + else |
1453 | + mlx5_ib_warn(dev, "done with all pending requests\n"); |
1454 | +} |
1455 | + |
1456 | int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) |
1457 | { |
1458 | int i; |
1459 | @@ -659,6 +687,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) |
1460 | clean_keys(dev, i); |
1461 | |
1462 | destroy_workqueue(dev->cache.wq); |
1463 | + wait_for_async_commands(dev); |
1464 | del_timer_sync(&dev->delay_timer); |
1465 | |
1466 | return 0; |
1467 | diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c |
1468 | index d1e921816bfe..aee3942ec68d 100644 |
1469 | --- a/drivers/infiniband/hw/mlx5/qp.c |
1470 | +++ b/drivers/infiniband/hw/mlx5/qp.c |
1471 | @@ -351,6 +351,29 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr) |
1472 | return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB); |
1473 | } |
1474 | |
1475 | +static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size) |
1476 | +{ |
1477 | + int max_sge; |
1478 | + |
1479 | + if (attr->qp_type == IB_QPT_RC) |
1480 | + max_sge = (min_t(int, wqe_size, 512) - |
1481 | + sizeof(struct mlx5_wqe_ctrl_seg) - |
1482 | + sizeof(struct mlx5_wqe_raddr_seg)) / |
1483 | + sizeof(struct mlx5_wqe_data_seg); |
1484 | + else if (attr->qp_type == IB_QPT_XRC_INI) |
1485 | + max_sge = (min_t(int, wqe_size, 512) - |
1486 | + sizeof(struct mlx5_wqe_ctrl_seg) - |
1487 | + sizeof(struct mlx5_wqe_xrc_seg) - |
1488 | + sizeof(struct mlx5_wqe_raddr_seg)) / |
1489 | + sizeof(struct mlx5_wqe_data_seg); |
1490 | + else |
1491 | + max_sge = (wqe_size - sq_overhead(attr)) / |
1492 | + sizeof(struct mlx5_wqe_data_seg); |
1493 | + |
1494 | + return min_t(int, max_sge, wqe_size - sq_overhead(attr) / |
1495 | + sizeof(struct mlx5_wqe_data_seg)); |
1496 | +} |
1497 | + |
1498 | static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, |
1499 | struct mlx5_ib_qp *qp) |
1500 | { |
1501 | @@ -387,7 +410,11 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, |
1502 | return -ENOMEM; |
1503 | } |
1504 | qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); |
1505 | - qp->sq.max_gs = attr->cap.max_send_sge; |
1506 | + qp->sq.max_gs = get_send_sge(attr, wqe_size); |
1507 | + if (qp->sq.max_gs < attr->cap.max_send_sge) |
1508 | + return -ENOMEM; |
1509 | + |
1510 | + attr->cap.max_send_sge = qp->sq.max_gs; |
1511 | qp->sq.max_post = wq_size / wqe_size; |
1512 | attr->cap.max_send_wr = qp->sq.max_post; |
1513 | |
1514 | diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c |
1515 | index 3857dbd9c956..729b0696626e 100644 |
1516 | --- a/drivers/infiniband/hw/mlx5/srq.c |
1517 | +++ b/drivers/infiniband/hw/mlx5/srq.c |
1518 | @@ -282,6 +282,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, |
1519 | mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", |
1520 | desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, |
1521 | srq->msrq.max_avail_gather); |
1522 | + in.type = init_attr->srq_type; |
1523 | |
1524 | if (pd->uobject) |
1525 | err = create_srq_user(pd, srq, &in, udata, buf_size); |
1526 | @@ -294,7 +295,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, |
1527 | goto err_srq; |
1528 | } |
1529 | |
1530 | - in.type = init_attr->srq_type; |
1531 | in.log_size = ilog2(srq->msrq.max); |
1532 | in.wqe_shift = srq->msrq.wqe_shift - 4; |
1533 | if (srq->wq_sig) |
1534 | diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h |
1535 | index f459c43a77c8..13ed2cc6eaa2 100644 |
1536 | --- a/drivers/infiniband/sw/rxe/rxe_param.h |
1537 | +++ b/drivers/infiniband/sw/rxe/rxe_param.h |
1538 | @@ -82,7 +82,7 @@ enum rxe_device_param { |
1539 | RXE_MAX_SGE = 32, |
1540 | RXE_MAX_SGE_RD = 32, |
1541 | RXE_MAX_CQ = 16384, |
1542 | - RXE_MAX_LOG_CQE = 13, |
1543 | + RXE_MAX_LOG_CQE = 15, |
1544 | RXE_MAX_MR = 2 * 1024, |
1545 | RXE_MAX_PD = 0x7ffc, |
1546 | RXE_MAX_QP_RD_ATOM = 128, |
1547 | diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c |
1548 | index 22bd9630dcd9..9f46be52335e 100644 |
1549 | --- a/drivers/infiniband/sw/rxe/rxe_req.c |
1550 | +++ b/drivers/infiniband/sw/rxe/rxe_req.c |
1551 | @@ -548,23 +548,23 @@ static void update_wqe_psn(struct rxe_qp *qp, |
1552 | static void save_state(struct rxe_send_wqe *wqe, |
1553 | struct rxe_qp *qp, |
1554 | struct rxe_send_wqe *rollback_wqe, |
1555 | - struct rxe_qp *rollback_qp) |
1556 | + u32 *rollback_psn) |
1557 | { |
1558 | rollback_wqe->state = wqe->state; |
1559 | rollback_wqe->first_psn = wqe->first_psn; |
1560 | rollback_wqe->last_psn = wqe->last_psn; |
1561 | - rollback_qp->req.psn = qp->req.psn; |
1562 | + *rollback_psn = qp->req.psn; |
1563 | } |
1564 | |
1565 | static void rollback_state(struct rxe_send_wqe *wqe, |
1566 | struct rxe_qp *qp, |
1567 | struct rxe_send_wqe *rollback_wqe, |
1568 | - struct rxe_qp *rollback_qp) |
1569 | + u32 rollback_psn) |
1570 | { |
1571 | wqe->state = rollback_wqe->state; |
1572 | wqe->first_psn = rollback_wqe->first_psn; |
1573 | wqe->last_psn = rollback_wqe->last_psn; |
1574 | - qp->req.psn = rollback_qp->req.psn; |
1575 | + qp->req.psn = rollback_psn; |
1576 | } |
1577 | |
1578 | static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, |
1579 | @@ -593,8 +593,8 @@ int rxe_requester(void *arg) |
1580 | int mtu; |
1581 | int opcode; |
1582 | int ret; |
1583 | - struct rxe_qp rollback_qp; |
1584 | struct rxe_send_wqe rollback_wqe; |
1585 | + u32 rollback_psn; |
1586 | |
1587 | next_wqe: |
1588 | if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR)) |
1589 | @@ -719,7 +719,7 @@ int rxe_requester(void *arg) |
1590 | * rxe_xmit_packet(). |
1591 | * Otherwise, completer might initiate an unjustified retry flow. |
1592 | */ |
1593 | - save_state(wqe, qp, &rollback_wqe, &rollback_qp); |
1594 | + save_state(wqe, qp, &rollback_wqe, &rollback_psn); |
1595 | update_wqe_state(qp, wqe, &pkt); |
1596 | update_wqe_psn(qp, wqe, &pkt, payload); |
1597 | ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); |
1598 | @@ -727,7 +727,7 @@ int rxe_requester(void *arg) |
1599 | qp->need_req_skb = 1; |
1600 | kfree_skb(skb); |
1601 | |
1602 | - rollback_state(wqe, qp, &rollback_wqe, &rollback_qp); |
1603 | + rollback_state(wqe, qp, &rollback_wqe, rollback_psn); |
1604 | |
1605 | if (ret == -EAGAIN) { |
1606 | rxe_run_task(&qp->req.task, 1); |
1607 | diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c |
1608 | index 339a1eecdfe3..81a8080c18b3 100644 |
1609 | --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c |
1610 | +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c |
1611 | @@ -1054,8 +1054,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_ |
1612 | |
1613 | tx_qp = ib_create_qp(priv->pd, &attr); |
1614 | if (PTR_ERR(tx_qp) == -EINVAL) { |
1615 | - ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n", |
1616 | - priv->ca->name); |
1617 | attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO; |
1618 | tx_qp = ib_create_qp(priv->pd, &attr); |
1619 | } |
1620 | diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c |
1621 | index 6d7de9bfed9a..b93fe83a0b63 100644 |
1622 | --- a/drivers/input/mouse/alps.c |
1623 | +++ b/drivers/input/mouse/alps.c |
1624 | @@ -1346,6 +1346,18 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse) |
1625 | |
1626 | priv->multi_packet = 0; |
1627 | |
1628 | + /* Report trackstick */ |
1629 | + if (alps_get_pkt_id_ss4_v2(packet) == SS4_PACKET_ID_STICK) { |
1630 | + if (priv->flags & ALPS_DUALPOINT) { |
1631 | + input_report_key(dev2, BTN_LEFT, f->ts_left); |
1632 | + input_report_key(dev2, BTN_RIGHT, f->ts_right); |
1633 | + input_report_key(dev2, BTN_MIDDLE, f->ts_middle); |
1634 | + input_sync(dev2); |
1635 | + } |
1636 | + return; |
1637 | + } |
1638 | + |
1639 | + /* Report touchpad */ |
1640 | alps_report_mt_data(psmouse, (f->fingers <= 4) ? f->fingers : 4); |
1641 | |
1642 | input_mt_report_finger_count(dev, f->fingers); |
1643 | @@ -1356,13 +1368,6 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse) |
1644 | |
1645 | input_report_abs(dev, ABS_PRESSURE, f->pressure); |
1646 | input_sync(dev); |
1647 | - |
1648 | - if (priv->flags & ALPS_DUALPOINT) { |
1649 | - input_report_key(dev2, BTN_LEFT, f->ts_left); |
1650 | - input_report_key(dev2, BTN_RIGHT, f->ts_right); |
1651 | - input_report_key(dev2, BTN_MIDDLE, f->ts_middle); |
1652 | - input_sync(dev2); |
1653 | - } |
1654 | } |
1655 | |
1656 | static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse) |
1657 | diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig |
1658 | index ce4a96fccc43..5ff803efdc03 100644 |
1659 | --- a/drivers/media/platform/Kconfig |
1660 | +++ b/drivers/media/platform/Kconfig |
1661 | @@ -93,7 +93,7 @@ config VIDEO_OMAP3_DEBUG |
1662 | |
1663 | config VIDEO_PXA27x |
1664 | tristate "PXA27x Quick Capture Interface driver" |
1665 | - depends on VIDEO_DEV && HAS_DMA |
1666 | + depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA |
1667 | depends on PXA27x || COMPILE_TEST |
1668 | select VIDEOBUF2_DMA_SG |
1669 | select SG_SPLIT |
1670 | diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c |
1671 | index cff63e511e6d..b8f3d9fa66e9 100644 |
1672 | --- a/drivers/media/platform/blackfin/ppi.c |
1673 | +++ b/drivers/media/platform/blackfin/ppi.c |
1674 | @@ -214,6 +214,8 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params) |
1675 | if (params->dlen > 24 || params->dlen <= 0) |
1676 | return -EINVAL; |
1677 | pctrl = devm_pinctrl_get(ppi->dev); |
1678 | + if (IS_ERR(pctrl)) |
1679 | + return PTR_ERR(pctrl); |
1680 | pstate = pinctrl_lookup_state(pctrl, |
1681 | pin_state[(params->dlen + 7) / 8 - 1]); |
1682 | if (pinctrl_select_state(pctrl, pstate)) |
1683 | diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c |
1684 | index 3436eda58855..27e7cf65c2a7 100644 |
1685 | --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c |
1686 | +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c |
1687 | @@ -926,10 +926,11 @@ static int s5p_mfc_release(struct file *file) |
1688 | mfc_debug_enter(); |
1689 | if (dev) |
1690 | mutex_lock(&dev->mfc_mutex); |
1691 | - s5p_mfc_clock_on(); |
1692 | vb2_queue_release(&ctx->vq_src); |
1693 | vb2_queue_release(&ctx->vq_dst); |
1694 | if (dev) { |
1695 | + s5p_mfc_clock_on(); |
1696 | + |
1697 | /* Mark context as idle */ |
1698 | clear_work_bit_irqsave(ctx); |
1699 | /* |
1700 | @@ -951,9 +952,9 @@ static int s5p_mfc_release(struct file *file) |
1701 | if (s5p_mfc_power_off() < 0) |
1702 | mfc_err("Power off failed\n"); |
1703 | } |
1704 | + mfc_debug(2, "Shutting down clock\n"); |
1705 | + s5p_mfc_clock_off(); |
1706 | } |
1707 | - mfc_debug(2, "Shutting down clock\n"); |
1708 | - s5p_mfc_clock_off(); |
1709 | if (dev) |
1710 | dev->ctx[ctx->num] = NULL; |
1711 | s5p_mfc_dec_ctrls_delete(ctx); |
1712 | diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c |
1713 | index d341d4994528..cf2a8d884536 100644 |
1714 | --- a/drivers/media/platform/sti/hva/hva-hw.c |
1715 | +++ b/drivers/media/platform/sti/hva/hva-hw.c |
1716 | @@ -305,16 +305,16 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva) |
1717 | /* get memory for registers */ |
1718 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1719 | hva->regs = devm_ioremap_resource(dev, regs); |
1720 | - if (IS_ERR_OR_NULL(hva->regs)) { |
1721 | + if (IS_ERR(hva->regs)) { |
1722 | dev_err(dev, "%s failed to get regs\n", HVA_PREFIX); |
1723 | return PTR_ERR(hva->regs); |
1724 | } |
1725 | |
1726 | /* get memory for esram */ |
1727 | esram = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
1728 | - if (IS_ERR_OR_NULL(esram)) { |
1729 | + if (!esram) { |
1730 | dev_err(dev, "%s failed to get esram\n", HVA_PREFIX); |
1731 | - return PTR_ERR(esram); |
1732 | + return -ENODEV; |
1733 | } |
1734 | hva->esram_addr = esram->start; |
1735 | hva->esram_size = resource_size(esram); |
1736 | diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c |
1737 | index 0f301903aa6f..63165d324fff 100644 |
1738 | --- a/drivers/media/rc/ite-cir.c |
1739 | +++ b/drivers/media/rc/ite-cir.c |
1740 | @@ -263,6 +263,8 @@ static void ite_set_carrier_params(struct ite_dev *dev) |
1741 | |
1742 | if (allowance > ITE_RXDCR_MAX) |
1743 | allowance = ITE_RXDCR_MAX; |
1744 | + |
1745 | + use_demodulator = true; |
1746 | } |
1747 | } |
1748 | |
1749 | diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c |
1750 | index d76f36233f43..5143a90219c0 100644 |
1751 | --- a/drivers/media/spi/gs1662.c |
1752 | +++ b/drivers/media/spi/gs1662.c |
1753 | @@ -453,10 +453,9 @@ static int gs_probe(struct spi_device *spi) |
1754 | static int gs_remove(struct spi_device *spi) |
1755 | { |
1756 | struct v4l2_subdev *sd = spi_get_drvdata(spi); |
1757 | - struct gs *gs = to_gs(sd); |
1758 | |
1759 | v4l2_device_unregister_subdev(sd); |
1760 | - kfree(gs); |
1761 | + |
1762 | return 0; |
1763 | } |
1764 | |
1765 | diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c |
1766 | index 44ecebd1ea8c..c8b8ac66ff7e 100644 |
1767 | --- a/drivers/mmc/host/mxs-mmc.c |
1768 | +++ b/drivers/mmc/host/mxs-mmc.c |
1769 | @@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) |
1770 | cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); |
1771 | cmd1 = cmd->arg; |
1772 | |
1773 | + if (cmd->opcode == MMC_STOP_TRANSMISSION) |
1774 | + cmd0 |= BM_SSP_CMD0_APPEND_8CYC; |
1775 | + |
1776 | if (host->sdio_irq_en) { |
1777 | ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; |
1778 | cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; |
1779 | @@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) |
1780 | ssp->base + HW_SSP_BLOCK_SIZE); |
1781 | } |
1782 | |
1783 | - if ((cmd->opcode == MMC_STOP_TRANSMISSION) || |
1784 | - (cmd->opcode == SD_IO_RW_EXTENDED)) |
1785 | + if (cmd->opcode == SD_IO_RW_EXTENDED) |
1786 | cmd0 |= BM_SSP_CMD0_APPEND_8CYC; |
1787 | |
1788 | cmd1 = cmd->arg; |
1789 | diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c |
1790 | index 81d4dc034793..fddd0be196f4 100644 |
1791 | --- a/drivers/mmc/host/sdhci-acpi.c |
1792 | +++ b/drivers/mmc/host/sdhci-acpi.c |
1793 | @@ -394,7 +394,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev) |
1794 | /* Power on the SDHCI controller and its children */ |
1795 | acpi_device_fix_up_power(device); |
1796 | list_for_each_entry(child, &device->children, node) |
1797 | - acpi_device_fix_up_power(child); |
1798 | + if (child->status.present && child->status.enabled) |
1799 | + acpi_device_fix_up_power(child); |
1800 | |
1801 | if (acpi_bus_get_status(device) || !device->status.present) |
1802 | return -ENODEV; |
1803 | diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig |
1804 | index 7b7a887b4709..b254090b8a1b 100644 |
1805 | --- a/drivers/mtd/nand/Kconfig |
1806 | +++ b/drivers/mtd/nand/Kconfig |
1807 | @@ -537,7 +537,7 @@ config MTD_NAND_FSMC |
1808 | Flexible Static Memory Controller (FSMC) |
1809 | |
1810 | config MTD_NAND_XWAY |
1811 | - tristate "Support for NAND on Lantiq XWAY SoC" |
1812 | + bool "Support for NAND on Lantiq XWAY SoC" |
1813 | depends on LANTIQ && SOC_TYPE_XWAY |
1814 | help |
1815 | Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached |
1816 | diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c |
1817 | index 852388171f20..bc6e49af063a 100644 |
1818 | --- a/drivers/mtd/nand/lpc32xx_mlc.c |
1819 | +++ b/drivers/mtd/nand/lpc32xx_mlc.c |
1820 | @@ -776,7 +776,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) |
1821 | init_completion(&host->comp_controller); |
1822 | |
1823 | host->irq = platform_get_irq(pdev, 0); |
1824 | - if ((host->irq < 0) || (host->irq >= NR_IRQS)) { |
1825 | + if (host->irq < 0) { |
1826 | dev_err(&pdev->dev, "failed to get platform irq\n"); |
1827 | res = -EINVAL; |
1828 | goto err_exit3; |
1829 | diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c |
1830 | index 1f2948c0c458..895101a5e686 100644 |
1831 | --- a/drivers/mtd/nand/xway_nand.c |
1832 | +++ b/drivers/mtd/nand/xway_nand.c |
1833 | @@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = { |
1834 | { .compatible = "lantiq,nand-xway" }, |
1835 | {}, |
1836 | }; |
1837 | -MODULE_DEVICE_TABLE(of, xway_nand_match); |
1838 | |
1839 | static struct platform_driver xway_nand_driver = { |
1840 | .probe = xway_nand_probe, |
1841 | @@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = { |
1842 | }, |
1843 | }; |
1844 | |
1845 | -module_platform_driver(xway_nand_driver); |
1846 | - |
1847 | -MODULE_LICENSE("GPL"); |
1848 | +builtin_platform_driver(xway_nand_driver); |
1849 | diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c |
1850 | index d403ba7b8f43..d489fbd07c12 100644 |
1851 | --- a/drivers/mtd/spi-nor/cadence-quadspi.c |
1852 | +++ b/drivers/mtd/spi-nor/cadence-quadspi.c |
1853 | @@ -1077,12 +1077,14 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np) |
1854 | |
1855 | /* Get flash device data */ |
1856 | for_each_available_child_of_node(dev->of_node, np) { |
1857 | - if (of_property_read_u32(np, "reg", &cs)) { |
1858 | + ret = of_property_read_u32(np, "reg", &cs); |
1859 | + if (ret) { |
1860 | dev_err(dev, "Couldn't determine chip select.\n"); |
1861 | goto err; |
1862 | } |
1863 | |
1864 | - if (cs > CQSPI_MAX_CHIPSELECT) { |
1865 | + if (cs >= CQSPI_MAX_CHIPSELECT) { |
1866 | + ret = -EINVAL; |
1867 | dev_err(dev, "Chip select %d out of range.\n", cs); |
1868 | goto err; |
1869 | } |
1870 | diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c |
1871 | index 1056ed142411..f186e0460cde 100644 |
1872 | --- a/drivers/net/ieee802154/atusb.c |
1873 | +++ b/drivers/net/ieee802154/atusb.c |
1874 | @@ -112,13 +112,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg) |
1875 | { |
1876 | struct usb_device *usb_dev = atusb->usb_dev; |
1877 | int ret; |
1878 | + uint8_t *buffer; |
1879 | uint8_t value; |
1880 | |
1881 | + buffer = kmalloc(1, GFP_KERNEL); |
1882 | + if (!buffer) |
1883 | + return -ENOMEM; |
1884 | + |
1885 | dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg); |
1886 | ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), |
1887 | ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, |
1888 | - 0, reg, &value, 1, 1000); |
1889 | - return ret >= 0 ? value : ret; |
1890 | + 0, reg, buffer, 1, 1000); |
1891 | + |
1892 | + if (ret >= 0) { |
1893 | + value = buffer[0]; |
1894 | + kfree(buffer); |
1895 | + return value; |
1896 | + } else { |
1897 | + kfree(buffer); |
1898 | + return ret; |
1899 | + } |
1900 | } |
1901 | |
1902 | static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask, |
1903 | @@ -587,9 +600,13 @@ static struct ieee802154_ops atusb_ops = { |
1904 | static int atusb_get_and_show_revision(struct atusb *atusb) |
1905 | { |
1906 | struct usb_device *usb_dev = atusb->usb_dev; |
1907 | - unsigned char buffer[3]; |
1908 | + unsigned char *buffer; |
1909 | int ret; |
1910 | |
1911 | + buffer = kmalloc(3, GFP_KERNEL); |
1912 | + if (!buffer) |
1913 | + return -ENOMEM; |
1914 | + |
1915 | /* Get a couple of the ATMega Firmware values */ |
1916 | ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), |
1917 | ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0, |
1918 | @@ -605,15 +622,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb) |
1919 | dev_info(&usb_dev->dev, "Please update to version 0.2 or newer"); |
1920 | } |
1921 | |
1922 | + kfree(buffer); |
1923 | return ret; |
1924 | } |
1925 | |
1926 | static int atusb_get_and_show_build(struct atusb *atusb) |
1927 | { |
1928 | struct usb_device *usb_dev = atusb->usb_dev; |
1929 | - char build[ATUSB_BUILD_SIZE + 1]; |
1930 | + char *build; |
1931 | int ret; |
1932 | |
1933 | + build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL); |
1934 | + if (!build) |
1935 | + return -ENOMEM; |
1936 | + |
1937 | ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), |
1938 | ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0, |
1939 | build, ATUSB_BUILD_SIZE, 1000); |
1940 | @@ -622,6 +644,7 @@ static int atusb_get_and_show_build(struct atusb *atusb) |
1941 | dev_info(&usb_dev->dev, "Firmware: build %s\n", build); |
1942 | } |
1943 | |
1944 | + kfree(build); |
1945 | return ret; |
1946 | } |
1947 | |
1948 | diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c |
1949 | index abe5c6bc756c..1480734c2d6e 100644 |
1950 | --- a/drivers/nvdimm/namespace_devs.c |
1951 | +++ b/drivers/nvdimm/namespace_devs.c |
1952 | @@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) |
1953 | { |
1954 | resource_size_t allocated = 0, available = 0; |
1955 | struct nd_region *nd_region = to_nd_region(dev->parent); |
1956 | + struct nd_namespace_common *ndns = to_ndns(dev); |
1957 | struct nd_mapping *nd_mapping; |
1958 | struct nvdimm_drvdata *ndd; |
1959 | struct nd_label_id label_id; |
1960 | @@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) |
1961 | u8 *uuid = NULL; |
1962 | int rc, i; |
1963 | |
1964 | - if (dev->driver || to_ndns(dev)->claim) |
1965 | + if (dev->driver || ndns->claim) |
1966 | return -EBUSY; |
1967 | |
1968 | if (is_namespace_pmem(dev)) { |
1969 | @@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) |
1970 | |
1971 | nd_namespace_pmem_set_resource(nd_region, nspm, |
1972 | val * nd_region->ndr_mappings); |
1973 | - } else if (is_namespace_blk(dev)) { |
1974 | - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); |
1975 | - |
1976 | - /* |
1977 | - * Try to delete the namespace if we deleted all of its |
1978 | - * allocation, this is not the seed device for the |
1979 | - * region, and it is not actively claimed by a btt |
1980 | - * instance. |
1981 | - */ |
1982 | - if (val == 0 && nd_region->ns_seed != dev |
1983 | - && !nsblk->common.claim) |
1984 | - nd_device_unregister(dev, ND_ASYNC); |
1985 | } |
1986 | |
1987 | + /* |
1988 | + * Try to delete the namespace if we deleted all of its |
1989 | + * allocation, this is not the seed device for the region, and |
1990 | + * it is not actively claimed by a btt instance. |
1991 | + */ |
1992 | + if (val == 0 && nd_region->ns_seed != dev && !ndns->claim) |
1993 | + nd_device_unregister(dev, ND_ASYNC); |
1994 | + |
1995 | return rc; |
1996 | } |
1997 | |
1998 | diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c |
1999 | index bed19994c1e9..af8f6e92e885 100644 |
2000 | --- a/drivers/pci/host/pcie-designware.c |
2001 | +++ b/drivers/pci/host/pcie-designware.c |
2002 | @@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp) |
2003 | { |
2004 | u32 val; |
2005 | |
2006 | - /* get iATU unroll support */ |
2007 | - pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp); |
2008 | - dev_dbg(pp->dev, "iATU unroll: %s\n", |
2009 | - pp->iatu_unroll_enabled ? "enabled" : "disabled"); |
2010 | - |
2011 | /* set the number of lanes */ |
2012 | val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL); |
2013 | val &= ~PORT_LINK_MODE_MASK; |
2014 | @@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp) |
2015 | * we should not program the ATU here. |
2016 | */ |
2017 | if (!pp->ops->rd_other_conf) { |
2018 | + /* get iATU unroll support */ |
2019 | + pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp); |
2020 | + dev_dbg(pp->dev, "iATU unroll: %s\n", |
2021 | + pp->iatu_unroll_enabled ? "enabled" : "disabled"); |
2022 | + |
2023 | dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, |
2024 | PCIE_ATU_TYPE_MEM, pp->mem_base, |
2025 | pp->mem_bus_addr, pp->mem_size); |
2026 | diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c |
2027 | index 104c46d53121..300770cdc084 100644 |
2028 | --- a/drivers/pci/probe.c |
2029 | +++ b/drivers/pci/probe.c |
2030 | @@ -1050,6 +1050,7 @@ void set_pcie_port_type(struct pci_dev *pdev) |
2031 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
2032 | if (!pos) |
2033 | return; |
2034 | + |
2035 | pdev->pcie_cap = pos; |
2036 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); |
2037 | pdev->pcie_flags_reg = reg16; |
2038 | @@ -1057,13 +1058,14 @@ void set_pcie_port_type(struct pci_dev *pdev) |
2039 | pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; |
2040 | |
2041 | /* |
2042 | - * A Root Port is always the upstream end of a Link. No PCIe |
2043 | - * component has two Links. Two Links are connected by a Switch |
2044 | - * that has a Port on each Link and internal logic to connect the |
2045 | - * two Ports. |
2046 | + * A Root Port or a PCI-to-PCIe bridge is always the upstream end |
2047 | + * of a Link. No PCIe component has two Links. Two Links are |
2048 | + * connected by a Switch that has a Port on each Link and internal |
2049 | + * logic to connect the two Ports. |
2050 | */ |
2051 | type = pci_pcie_type(pdev); |
2052 | - if (type == PCI_EXP_TYPE_ROOT_PORT) |
2053 | + if (type == PCI_EXP_TYPE_ROOT_PORT || |
2054 | + type == PCI_EXP_TYPE_PCIE_BRIDGE) |
2055 | pdev->has_secondary_link = 1; |
2056 | else if (type == PCI_EXP_TYPE_UPSTREAM || |
2057 | type == PCI_EXP_TYPE_DOWNSTREAM) { |
2058 | diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c |
2059 | index b6ea9ffa7381..e0a629eaceab 100644 |
2060 | --- a/drivers/rpmsg/rpmsg_core.c |
2061 | +++ b/drivers/rpmsg/rpmsg_core.c |
2062 | @@ -411,8 +411,8 @@ int rpmsg_register_device(struct rpmsg_device *rpdev) |
2063 | struct device *dev = &rpdev->dev; |
2064 | int ret; |
2065 | |
2066 | - dev_set_name(&rpdev->dev, "%s:%s", |
2067 | - dev_name(dev->parent), rpdev->id.name); |
2068 | + dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent), |
2069 | + rpdev->id.name, rpdev->src, rpdev->dst); |
2070 | |
2071 | rpdev->dev.bus = &rpmsg_bus; |
2072 | rpdev->dev.release = rpmsg_release_device; |
2073 | diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |
2074 | index 608140f16d98..e3b911c895b4 100644 |
2075 | --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |
2076 | +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |
2077 | @@ -45,6 +45,7 @@ |
2078 | |
2079 | #define INITIAL_SRP_LIMIT 800 |
2080 | #define DEFAULT_MAX_SECTORS 256 |
2081 | +#define MAX_TXU 1024 * 1024 |
2082 | |
2083 | static uint max_vdma_size = MAX_H_COPY_RDMA; |
2084 | |
2085 | @@ -1239,7 +1240,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, |
2086 | } |
2087 | |
2088 | info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token, |
2089 | - GFP_KERNEL); |
2090 | + GFP_ATOMIC); |
2091 | if (!info) { |
2092 | dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", |
2093 | iue->target); |
2094 | @@ -1291,7 +1292,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, |
2095 | info->mad_version = cpu_to_be32(MAD_VERSION_1); |
2096 | info->os_type = cpu_to_be32(LINUX); |
2097 | memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu)); |
2098 | - info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE); |
2099 | + info->port_max_txu[0] = cpu_to_be32(MAX_TXU); |
2100 | |
2101 | dma_wmb(); |
2102 | rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn, |
2103 | @@ -1357,7 +1358,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue) |
2104 | } |
2105 | |
2106 | cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token, |
2107 | - GFP_KERNEL); |
2108 | + GFP_ATOMIC); |
2109 | if (!cap) { |
2110 | dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", |
2111 | iue->target); |
2112 | diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h |
2113 | index 3e71bc1b4a80..7008061c4b5b 100644 |
2114 | --- a/drivers/scsi/mpt3sas/mpt3sas_base.h |
2115 | +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h |
2116 | @@ -393,6 +393,7 @@ struct MPT3SAS_TARGET { |
2117 | * @eedp_enable: eedp support enable bit |
2118 | * @eedp_type: 0(type_1), 1(type_2), 2(type_3) |
2119 | * @eedp_block_length: block size |
2120 | + * @ata_command_pending: SATL passthrough outstanding for device |
2121 | */ |
2122 | struct MPT3SAS_DEVICE { |
2123 | struct MPT3SAS_TARGET *sas_target; |
2124 | @@ -402,6 +403,17 @@ struct MPT3SAS_DEVICE { |
2125 | u8 block; |
2126 | u8 tlr_snoop_check; |
2127 | u8 ignore_delay_remove; |
2128 | + /* |
2129 | + * Bug workaround for SATL handling: the mpt2/3sas firmware |
2130 | + * doesn't return BUSY or TASK_SET_FULL for subsequent |
2131 | + * commands while a SATL pass through is in operation as the |
2132 | + * spec requires, it simply does nothing with them until the |
2133 | + * pass through completes, causing them possibly to timeout if |
2134 | + * the passthrough is a long executing command (like format or |
2135 | + * secure erase). This variable allows us to do the right |
2136 | + * thing while a SATL command is pending. |
2137 | + */ |
2138 | + unsigned long ata_command_pending; |
2139 | }; |
2140 | |
2141 | #define MPT3_CMD_NOT_USED 0x8000 /* free */ |
2142 | diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
2143 | index 1c4744e78173..f84a6087cebd 100644 |
2144 | --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
2145 | +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
2146 | @@ -3885,9 +3885,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, |
2147 | } |
2148 | } |
2149 | |
2150 | -static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) |
2151 | +static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) |
2152 | { |
2153 | - return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); |
2154 | + struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; |
2155 | + |
2156 | + if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) |
2157 | + return 0; |
2158 | + |
2159 | + if (pending) |
2160 | + return test_and_set_bit(0, &priv->ata_command_pending); |
2161 | + |
2162 | + clear_bit(0, &priv->ata_command_pending); |
2163 | + return 0; |
2164 | } |
2165 | |
2166 | /** |
2167 | @@ -3911,9 +3920,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) |
2168 | if (!scmd) |
2169 | continue; |
2170 | count++; |
2171 | - if (ata_12_16_cmd(scmd)) |
2172 | - scsi_internal_device_unblock(scmd->device, |
2173 | - SDEV_RUNNING); |
2174 | + _scsih_set_satl_pending(scmd, false); |
2175 | mpt3sas_base_free_smid(ioc, smid); |
2176 | scsi_dma_unmap(scmd); |
2177 | if (ioc->pci_error_recovery) |
2178 | @@ -4044,13 +4051,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) |
2179 | if (ioc->logging_level & MPT_DEBUG_SCSI) |
2180 | scsi_print_command(scmd); |
2181 | |
2182 | - /* |
2183 | - * Lock the device for any subsequent command until command is |
2184 | - * done. |
2185 | - */ |
2186 | - if (ata_12_16_cmd(scmd)) |
2187 | - scsi_internal_device_block(scmd->device); |
2188 | - |
2189 | sas_device_priv_data = scmd->device->hostdata; |
2190 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { |
2191 | scmd->result = DID_NO_CONNECT << 16; |
2192 | @@ -4064,6 +4064,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) |
2193 | return 0; |
2194 | } |
2195 | |
2196 | + /* |
2197 | + * Bug work around for firmware SATL handling. The loop |
2198 | + * is based on atomic operations and ensures consistency |
2199 | + * since we're lockless at this point |
2200 | + */ |
2201 | + do { |
2202 | + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { |
2203 | + scmd->result = SAM_STAT_BUSY; |
2204 | + scmd->scsi_done(scmd); |
2205 | + return 0; |
2206 | + } |
2207 | + } while (_scsih_set_satl_pending(scmd, true)); |
2208 | + |
2209 | sas_target_priv_data = sas_device_priv_data->sas_target; |
2210 | |
2211 | /* invalid device handle */ |
2212 | @@ -4626,8 +4639,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) |
2213 | if (scmd == NULL) |
2214 | return 1; |
2215 | |
2216 | - if (ata_12_16_cmd(scmd)) |
2217 | - scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); |
2218 | + _scsih_set_satl_pending(scmd, false); |
2219 | |
2220 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
2221 | |
2222 | diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c |
2223 | index 56d6142852a5..078d797cb492 100644 |
2224 | --- a/drivers/scsi/qla2xxx/qla_os.c |
2225 | +++ b/drivers/scsi/qla2xxx/qla_os.c |
2226 | @@ -3489,7 +3489,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, |
2227 | sizeof(struct ct6_dsd), 0, |
2228 | SLAB_HWCACHE_ALIGN, NULL); |
2229 | if (!ctx_cachep) |
2230 | - goto fail_free_gid_list; |
2231 | + goto fail_free_srb_mempool; |
2232 | } |
2233 | ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, |
2234 | ctx_cachep); |
2235 | @@ -3642,7 +3642,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, |
2236 | ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), |
2237 | GFP_KERNEL); |
2238 | if (!ha->loop_id_map) |
2239 | - goto fail_async_pd; |
2240 | + goto fail_loop_id_map; |
2241 | else { |
2242 | qla2x00_set_reserved_loop_ids(ha); |
2243 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, |
2244 | @@ -3651,6 +3651,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, |
2245 | |
2246 | return 0; |
2247 | |
2248 | +fail_loop_id_map: |
2249 | + dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); |
2250 | fail_async_pd: |
2251 | dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); |
2252 | fail_ex_init_cb: |
2253 | @@ -3678,6 +3680,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, |
2254 | dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); |
2255 | ha->ms_iocb = NULL; |
2256 | ha->ms_iocb_dma = 0; |
2257 | + |
2258 | + if (ha->sns_cmd) |
2259 | + dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), |
2260 | + ha->sns_cmd, ha->sns_cmd_dma); |
2261 | fail_dma_pool: |
2262 | if (IS_QLA82XX(ha) || ql2xenabledif) { |
2263 | dma_pool_destroy(ha->fcp_cmnd_dma_pool); |
2264 | @@ -3695,10 +3701,12 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, |
2265 | kfree(ha->nvram); |
2266 | ha->nvram = NULL; |
2267 | fail_free_ctx_mempool: |
2268 | - mempool_destroy(ha->ctx_mempool); |
2269 | + if (ha->ctx_mempool) |
2270 | + mempool_destroy(ha->ctx_mempool); |
2271 | ha->ctx_mempool = NULL; |
2272 | fail_free_srb_mempool: |
2273 | - mempool_destroy(ha->srb_mempool); |
2274 | + if (ha->srb_mempool) |
2275 | + mempool_destroy(ha->srb_mempool); |
2276 | ha->srb_mempool = NULL; |
2277 | fail_free_gid_list: |
2278 | dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), |
2279 | diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c |
2280 | index 8c9a35c91705..50adabbb5808 100644 |
2281 | --- a/drivers/scsi/ses.c |
2282 | +++ b/drivers/scsi/ses.c |
2283 | @@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev, |
2284 | |
2285 | ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); |
2286 | |
2287 | - if (scsi_is_sas_rphy(&sdev->sdev_gendev)) |
2288 | + if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent)) |
2289 | efd.addr = sas_get_address(sdev); |
2290 | |
2291 | if (efd.addr) { |
2292 | diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c |
2293 | index 8823cc81ae45..5bb376009d98 100644 |
2294 | --- a/drivers/soc/ti/wkup_m3_ipc.c |
2295 | +++ b/drivers/soc/ti/wkup_m3_ipc.c |
2296 | @@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) |
2297 | |
2298 | if (IS_ERR(task)) { |
2299 | dev_err(dev, "can't create rproc_boot thread\n"); |
2300 | + ret = PTR_ERR(task); |
2301 | goto err_put_rproc; |
2302 | } |
2303 | |
2304 | diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c |
2305 | index dd7b5b47291d..d6239fa718be 100644 |
2306 | --- a/drivers/spi/spi-pxa2xx.c |
2307 | +++ b/drivers/spi/spi-pxa2xx.c |
2308 | @@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) |
2309 | pxa2xx_spi_write(drv_data, SSCR1, tmp); |
2310 | tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8); |
2311 | pxa2xx_spi_write(drv_data, SSCR0, tmp); |
2312 | + break; |
2313 | default: |
2314 | tmp = SSCR1_RxTresh(RX_THRESH_DFLT) | |
2315 | SSCR1_TxTresh(TX_THRESH_DFLT); |
2316 | diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c |
2317 | index 1780a08b73c9..58d756231136 100644 |
2318 | --- a/drivers/staging/media/s5p-cec/s5p_cec.c |
2319 | +++ b/drivers/staging/media/s5p-cec/s5p_cec.c |
2320 | @@ -231,7 +231,7 @@ static int s5p_cec_remove(struct platform_device *pdev) |
2321 | return 0; |
2322 | } |
2323 | |
2324 | -static int s5p_cec_runtime_suspend(struct device *dev) |
2325 | +static int __maybe_unused s5p_cec_runtime_suspend(struct device *dev) |
2326 | { |
2327 | struct s5p_cec_dev *cec = dev_get_drvdata(dev); |
2328 | |
2329 | @@ -239,7 +239,7 @@ static int s5p_cec_runtime_suspend(struct device *dev) |
2330 | return 0; |
2331 | } |
2332 | |
2333 | -static int s5p_cec_runtime_resume(struct device *dev) |
2334 | +static int __maybe_unused s5p_cec_runtime_resume(struct device *dev) |
2335 | { |
2336 | struct s5p_cec_dev *cec = dev_get_drvdata(dev); |
2337 | int ret; |
2338 | diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c |
2339 | index 87e6035c9e81..8e7a3d646531 100644 |
2340 | --- a/drivers/xen/swiotlb-xen.c |
2341 | +++ b/drivers/xen/swiotlb-xen.c |
2342 | @@ -392,7 +392,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, |
2343 | if (dma_capable(dev, dev_addr, size) && |
2344 | !range_straddles_page_boundary(phys, size) && |
2345 | !xen_arch_need_swiotlb(dev, phys, dev_addr) && |
2346 | - !swiotlb_force) { |
2347 | + (swiotlb_force != SWIOTLB_FORCE)) { |
2348 | /* we are not interested in the dma_addr returned by |
2349 | * xen_dma_map_page, only in the potential cache flushes executed |
2350 | * by the function. */ |
2351 | @@ -549,7 +549,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
2352 | phys_addr_t paddr = sg_phys(sg); |
2353 | dma_addr_t dev_addr = xen_phys_to_bus(paddr); |
2354 | |
2355 | - if (swiotlb_force || |
2356 | + if (swiotlb_force == SWIOTLB_FORCE || |
2357 | xen_arch_need_swiotlb(hwdev, paddr, dev_addr) || |
2358 | !dma_capable(hwdev, dev_addr, sg->length) || |
2359 | range_straddles_page_boundary(paddr, sg->length)) { |
2360 | diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c |
2361 | index 16e6ded0b7f2..f3f21105b860 100644 |
2362 | --- a/fs/ceph/caps.c |
2363 | +++ b/fs/ceph/caps.c |
2364 | @@ -2507,9 +2507,20 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, |
2365 | if (err < 0) |
2366 | ret = err; |
2367 | } else { |
2368 | - ret = wait_event_interruptible(ci->i_cap_wq, |
2369 | - try_get_cap_refs(ci, need, want, endoff, |
2370 | - true, &_got, &err)); |
2371 | + DEFINE_WAIT_FUNC(wait, woken_wake_function); |
2372 | + add_wait_queue(&ci->i_cap_wq, &wait); |
2373 | + |
2374 | + while (!try_get_cap_refs(ci, need, want, endoff, |
2375 | + true, &_got, &err)) { |
2376 | + if (signal_pending(current)) { |
2377 | + ret = -ERESTARTSYS; |
2378 | + break; |
2379 | + } |
2380 | + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); |
2381 | + } |
2382 | + |
2383 | + remove_wait_queue(&ci->i_cap_wq, &wait); |
2384 | + |
2385 | if (err == -EAGAIN) |
2386 | continue; |
2387 | if (err < 0) |
2388 | diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c |
2389 | index a594c7879cc2..1afa11191000 100644 |
2390 | --- a/fs/ceph/dir.c |
2391 | +++ b/fs/ceph/dir.c |
2392 | @@ -1255,7 +1255,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) |
2393 | struct ceph_mds_client *mdsc = |
2394 | ceph_sb_to_client(dir->i_sb)->mdsc; |
2395 | struct ceph_mds_request *req; |
2396 | - int op, mask, err; |
2397 | + int op, err; |
2398 | + u32 mask; |
2399 | |
2400 | if (flags & LOOKUP_RCU) |
2401 | return -ECHILD; |
2402 | @@ -1270,7 +1271,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) |
2403 | mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; |
2404 | if (ceph_security_xattr_wanted(dir)) |
2405 | mask |= CEPH_CAP_XATTR_SHARED; |
2406 | - req->r_args.getattr.mask = mask; |
2407 | + req->r_args.getattr.mask = cpu_to_le32(mask); |
2408 | |
2409 | err = ceph_mdsc_do_request(mdsc, NULL, req); |
2410 | switch (err) { |
2411 | diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c |
2412 | index ef4d04647325..12f2252f6c98 100644 |
2413 | --- a/fs/ceph/inode.c |
2414 | +++ b/fs/ceph/inode.c |
2415 | @@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r) |
2416 | { |
2417 | struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l; |
2418 | struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r; |
2419 | - return ceph_frag_compare(ls->frag, rs->frag); |
2420 | + return ceph_frag_compare(le32_to_cpu(ls->frag), |
2421 | + le32_to_cpu(rs->frag)); |
2422 | } |
2423 | |
2424 | static bool is_frag_child(u32 f, struct ceph_inode_frag *frag) |
2425 | diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c |
2426 | index 815acd1a56d4..6a26c7bd1286 100644 |
2427 | --- a/fs/ceph/mds_client.c |
2428 | +++ b/fs/ceph/mds_client.c |
2429 | @@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end, |
2430 | struct ceph_mds_reply_info_parsed *info, |
2431 | u64 features) |
2432 | { |
2433 | - if (info->head->op == CEPH_MDS_OP_GETFILELOCK) |
2434 | + u32 op = le32_to_cpu(info->head->op); |
2435 | + |
2436 | + if (op == CEPH_MDS_OP_GETFILELOCK) |
2437 | return parse_reply_info_filelock(p, end, info, features); |
2438 | - else if (info->head->op == CEPH_MDS_OP_READDIR || |
2439 | - info->head->op == CEPH_MDS_OP_LSSNAP) |
2440 | + else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP) |
2441 | return parse_reply_info_dir(p, end, info, features); |
2442 | - else if (info->head->op == CEPH_MDS_OP_CREATE) |
2443 | + else if (op == CEPH_MDS_OP_CREATE) |
2444 | return parse_reply_info_create(p, end, info, features); |
2445 | else |
2446 | return -EIO; |
2447 | diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c |
2448 | index 70ea57c7b6bb..4e06a27ed7f8 100644 |
2449 | --- a/fs/fuse/dev.c |
2450 | +++ b/fs/fuse/dev.c |
2451 | @@ -2025,7 +2025,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head) |
2452 | struct fuse_req *req; |
2453 | req = list_entry(head->next, struct fuse_req, list); |
2454 | req->out.h.error = -ECONNABORTED; |
2455 | - clear_bit(FR_PENDING, &req->flags); |
2456 | clear_bit(FR_SENT, &req->flags); |
2457 | list_del_init(&req->list); |
2458 | request_end(fc, req); |
2459 | @@ -2103,6 +2102,8 @@ void fuse_abort_conn(struct fuse_conn *fc) |
2460 | spin_lock(&fiq->waitq.lock); |
2461 | fiq->connected = 0; |
2462 | list_splice_init(&fiq->pending, &to_end2); |
2463 | + list_for_each_entry(req, &to_end2, list) |
2464 | + clear_bit(FR_PENDING, &req->flags); |
2465 | while (forget_pending(fiq)) |
2466 | kfree(dequeue_forget(fiq, 1, NULL)); |
2467 | wake_up_all_locked(&fiq->waitq); |
2468 | diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c |
2469 | index 096f79997f75..642c57b8de7b 100644 |
2470 | --- a/fs/fuse/dir.c |
2471 | +++ b/fs/fuse/dir.c |
2472 | @@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec) |
2473 | if (sec || nsec) { |
2474 | struct timespec64 ts = { |
2475 | sec, |
2476 | - max_t(u32, nsec, NSEC_PER_SEC - 1) |
2477 | + min_t(u32, nsec, NSEC_PER_SEC - 1) |
2478 | }; |
2479 | |
2480 | return get_jiffies_64() + timespec64_to_jiffies(&ts); |
2481 | diff --git a/fs/posix_acl.c b/fs/posix_acl.c |
2482 | index 595522022aca..c9d48dc78495 100644 |
2483 | --- a/fs/posix_acl.c |
2484 | +++ b/fs/posix_acl.c |
2485 | @@ -922,11 +922,10 @@ int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
2486 | int error; |
2487 | |
2488 | if (type == ACL_TYPE_ACCESS) { |
2489 | - error = posix_acl_equiv_mode(acl, &inode->i_mode); |
2490 | - if (error < 0) |
2491 | - return 0; |
2492 | - if (error == 0) |
2493 | - acl = NULL; |
2494 | + error = posix_acl_update_mode(inode, |
2495 | + &inode->i_mode, &acl); |
2496 | + if (error) |
2497 | + return error; |
2498 | } |
2499 | |
2500 | inode->i_ctime = current_time(inode); |
2501 | diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c |
2502 | index fa9a20cc60d6..fe5e8d4970ae 100644 |
2503 | --- a/fs/ubifs/tnc.c |
2504 | +++ b/fs/ubifs/tnc.c |
2505 | @@ -34,6 +34,11 @@ |
2506 | #include <linux/slab.h> |
2507 | #include "ubifs.h" |
2508 | |
2509 | +static int try_read_node(const struct ubifs_info *c, void *buf, int type, |
2510 | + int len, int lnum, int offs); |
2511 | +static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key, |
2512 | + struct ubifs_zbranch *zbr, void *node); |
2513 | + |
2514 | /* |
2515 | * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions. |
2516 | * @NAME_LESS: name corresponding to the first argument is less than second |
2517 | @@ -402,7 +407,19 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr, |
2518 | return 0; |
2519 | } |
2520 | |
2521 | - err = ubifs_tnc_read_node(c, zbr, node); |
2522 | + if (c->replaying) { |
2523 | + err = fallible_read_node(c, &zbr->key, zbr, node); |
2524 | + /* |
2525 | + * When the node was not found, return -ENOENT, 0 otherwise. |
2526 | + * Negative return codes stay as-is. |
2527 | + */ |
2528 | + if (err == 0) |
2529 | + err = -ENOENT; |
2530 | + else if (err == 1) |
2531 | + err = 0; |
2532 | + } else { |
2533 | + err = ubifs_tnc_read_node(c, zbr, node); |
2534 | + } |
2535 | if (err) |
2536 | return err; |
2537 | |
2538 | @@ -2766,7 +2783,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c, |
2539 | if (nm->name) { |
2540 | if (err) { |
2541 | /* Handle collisions */ |
2542 | - err = resolve_collision(c, key, &znode, &n, nm); |
2543 | + if (c->replaying) |
2544 | + err = fallible_resolve_collision(c, key, &znode, &n, |
2545 | + nm, 0); |
2546 | + else |
2547 | + err = resolve_collision(c, key, &znode, &n, nm); |
2548 | dbg_tnc("rc returned %d, znode %p, n %d", |
2549 | err, znode, n); |
2550 | if (unlikely(err < 0)) |
2551 | diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h |
2552 | index 9d02f5317c7c..88e64846cf37 100644 |
2553 | --- a/include/dt-bindings/clock/r8a7794-clock.h |
2554 | +++ b/include/dt-bindings/clock/r8a7794-clock.h |
2555 | @@ -20,8 +20,7 @@ |
2556 | #define R8A7794_CLK_QSPI 5 |
2557 | #define R8A7794_CLK_SDH 6 |
2558 | #define R8A7794_CLK_SD0 7 |
2559 | -#define R8A7794_CLK_Z 8 |
2560 | -#define R8A7794_CLK_RCAN 9 |
2561 | +#define R8A7794_CLK_RCAN 8 |
2562 | |
2563 | /* MSTP0 */ |
2564 | #define R8A7794_CLK_MSIOF0 0 |
2565 | diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h |
2566 | index 321f9ed552a9..01f71e1d2e94 100644 |
2567 | --- a/include/linux/rcupdate.h |
2568 | +++ b/include/linux/rcupdate.h |
2569 | @@ -444,6 +444,10 @@ bool __rcu_is_watching(void); |
2570 | #error "Unknown RCU implementation specified to kernel configuration" |
2571 | #endif |
2572 | |
2573 | +#define RCU_SCHEDULER_INACTIVE 0 |
2574 | +#define RCU_SCHEDULER_INIT 1 |
2575 | +#define RCU_SCHEDULER_RUNNING 2 |
2576 | + |
2577 | /* |
2578 | * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic |
2579 | * initialization and destruction of rcu_head on the stack. rcu_head structures |
2580 | diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h |
2581 | index e5d193440374..7440290f64ac 100644 |
2582 | --- a/include/linux/sunrpc/svc_xprt.h |
2583 | +++ b/include/linux/sunrpc/svc_xprt.h |
2584 | @@ -66,6 +66,7 @@ struct svc_xprt { |
2585 | #define XPT_LISTENER 10 /* listening endpoint */ |
2586 | #define XPT_CACHE_AUTH 11 /* cache auth info */ |
2587 | #define XPT_LOCAL 12 /* connection from loopback interface */ |
2588 | +#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */ |
2589 | |
2590 | struct svc_serv *xpt_server; /* service for transport */ |
2591 | atomic_t xpt_reserved; /* space on outq that is rsvd */ |
2592 | diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h |
2593 | index 5f81f8a187f2..d2613536fd03 100644 |
2594 | --- a/include/linux/swiotlb.h |
2595 | +++ b/include/linux/swiotlb.h |
2596 | @@ -9,7 +9,13 @@ struct device; |
2597 | struct page; |
2598 | struct scatterlist; |
2599 | |
2600 | -extern int swiotlb_force; |
2601 | +enum swiotlb_force { |
2602 | + SWIOTLB_NORMAL, /* Default - depending on HW DMA mask etc. */ |
2603 | + SWIOTLB_FORCE, /* swiotlb=force */ |
2604 | + SWIOTLB_NO_FORCE, /* swiotlb=noforce */ |
2605 | +}; |
2606 | + |
2607 | +extern enum swiotlb_force swiotlb_force; |
2608 | |
2609 | /* |
2610 | * Maximum allowable number of contiguous slabs to map, |
2611 | diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h |
2612 | index 7ea4c5e7c448..288c0c54a2b4 100644 |
2613 | --- a/include/trace/events/swiotlb.h |
2614 | +++ b/include/trace/events/swiotlb.h |
2615 | @@ -11,16 +11,16 @@ TRACE_EVENT(swiotlb_bounced, |
2616 | TP_PROTO(struct device *dev, |
2617 | dma_addr_t dev_addr, |
2618 | size_t size, |
2619 | - int swiotlb_force), |
2620 | + enum swiotlb_force swiotlb_force), |
2621 | |
2622 | TP_ARGS(dev, dev_addr, size, swiotlb_force), |
2623 | |
2624 | TP_STRUCT__entry( |
2625 | - __string( dev_name, dev_name(dev) ) |
2626 | - __field( u64, dma_mask ) |
2627 | - __field( dma_addr_t, dev_addr ) |
2628 | - __field( size_t, size ) |
2629 | - __field( int, swiotlb_force ) |
2630 | + __string( dev_name, dev_name(dev) ) |
2631 | + __field( u64, dma_mask ) |
2632 | + __field( dma_addr_t, dev_addr ) |
2633 | + __field( size_t, size ) |
2634 | + __field( enum swiotlb_force, swiotlb_force ) |
2635 | ), |
2636 | |
2637 | TP_fast_assign( |
2638 | @@ -37,7 +37,10 @@ TRACE_EVENT(swiotlb_bounced, |
2639 | __entry->dma_mask, |
2640 | (unsigned long long)__entry->dev_addr, |
2641 | __entry->size, |
2642 | - __entry->swiotlb_force ? "swiotlb_force" : "" ) |
2643 | + __print_symbolic(__entry->swiotlb_force, |
2644 | + { SWIOTLB_NORMAL, "NORMAL" }, |
2645 | + { SWIOTLB_FORCE, "FORCE" }, |
2646 | + { SWIOTLB_NO_FORCE, "NO_FORCE" })) |
2647 | ); |
2648 | |
2649 | #endif /* _TRACE_SWIOTLB_H */ |
2650 | diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h |
2651 | index 80adef7d4c3d..0d6ff3e471be 100644 |
2652 | --- a/kernel/rcu/rcu.h |
2653 | +++ b/kernel/rcu/rcu.h |
2654 | @@ -136,6 +136,7 @@ int rcu_jiffies_till_stall_check(void); |
2655 | #define TPS(x) tracepoint_string(x) |
2656 | |
2657 | void rcu_early_boot_tests(void); |
2658 | +void rcu_test_sync_prims(void); |
2659 | |
2660 | /* |
2661 | * This function really isn't for public consumption, but RCU is special in |
2662 | diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c |
2663 | index 1898559e6b60..b23a4d076f3d 100644 |
2664 | --- a/kernel/rcu/tiny.c |
2665 | +++ b/kernel/rcu/tiny.c |
2666 | @@ -185,9 +185,6 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused |
2667 | * benefits of doing might_sleep() to reduce latency.) |
2668 | * |
2669 | * Cool, huh? (Due to Josh Triplett.) |
2670 | - * |
2671 | - * But we want to make this a static inline later. The cond_resched() |
2672 | - * currently makes this problematic. |
2673 | */ |
2674 | void synchronize_sched(void) |
2675 | { |
2676 | @@ -195,7 +192,6 @@ void synchronize_sched(void) |
2677 | lock_is_held(&rcu_lock_map) || |
2678 | lock_is_held(&rcu_sched_lock_map), |
2679 | "Illegal synchronize_sched() in RCU read-side critical section"); |
2680 | - cond_resched(); |
2681 | } |
2682 | EXPORT_SYMBOL_GPL(synchronize_sched); |
2683 | |
2684 | diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h |
2685 | index 196f0302e2f4..c64b827ecbca 100644 |
2686 | --- a/kernel/rcu/tiny_plugin.h |
2687 | +++ b/kernel/rcu/tiny_plugin.h |
2688 | @@ -60,12 +60,17 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active); |
2689 | |
2690 | /* |
2691 | * During boot, we forgive RCU lockdep issues. After this function is |
2692 | - * invoked, we start taking RCU lockdep issues seriously. |
2693 | + * invoked, we start taking RCU lockdep issues seriously. Note that unlike |
2694 | + * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE |
2695 | + * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage. |
2696 | + * The reason for this is that Tiny RCU does not need kthreads, so does |
2697 | + * not have to care about the fact that the scheduler is half-initialized |
2698 | + * at a certain phase of the boot process. |
2699 | */ |
2700 | void __init rcu_scheduler_starting(void) |
2701 | { |
2702 | WARN_ON(nr_context_switches() > 0); |
2703 | - rcu_scheduler_active = 1; |
2704 | + rcu_scheduler_active = RCU_SCHEDULER_RUNNING; |
2705 | } |
2706 | |
2707 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
2708 | diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c |
2709 | index 69a5611a7e7c..10f62c6f48e7 100644 |
2710 | --- a/kernel/rcu/tree.c |
2711 | +++ b/kernel/rcu/tree.c |
2712 | @@ -127,13 +127,16 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */ |
2713 | int sysctl_panic_on_rcu_stall __read_mostly; |
2714 | |
2715 | /* |
2716 | - * The rcu_scheduler_active variable transitions from zero to one just |
2717 | - * before the first task is spawned. So when this variable is zero, RCU |
2718 | - * can assume that there is but one task, allowing RCU to (for example) |
2719 | + * The rcu_scheduler_active variable is initialized to the value |
2720 | + * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the |
2721 | + * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE, |
2722 | + * RCU can assume that there is but one task, allowing RCU to (for example) |
2723 | * optimize synchronize_rcu() to a simple barrier(). When this variable |
2724 | - * is one, RCU must actually do all the hard work required to detect real |
2725 | - * grace periods. This variable is also used to suppress boot-time false |
2726 | - * positives from lockdep-RCU error checking. |
2727 | + * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required |
2728 | + * to detect real grace periods. This variable is also used to suppress |
2729 | + * boot-time false positives from lockdep-RCU error checking. Finally, it |
2730 | + * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU |
2731 | + * is fully initialized, including all of its kthreads having been spawned. |
2732 | */ |
2733 | int rcu_scheduler_active __read_mostly; |
2734 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); |
2735 | @@ -3985,18 +3988,22 @@ static int __init rcu_spawn_gp_kthread(void) |
2736 | early_initcall(rcu_spawn_gp_kthread); |
2737 | |
2738 | /* |
2739 | - * This function is invoked towards the end of the scheduler's initialization |
2740 | - * process. Before this is called, the idle task might contain |
2741 | - * RCU read-side critical sections (during which time, this idle |
2742 | - * task is booting the system). After this function is called, the |
2743 | - * idle tasks are prohibited from containing RCU read-side critical |
2744 | - * sections. This function also enables RCU lockdep checking. |
2745 | + * This function is invoked towards the end of the scheduler's |
2746 | + * initialization process. Before this is called, the idle task might |
2747 | + * contain synchronous grace-period primitives (during which time, this idle |
2748 | + * task is booting the system, and such primitives are no-ops). After this |
2749 | + * function is called, any synchronous grace-period primitives are run as |
2750 | + * expedited, with the requesting task driving the grace period forward. |
2751 | + * A later core_initcall() rcu_exp_runtime_mode() will switch to full |
2752 | + * runtime RCU functionality. |
2753 | */ |
2754 | void rcu_scheduler_starting(void) |
2755 | { |
2756 | WARN_ON(num_online_cpus() != 1); |
2757 | WARN_ON(nr_context_switches() > 0); |
2758 | - rcu_scheduler_active = 1; |
2759 | + rcu_test_sync_prims(); |
2760 | + rcu_scheduler_active = RCU_SCHEDULER_INIT; |
2761 | + rcu_test_sync_prims(); |
2762 | } |
2763 | |
2764 | /* |
2765 | diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h |
2766 | index 24343eb87b58..78eba4120d46 100644 |
2767 | --- a/kernel/rcu/tree_exp.h |
2768 | +++ b/kernel/rcu/tree_exp.h |
2769 | @@ -522,18 +522,28 @@ struct rcu_exp_work { |
2770 | }; |
2771 | |
2772 | /* |
2773 | + * Common code to drive an expedited grace period forward, used by |
2774 | + * workqueues and mid-boot-time tasks. |
2775 | + */ |
2776 | +static void rcu_exp_sel_wait_wake(struct rcu_state *rsp, |
2777 | + smp_call_func_t func, unsigned long s) |
2778 | +{ |
2779 | + /* Initialize the rcu_node tree in preparation for the wait. */ |
2780 | + sync_rcu_exp_select_cpus(rsp, func); |
2781 | + |
2782 | + /* Wait and clean up, including waking everyone. */ |
2783 | + rcu_exp_wait_wake(rsp, s); |
2784 | +} |
2785 | + |
2786 | +/* |
2787 | * Work-queue handler to drive an expedited grace period forward. |
2788 | */ |
2789 | static void wait_rcu_exp_gp(struct work_struct *wp) |
2790 | { |
2791 | struct rcu_exp_work *rewp; |
2792 | |
2793 | - /* Initialize the rcu_node tree in preparation for the wait. */ |
2794 | rewp = container_of(wp, struct rcu_exp_work, rew_work); |
2795 | - sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func); |
2796 | - |
2797 | - /* Wait and clean up, including waking everyone. */ |
2798 | - rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s); |
2799 | + rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s); |
2800 | } |
2801 | |
2802 | /* |
2803 | @@ -559,12 +569,18 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp, |
2804 | if (exp_funnel_lock(rsp, s)) |
2805 | return; /* Someone else did our work for us. */ |
2806 | |
2807 | - /* Marshall arguments and schedule the expedited grace period. */ |
2808 | - rew.rew_func = func; |
2809 | - rew.rew_rsp = rsp; |
2810 | - rew.rew_s = s; |
2811 | - INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp); |
2812 | - schedule_work(&rew.rew_work); |
2813 | + /* Ensure that load happens before action based on it. */ |
2814 | + if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) { |
2815 | + /* Direct call during scheduler init and early_initcalls(). */ |
2816 | + rcu_exp_sel_wait_wake(rsp, func, s); |
2817 | + } else { |
2818 | + /* Marshall arguments & schedule the expedited grace period. */ |
2819 | + rew.rew_func = func; |
2820 | + rew.rew_rsp = rsp; |
2821 | + rew.rew_s = s; |
2822 | + INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp); |
2823 | + schedule_work(&rew.rew_work); |
2824 | + } |
2825 | |
2826 | /* Wait for expedited grace period to complete. */ |
2827 | rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id()); |
2828 | @@ -666,6 +682,8 @@ void synchronize_rcu_expedited(void) |
2829 | { |
2830 | struct rcu_state *rsp = rcu_state_p; |
2831 | |
2832 | + if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) |
2833 | + return; |
2834 | _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler); |
2835 | } |
2836 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); |
2837 | @@ -683,3 +701,15 @@ void synchronize_rcu_expedited(void) |
2838 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); |
2839 | |
2840 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ |
2841 | + |
2842 | +/* |
2843 | + * Switch to run-time mode once Tree RCU has fully initialized. |
2844 | + */ |
2845 | +static int __init rcu_exp_runtime_mode(void) |
2846 | +{ |
2847 | + rcu_test_sync_prims(); |
2848 | + rcu_scheduler_active = RCU_SCHEDULER_RUNNING; |
2849 | + rcu_test_sync_prims(); |
2850 | + return 0; |
2851 | +} |
2852 | +core_initcall(rcu_exp_runtime_mode); |
2853 | diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h |
2854 | index 85c5a883c6e3..56583e764ebf 100644 |
2855 | --- a/kernel/rcu/tree_plugin.h |
2856 | +++ b/kernel/rcu/tree_plugin.h |
2857 | @@ -670,7 +670,7 @@ void synchronize_rcu(void) |
2858 | lock_is_held(&rcu_lock_map) || |
2859 | lock_is_held(&rcu_sched_lock_map), |
2860 | "Illegal synchronize_rcu() in RCU read-side critical section"); |
2861 | - if (!rcu_scheduler_active) |
2862 | + if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) |
2863 | return; |
2864 | if (rcu_gp_is_expedited()) |
2865 | synchronize_rcu_expedited(); |
2866 | diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c |
2867 | index f19271dce0a9..4f6db7e6a117 100644 |
2868 | --- a/kernel/rcu/update.c |
2869 | +++ b/kernel/rcu/update.c |
2870 | @@ -121,11 +121,14 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held); |
2871 | * Should expedited grace-period primitives always fall back to their |
2872 | * non-expedited counterparts? Intended for use within RCU. Note |
2873 | * that if the user specifies both rcu_expedited and rcu_normal, then |
2874 | - * rcu_normal wins. |
2875 | + * rcu_normal wins. (Except during the time period during boot from |
2876 | + * when the first task is spawned until the rcu_exp_runtime_mode() |
2877 | + * core_initcall() is invoked, at which point everything is expedited.) |
2878 | */ |
2879 | bool rcu_gp_is_normal(void) |
2880 | { |
2881 | - return READ_ONCE(rcu_normal); |
2882 | + return READ_ONCE(rcu_normal) && |
2883 | + rcu_scheduler_active != RCU_SCHEDULER_INIT; |
2884 | } |
2885 | EXPORT_SYMBOL_GPL(rcu_gp_is_normal); |
2886 | |
2887 | @@ -135,13 +138,14 @@ static atomic_t rcu_expedited_nesting = |
2888 | /* |
2889 | * Should normal grace-period primitives be expedited? Intended for |
2890 | * use within RCU. Note that this function takes the rcu_expedited |
2891 | - * sysfs/boot variable into account as well as the rcu_expedite_gp() |
2892 | - * nesting. So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited() |
2893 | - * returns false is a -really- bad idea. |
2894 | + * sysfs/boot variable and rcu_scheduler_active into account as well |
2895 | + * as the rcu_expedite_gp() nesting. So looping on rcu_unexpedite_gp() |
2896 | + * until rcu_gp_is_expedited() returns false is a -really- bad idea. |
2897 | */ |
2898 | bool rcu_gp_is_expedited(void) |
2899 | { |
2900 | - return rcu_expedited || atomic_read(&rcu_expedited_nesting); |
2901 | + return rcu_expedited || atomic_read(&rcu_expedited_nesting) || |
2902 | + rcu_scheduler_active == RCU_SCHEDULER_INIT; |
2903 | } |
2904 | EXPORT_SYMBOL_GPL(rcu_gp_is_expedited); |
2905 | |
2906 | @@ -257,7 +261,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map); |
2907 | |
2908 | int notrace debug_lockdep_rcu_enabled(void) |
2909 | { |
2910 | - return rcu_scheduler_active && debug_locks && |
2911 | + return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks && |
2912 | current->lockdep_recursion == 0; |
2913 | } |
2914 | EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); |
2915 | @@ -591,7 +595,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks); |
2916 | void synchronize_rcu_tasks(void) |
2917 | { |
2918 | /* Complain if the scheduler has not started. */ |
2919 | - RCU_LOCKDEP_WARN(!rcu_scheduler_active, |
2920 | + RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, |
2921 | "synchronize_rcu_tasks called too soon"); |
2922 | |
2923 | /* Wait for the grace period. */ |
2924 | @@ -813,6 +817,23 @@ static void rcu_spawn_tasks_kthread(void) |
2925 | |
2926 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
2927 | |
2928 | +/* |
2929 | + * Test each non-SRCU synchronous grace-period wait API. This is |
2930 | + * useful just after a change in mode for these primitives, and |
2931 | + * during early boot. |
2932 | + */ |
2933 | +void rcu_test_sync_prims(void) |
2934 | +{ |
2935 | + if (!IS_ENABLED(CONFIG_PROVE_RCU)) |
2936 | + return; |
2937 | + synchronize_rcu(); |
2938 | + synchronize_rcu_bh(); |
2939 | + synchronize_sched(); |
2940 | + synchronize_rcu_expedited(); |
2941 | + synchronize_rcu_bh_expedited(); |
2942 | + synchronize_sched_expedited(); |
2943 | +} |
2944 | + |
2945 | #ifdef CONFIG_PROVE_RCU |
2946 | |
2947 | /* |
2948 | @@ -865,6 +886,7 @@ void rcu_early_boot_tests(void) |
2949 | early_boot_test_call_rcu_bh(); |
2950 | if (rcu_self_test_sched) |
2951 | early_boot_test_call_rcu_sched(); |
2952 | + rcu_test_sync_prims(); |
2953 | } |
2954 | |
2955 | static int rcu_verify_early_boot_tests(void) |
2956 | diff --git a/lib/swiotlb.c b/lib/swiotlb.c |
2957 | index 22e13a0e19d7..ad1d2962d129 100644 |
2958 | --- a/lib/swiotlb.c |
2959 | +++ b/lib/swiotlb.c |
2960 | @@ -53,7 +53,7 @@ |
2961 | */ |
2962 | #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) |
2963 | |
2964 | -int swiotlb_force; |
2965 | +enum swiotlb_force swiotlb_force; |
2966 | |
2967 | /* |
2968 | * Used to do a quick range check in swiotlb_tbl_unmap_single and |
2969 | @@ -106,8 +106,12 @@ setup_io_tlb_npages(char *str) |
2970 | } |
2971 | if (*str == ',') |
2972 | ++str; |
2973 | - if (!strcmp(str, "force")) |
2974 | - swiotlb_force = 1; |
2975 | + if (!strcmp(str, "force")) { |
2976 | + swiotlb_force = SWIOTLB_FORCE; |
2977 | + } else if (!strcmp(str, "noforce")) { |
2978 | + swiotlb_force = SWIOTLB_NO_FORCE; |
2979 | + io_tlb_nslabs = 1; |
2980 | + } |
2981 | |
2982 | return 0; |
2983 | } |
2984 | @@ -541,8 +545,15 @@ static phys_addr_t |
2985 | map_single(struct device *hwdev, phys_addr_t phys, size_t size, |
2986 | enum dma_data_direction dir) |
2987 | { |
2988 | - dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start); |
2989 | + dma_addr_t start_dma_addr; |
2990 | + |
2991 | + if (swiotlb_force == SWIOTLB_NO_FORCE) { |
2992 | + dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n", |
2993 | + &phys); |
2994 | + return SWIOTLB_MAP_ERROR; |
2995 | + } |
2996 | |
2997 | + start_dma_addr = phys_to_dma(hwdev, io_tlb_start); |
2998 | return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir); |
2999 | } |
3000 | |
3001 | @@ -707,6 +718,9 @@ static void |
3002 | swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, |
3003 | int do_panic) |
3004 | { |
3005 | + if (swiotlb_force == SWIOTLB_NO_FORCE) |
3006 | + return; |
3007 | + |
3008 | /* |
3009 | * Ran out of IOMMU space for this operation. This is very bad. |
3010 | * Unfortunately the drivers cannot handle this operation properly. |
3011 | @@ -749,7 +763,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, |
3012 | * we can safely return the device addr and not worry about bounce |
3013 | * buffering it. |
3014 | */ |
3015 | - if (dma_capable(dev, dev_addr, size) && !swiotlb_force) |
3016 | + if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE) |
3017 | return dev_addr; |
3018 | |
3019 | trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); |
3020 | @@ -888,7 +902,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, |
3021 | phys_addr_t paddr = sg_phys(sg); |
3022 | dma_addr_t dev_addr = phys_to_dma(hwdev, paddr); |
3023 | |
3024 | - if (swiotlb_force || |
3025 | + if (swiotlb_force == SWIOTLB_FORCE || |
3026 | !dma_capable(hwdev, dev_addr, sg->length)) { |
3027 | phys_addr_t map = map_single(hwdev, sg_phys(sg), |
3028 | sg->length, dir); |
3029 | diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c |
3030 | index a0905f04bd13..b216131915e7 100644 |
3031 | --- a/net/ceph/auth_x.c |
3032 | +++ b/net/ceph/auth_x.c |
3033 | @@ -39,56 +39,58 @@ static int ceph_x_should_authenticate(struct ceph_auth_client *ac) |
3034 | return need != 0; |
3035 | } |
3036 | |
3037 | +static int ceph_x_encrypt_offset(void) |
3038 | +{ |
3039 | + return sizeof(u32) + sizeof(struct ceph_x_encrypt_header); |
3040 | +} |
3041 | + |
3042 | static int ceph_x_encrypt_buflen(int ilen) |
3043 | { |
3044 | - return sizeof(struct ceph_x_encrypt_header) + ilen + 16 + |
3045 | - sizeof(u32); |
3046 | + return ceph_x_encrypt_offset() + ilen + 16; |
3047 | } |
3048 | |
3049 | -static int ceph_x_encrypt(struct ceph_crypto_key *secret, |
3050 | - void *ibuf, int ilen, void *obuf, size_t olen) |
3051 | +static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *buf, |
3052 | + int buf_len, int plaintext_len) |
3053 | { |
3054 | - struct ceph_x_encrypt_header head = { |
3055 | - .struct_v = 1, |
3056 | - .magic = cpu_to_le64(CEPHX_ENC_MAGIC) |
3057 | - }; |
3058 | - size_t len = olen - sizeof(u32); |
3059 | + struct ceph_x_encrypt_header *hdr = buf + sizeof(u32); |
3060 | + int ciphertext_len; |
3061 | int ret; |
3062 | |
3063 | - ret = ceph_encrypt2(secret, obuf + sizeof(u32), &len, |
3064 | - &head, sizeof(head), ibuf, ilen); |
3065 | + hdr->struct_v = 1; |
3066 | + hdr->magic = cpu_to_le64(CEPHX_ENC_MAGIC); |
3067 | + |
3068 | + ret = ceph_crypt(secret, true, buf + sizeof(u32), buf_len - sizeof(u32), |
3069 | + plaintext_len + sizeof(struct ceph_x_encrypt_header), |
3070 | + &ciphertext_len); |
3071 | if (ret) |
3072 | return ret; |
3073 | - ceph_encode_32(&obuf, len); |
3074 | - return len + sizeof(u32); |
3075 | + |
3076 | + ceph_encode_32(&buf, ciphertext_len); |
3077 | + return sizeof(u32) + ciphertext_len; |
3078 | } |
3079 | |
3080 | -static int ceph_x_decrypt(struct ceph_crypto_key *secret, |
3081 | - void **p, void *end, void **obuf, size_t olen) |
3082 | +static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end) |
3083 | { |
3084 | - struct ceph_x_encrypt_header head; |
3085 | - size_t head_len = sizeof(head); |
3086 | - int len, ret; |
3087 | - |
3088 | - len = ceph_decode_32(p); |
3089 | - if (*p + len > end) |
3090 | - return -EINVAL; |
3091 | + struct ceph_x_encrypt_header *hdr = *p + sizeof(u32); |
3092 | + int ciphertext_len, plaintext_len; |
3093 | + int ret; |
3094 | |
3095 | - dout("ceph_x_decrypt len %d\n", len); |
3096 | - if (*obuf == NULL) { |
3097 | - *obuf = kmalloc(len, GFP_NOFS); |
3098 | - if (!*obuf) |
3099 | - return -ENOMEM; |
3100 | - olen = len; |
3101 | - } |
3102 | + ceph_decode_32_safe(p, end, ciphertext_len, e_inval); |
3103 | + ceph_decode_need(p, end, ciphertext_len, e_inval); |
3104 | |
3105 | - ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len); |
3106 | + ret = ceph_crypt(secret, false, *p, end - *p, ciphertext_len, |
3107 | + &plaintext_len); |
3108 | if (ret) |
3109 | return ret; |
3110 | - if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC) |
3111 | + |
3112 | + if (hdr->struct_v != 1 || le64_to_cpu(hdr->magic) != CEPHX_ENC_MAGIC) |
3113 | return -EPERM; |
3114 | - *p += len; |
3115 | - return olen; |
3116 | + |
3117 | + *p += ciphertext_len; |
3118 | + return plaintext_len - sizeof(struct ceph_x_encrypt_header); |
3119 | + |
3120 | +e_inval: |
3121 | + return -EINVAL; |
3122 | } |
3123 | |
3124 | /* |
3125 | @@ -143,13 +145,10 @@ static int process_one_ticket(struct ceph_auth_client *ac, |
3126 | int type; |
3127 | u8 tkt_struct_v, blob_struct_v; |
3128 | struct ceph_x_ticket_handler *th; |
3129 | - void *dbuf = NULL; |
3130 | void *dp, *dend; |
3131 | int dlen; |
3132 | char is_enc; |
3133 | struct timespec validity; |
3134 | - struct ceph_crypto_key old_key; |
3135 | - void *ticket_buf = NULL; |
3136 | void *tp, *tpend; |
3137 | void **ptp; |
3138 | struct ceph_crypto_key new_session_key; |
3139 | @@ -174,20 +173,17 @@ static int process_one_ticket(struct ceph_auth_client *ac, |
3140 | } |
3141 | |
3142 | /* blob for me */ |
3143 | - dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0); |
3144 | - if (dlen <= 0) { |
3145 | - ret = dlen; |
3146 | + dp = *p + ceph_x_encrypt_offset(); |
3147 | + ret = ceph_x_decrypt(secret, p, end); |
3148 | + if (ret < 0) |
3149 | goto out; |
3150 | - } |
3151 | - dout(" decrypted %d bytes\n", dlen); |
3152 | - dp = dbuf; |
3153 | - dend = dp + dlen; |
3154 | + dout(" decrypted %d bytes\n", ret); |
3155 | + dend = dp + ret; |
3156 | |
3157 | tkt_struct_v = ceph_decode_8(&dp); |
3158 | if (tkt_struct_v != 1) |
3159 | goto bad; |
3160 | |
3161 | - memcpy(&old_key, &th->session_key, sizeof(old_key)); |
3162 | ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); |
3163 | if (ret) |
3164 | goto out; |
3165 | @@ -203,15 +199,13 @@ static int process_one_ticket(struct ceph_auth_client *ac, |
3166 | ceph_decode_8_safe(p, end, is_enc, bad); |
3167 | if (is_enc) { |
3168 | /* encrypted */ |
3169 | - dout(" encrypted ticket\n"); |
3170 | - dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0); |
3171 | - if (dlen < 0) { |
3172 | - ret = dlen; |
3173 | + tp = *p + ceph_x_encrypt_offset(); |
3174 | + ret = ceph_x_decrypt(&th->session_key, p, end); |
3175 | + if (ret < 0) |
3176 | goto out; |
3177 | - } |
3178 | - tp = ticket_buf; |
3179 | + dout(" encrypted ticket, decrypted %d bytes\n", ret); |
3180 | ptp = &tp; |
3181 | - tpend = *ptp + dlen; |
3182 | + tpend = tp + ret; |
3183 | } else { |
3184 | /* unencrypted */ |
3185 | ptp = p; |
3186 | @@ -242,8 +236,6 @@ static int process_one_ticket(struct ceph_auth_client *ac, |
3187 | xi->have_keys |= th->service; |
3188 | |
3189 | out: |
3190 | - kfree(ticket_buf); |
3191 | - kfree(dbuf); |
3192 | return ret; |
3193 | |
3194 | bad: |
3195 | @@ -294,7 +286,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac, |
3196 | { |
3197 | int maxlen; |
3198 | struct ceph_x_authorize_a *msg_a; |
3199 | - struct ceph_x_authorize_b msg_b; |
3200 | + struct ceph_x_authorize_b *msg_b; |
3201 | void *p, *end; |
3202 | int ret; |
3203 | int ticket_blob_len = |
3204 | @@ -308,8 +300,8 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac, |
3205 | if (ret) |
3206 | goto out_au; |
3207 | |
3208 | - maxlen = sizeof(*msg_a) + sizeof(msg_b) + |
3209 | - ceph_x_encrypt_buflen(ticket_blob_len); |
3210 | + maxlen = sizeof(*msg_a) + ticket_blob_len + |
3211 | + ceph_x_encrypt_buflen(sizeof(*msg_b)); |
3212 | dout(" need len %d\n", maxlen); |
3213 | if (au->buf && au->buf->alloc_len < maxlen) { |
3214 | ceph_buffer_put(au->buf); |
3215 | @@ -343,18 +335,19 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac, |
3216 | p += ticket_blob_len; |
3217 | end = au->buf->vec.iov_base + au->buf->vec.iov_len; |
3218 | |
3219 | + msg_b = p + ceph_x_encrypt_offset(); |
3220 | + msg_b->struct_v = 1; |
3221 | get_random_bytes(&au->nonce, sizeof(au->nonce)); |
3222 | - msg_b.struct_v = 1; |
3223 | - msg_b.nonce = cpu_to_le64(au->nonce); |
3224 | - ret = ceph_x_encrypt(&au->session_key, &msg_b, sizeof(msg_b), |
3225 | - p, end - p); |
3226 | + msg_b->nonce = cpu_to_le64(au->nonce); |
3227 | + ret = ceph_x_encrypt(&au->session_key, p, end - p, sizeof(*msg_b)); |
3228 | if (ret < 0) |
3229 | goto out_au; |
3230 | + |
3231 | p += ret; |
3232 | + WARN_ON(p > end); |
3233 | au->buf->vec.iov_len = p - au->buf->vec.iov_base; |
3234 | dout(" built authorizer nonce %llx len %d\n", au->nonce, |
3235 | (int)au->buf->vec.iov_len); |
3236 | - BUG_ON(au->buf->vec.iov_len > maxlen); |
3237 | return 0; |
3238 | |
3239 | out_au: |
3240 | @@ -452,8 +445,9 @@ static int ceph_x_build_request(struct ceph_auth_client *ac, |
3241 | if (need & CEPH_ENTITY_TYPE_AUTH) { |
3242 | struct ceph_x_authenticate *auth = (void *)(head + 1); |
3243 | void *p = auth + 1; |
3244 | - struct ceph_x_challenge_blob tmp; |
3245 | - char tmp_enc[40]; |
3246 | + void *enc_buf = xi->auth_authorizer.enc_buf; |
3247 | + struct ceph_x_challenge_blob *blob = enc_buf + |
3248 | + ceph_x_encrypt_offset(); |
3249 | u64 *u; |
3250 | |
3251 | if (p > end) |
3252 | @@ -464,16 +458,16 @@ static int ceph_x_build_request(struct ceph_auth_client *ac, |
3253 | |
3254 | /* encrypt and hash */ |
3255 | get_random_bytes(&auth->client_challenge, sizeof(u64)); |
3256 | - tmp.client_challenge = auth->client_challenge; |
3257 | - tmp.server_challenge = cpu_to_le64(xi->server_challenge); |
3258 | - ret = ceph_x_encrypt(&xi->secret, &tmp, sizeof(tmp), |
3259 | - tmp_enc, sizeof(tmp_enc)); |
3260 | + blob->client_challenge = auth->client_challenge; |
3261 | + blob->server_challenge = cpu_to_le64(xi->server_challenge); |
3262 | + ret = ceph_x_encrypt(&xi->secret, enc_buf, CEPHX_AU_ENC_BUF_LEN, |
3263 | + sizeof(*blob)); |
3264 | if (ret < 0) |
3265 | return ret; |
3266 | |
3267 | auth->struct_v = 1; |
3268 | auth->key = 0; |
3269 | - for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++) |
3270 | + for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++) |
3271 | auth->key ^= *(__le64 *)u; |
3272 | dout(" server_challenge %llx client_challenge %llx key %llx\n", |
3273 | xi->server_challenge, le64_to_cpu(auth->client_challenge), |
3274 | @@ -600,8 +594,8 @@ static int ceph_x_create_authorizer( |
3275 | auth->authorizer = (struct ceph_authorizer *) au; |
3276 | auth->authorizer_buf = au->buf->vec.iov_base; |
3277 | auth->authorizer_buf_len = au->buf->vec.iov_len; |
3278 | - auth->authorizer_reply_buf = au->reply_buf; |
3279 | - auth->authorizer_reply_buf_len = sizeof (au->reply_buf); |
3280 | + auth->authorizer_reply_buf = au->enc_buf; |
3281 | + auth->authorizer_reply_buf_len = CEPHX_AU_ENC_BUF_LEN; |
3282 | auth->sign_message = ac->ops->sign_message; |
3283 | auth->check_message_signature = ac->ops->check_message_signature; |
3284 | |
3285 | @@ -632,24 +626,22 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, |
3286 | struct ceph_authorizer *a, size_t len) |
3287 | { |
3288 | struct ceph_x_authorizer *au = (void *)a; |
3289 | - int ret = 0; |
3290 | - struct ceph_x_authorize_reply reply; |
3291 | - void *preply = &reply; |
3292 | - void *p = au->reply_buf; |
3293 | - void *end = p + sizeof(au->reply_buf); |
3294 | + void *p = au->enc_buf; |
3295 | + struct ceph_x_authorize_reply *reply = p + ceph_x_encrypt_offset(); |
3296 | + int ret; |
3297 | |
3298 | - ret = ceph_x_decrypt(&au->session_key, &p, end, &preply, sizeof(reply)); |
3299 | + ret = ceph_x_decrypt(&au->session_key, &p, p + CEPHX_AU_ENC_BUF_LEN); |
3300 | if (ret < 0) |
3301 | return ret; |
3302 | - if (ret != sizeof(reply)) |
3303 | + if (ret != sizeof(*reply)) |
3304 | return -EPERM; |
3305 | |
3306 | - if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one)) |
3307 | + if (au->nonce + 1 != le64_to_cpu(reply->nonce_plus_one)) |
3308 | ret = -EPERM; |
3309 | else |
3310 | ret = 0; |
3311 | dout("verify_authorizer_reply nonce %llx got %llx ret %d\n", |
3312 | - au->nonce, le64_to_cpu(reply.nonce_plus_one), ret); |
3313 | + au->nonce, le64_to_cpu(reply->nonce_plus_one), ret); |
3314 | return ret; |
3315 | } |
3316 | |
3317 | @@ -704,35 +696,48 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac, |
3318 | invalidate_ticket(ac, CEPH_ENTITY_TYPE_AUTH); |
3319 | } |
3320 | |
3321 | -static int calcu_signature(struct ceph_x_authorizer *au, |
3322 | - struct ceph_msg *msg, __le64 *sig) |
3323 | +static int calc_signature(struct ceph_x_authorizer *au, struct ceph_msg *msg, |
3324 | + __le64 *psig) |
3325 | { |
3326 | + void *enc_buf = au->enc_buf; |
3327 | + struct { |
3328 | + __le32 len; |
3329 | + __le32 header_crc; |
3330 | + __le32 front_crc; |
3331 | + __le32 middle_crc; |
3332 | + __le32 data_crc; |
3333 | + } __packed *sigblock = enc_buf + ceph_x_encrypt_offset(); |
3334 | int ret; |
3335 | - char tmp_enc[40]; |
3336 | - __le32 tmp[5] = { |
3337 | - cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc, |
3338 | - msg->footer.middle_crc, msg->footer.data_crc, |
3339 | - }; |
3340 | - ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp), |
3341 | - tmp_enc, sizeof(tmp_enc)); |
3342 | + |
3343 | + sigblock->len = cpu_to_le32(4*sizeof(u32)); |
3344 | + sigblock->header_crc = msg->hdr.crc; |
3345 | + sigblock->front_crc = msg->footer.front_crc; |
3346 | + sigblock->middle_crc = msg->footer.middle_crc; |
3347 | + sigblock->data_crc = msg->footer.data_crc; |
3348 | + ret = ceph_x_encrypt(&au->session_key, enc_buf, CEPHX_AU_ENC_BUF_LEN, |
3349 | + sizeof(*sigblock)); |
3350 | if (ret < 0) |
3351 | return ret; |
3352 | - *sig = *(__le64*)(tmp_enc + 4); |
3353 | + |
3354 | + *psig = *(__le64 *)(enc_buf + sizeof(u32)); |
3355 | return 0; |
3356 | } |
3357 | |
3358 | static int ceph_x_sign_message(struct ceph_auth_handshake *auth, |
3359 | struct ceph_msg *msg) |
3360 | { |
3361 | + __le64 sig; |
3362 | int ret; |
3363 | |
3364 | if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN)) |
3365 | return 0; |
3366 | |
3367 | - ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer, |
3368 | - msg, &msg->footer.sig); |
3369 | - if (ret < 0) |
3370 | + ret = calc_signature((struct ceph_x_authorizer *)auth->authorizer, |
3371 | + msg, &sig); |
3372 | + if (ret) |
3373 | return ret; |
3374 | + |
3375 | + msg->footer.sig = sig; |
3376 | msg->footer.flags |= CEPH_MSG_FOOTER_SIGNED; |
3377 | return 0; |
3378 | } |
3379 | @@ -746,9 +751,9 @@ static int ceph_x_check_message_signature(struct ceph_auth_handshake *auth, |
3380 | if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN)) |
3381 | return 0; |
3382 | |
3383 | - ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer, |
3384 | - msg, &sig_check); |
3385 | - if (ret < 0) |
3386 | + ret = calc_signature((struct ceph_x_authorizer *)auth->authorizer, |
3387 | + msg, &sig_check); |
3388 | + if (ret) |
3389 | return ret; |
3390 | if (sig_check == msg->footer.sig) |
3391 | return 0; |
3392 | diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h |
3393 | index 21a5af904bae..48e9ad41bd2a 100644 |
3394 | --- a/net/ceph/auth_x.h |
3395 | +++ b/net/ceph/auth_x.h |
3396 | @@ -24,6 +24,7 @@ struct ceph_x_ticket_handler { |
3397 | unsigned long renew_after, expires; |
3398 | }; |
3399 | |
3400 | +#define CEPHX_AU_ENC_BUF_LEN 128 /* big enough for encrypted blob */ |
3401 | |
3402 | struct ceph_x_authorizer { |
3403 | struct ceph_authorizer base; |
3404 | @@ -32,7 +33,7 @@ struct ceph_x_authorizer { |
3405 | unsigned int service; |
3406 | u64 nonce; |
3407 | u64 secret_id; |
3408 | - char reply_buf[128]; /* big enough for encrypted blob */ |
3409 | + char enc_buf[CEPHX_AU_ENC_BUF_LEN] __aligned(8); |
3410 | }; |
3411 | |
3412 | struct ceph_x_info { |
3413 | diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c |
3414 | index db2847ac5f12..292e33bd916e 100644 |
3415 | --- a/net/ceph/crypto.c |
3416 | +++ b/net/ceph/crypto.c |
3417 | @@ -13,14 +13,60 @@ |
3418 | #include <linux/ceph/decode.h> |
3419 | #include "crypto.h" |
3420 | |
3421 | +/* |
3422 | + * Set ->key and ->tfm. The rest of the key should be filled in before |
3423 | + * this function is called. |
3424 | + */ |
3425 | +static int set_secret(struct ceph_crypto_key *key, void *buf) |
3426 | +{ |
3427 | + unsigned int noio_flag; |
3428 | + int ret; |
3429 | + |
3430 | + key->key = NULL; |
3431 | + key->tfm = NULL; |
3432 | + |
3433 | + switch (key->type) { |
3434 | + case CEPH_CRYPTO_NONE: |
3435 | + return 0; /* nothing to do */ |
3436 | + case CEPH_CRYPTO_AES: |
3437 | + break; |
3438 | + default: |
3439 | + return -ENOTSUPP; |
3440 | + } |
3441 | + |
3442 | + WARN_ON(!key->len); |
3443 | + key->key = kmemdup(buf, key->len, GFP_NOIO); |
3444 | + if (!key->key) { |
3445 | + ret = -ENOMEM; |
3446 | + goto fail; |
3447 | + } |
3448 | + |
3449 | + /* crypto_alloc_skcipher() allocates with GFP_KERNEL */ |
3450 | + noio_flag = memalloc_noio_save(); |
3451 | + key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); |
3452 | + memalloc_noio_restore(noio_flag); |
3453 | + if (IS_ERR(key->tfm)) { |
3454 | + ret = PTR_ERR(key->tfm); |
3455 | + key->tfm = NULL; |
3456 | + goto fail; |
3457 | + } |
3458 | + |
3459 | + ret = crypto_skcipher_setkey(key->tfm, key->key, key->len); |
3460 | + if (ret) |
3461 | + goto fail; |
3462 | + |
3463 | + return 0; |
3464 | + |
3465 | +fail: |
3466 | + ceph_crypto_key_destroy(key); |
3467 | + return ret; |
3468 | +} |
3469 | + |
3470 | int ceph_crypto_key_clone(struct ceph_crypto_key *dst, |
3471 | const struct ceph_crypto_key *src) |
3472 | { |
3473 | memcpy(dst, src, sizeof(struct ceph_crypto_key)); |
3474 | - dst->key = kmemdup(src->key, src->len, GFP_NOFS); |
3475 | - if (!dst->key) |
3476 | - return -ENOMEM; |
3477 | - return 0; |
3478 | + return set_secret(dst, src->key); |
3479 | } |
3480 | |
3481 | int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end) |
3482 | @@ -37,16 +83,16 @@ int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end) |
3483 | |
3484 | int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end) |
3485 | { |
3486 | + int ret; |
3487 | + |
3488 | ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad); |
3489 | key->type = ceph_decode_16(p); |
3490 | ceph_decode_copy(p, &key->created, sizeof(key->created)); |
3491 | key->len = ceph_decode_16(p); |
3492 | ceph_decode_need(p, end, key->len, bad); |
3493 | - key->key = kmalloc(key->len, GFP_NOFS); |
3494 | - if (!key->key) |
3495 | - return -ENOMEM; |
3496 | - ceph_decode_copy(p, key->key, key->len); |
3497 | - return 0; |
3498 | + ret = set_secret(key, *p); |
3499 | + *p += key->len; |
3500 | + return ret; |
3501 | |
3502 | bad: |
3503 | dout("failed to decode crypto key\n"); |
3504 | @@ -80,9 +126,14 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey) |
3505 | return 0; |
3506 | } |
3507 | |
3508 | -static struct crypto_skcipher *ceph_crypto_alloc_cipher(void) |
3509 | +void ceph_crypto_key_destroy(struct ceph_crypto_key *key) |
3510 | { |
3511 | - return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); |
3512 | + if (key) { |
3513 | + kfree(key->key); |
3514 | + key->key = NULL; |
3515 | + crypto_free_skcipher(key->tfm); |
3516 | + key->tfm = NULL; |
3517 | + } |
3518 | } |
3519 | |
3520 | static const u8 *aes_iv = (u8 *)CEPH_AES_IV; |
3521 | @@ -157,372 +208,82 @@ static void teardown_sgtable(struct sg_table *sgt) |
3522 | sg_free_table(sgt); |
3523 | } |
3524 | |
3525 | -static int ceph_aes_encrypt(const void *key, int key_len, |
3526 | - void *dst, size_t *dst_len, |
3527 | - const void *src, size_t src_len) |
3528 | -{ |
3529 | - struct scatterlist sg_in[2], prealloc_sg; |
3530 | - struct sg_table sg_out; |
3531 | - struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher(); |
3532 | - SKCIPHER_REQUEST_ON_STACK(req, tfm); |
3533 | - int ret; |
3534 | - char iv[AES_BLOCK_SIZE]; |
3535 | - size_t zero_padding = (0x10 - (src_len & 0x0f)); |
3536 | - char pad[16]; |
3537 | - |
3538 | - if (IS_ERR(tfm)) |
3539 | - return PTR_ERR(tfm); |
3540 | - |
3541 | - memset(pad, zero_padding, zero_padding); |
3542 | - |
3543 | - *dst_len = src_len + zero_padding; |
3544 | - |
3545 | - sg_init_table(sg_in, 2); |
3546 | - sg_set_buf(&sg_in[0], src, src_len); |
3547 | - sg_set_buf(&sg_in[1], pad, zero_padding); |
3548 | - ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); |
3549 | - if (ret) |
3550 | - goto out_tfm; |
3551 | - |
3552 | - crypto_skcipher_setkey((void *)tfm, key, key_len); |
3553 | - memcpy(iv, aes_iv, AES_BLOCK_SIZE); |
3554 | - |
3555 | - skcipher_request_set_tfm(req, tfm); |
3556 | - skcipher_request_set_callback(req, 0, NULL, NULL); |
3557 | - skcipher_request_set_crypt(req, sg_in, sg_out.sgl, |
3558 | - src_len + zero_padding, iv); |
3559 | - |
3560 | - /* |
3561 | - print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, |
3562 | - key, key_len, 1); |
3563 | - print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1, |
3564 | - src, src_len, 1); |
3565 | - print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, |
3566 | - pad, zero_padding, 1); |
3567 | - */ |
3568 | - ret = crypto_skcipher_encrypt(req); |
3569 | - skcipher_request_zero(req); |
3570 | - if (ret < 0) { |
3571 | - pr_err("ceph_aes_crypt failed %d\n", ret); |
3572 | - goto out_sg; |
3573 | - } |
3574 | - /* |
3575 | - print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, |
3576 | - dst, *dst_len, 1); |
3577 | - */ |
3578 | - |
3579 | -out_sg: |
3580 | - teardown_sgtable(&sg_out); |
3581 | -out_tfm: |
3582 | - crypto_free_skcipher(tfm); |
3583 | - return ret; |
3584 | -} |
3585 | - |
3586 | -static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, |
3587 | - size_t *dst_len, |
3588 | - const void *src1, size_t src1_len, |
3589 | - const void *src2, size_t src2_len) |
3590 | -{ |
3591 | - struct scatterlist sg_in[3], prealloc_sg; |
3592 | - struct sg_table sg_out; |
3593 | - struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher(); |
3594 | - SKCIPHER_REQUEST_ON_STACK(req, tfm); |
3595 | - int ret; |
3596 | - char iv[AES_BLOCK_SIZE]; |
3597 | - size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f)); |
3598 | - char pad[16]; |
3599 | - |
3600 | - if (IS_ERR(tfm)) |
3601 | - return PTR_ERR(tfm); |
3602 | - |
3603 | - memset(pad, zero_padding, zero_padding); |
3604 | - |
3605 | - *dst_len = src1_len + src2_len + zero_padding; |
3606 | - |
3607 | - sg_init_table(sg_in, 3); |
3608 | - sg_set_buf(&sg_in[0], src1, src1_len); |
3609 | - sg_set_buf(&sg_in[1], src2, src2_len); |
3610 | - sg_set_buf(&sg_in[2], pad, zero_padding); |
3611 | - ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); |
3612 | - if (ret) |
3613 | - goto out_tfm; |
3614 | - |
3615 | - crypto_skcipher_setkey((void *)tfm, key, key_len); |
3616 | - memcpy(iv, aes_iv, AES_BLOCK_SIZE); |
3617 | - |
3618 | - skcipher_request_set_tfm(req, tfm); |
3619 | - skcipher_request_set_callback(req, 0, NULL, NULL); |
3620 | - skcipher_request_set_crypt(req, sg_in, sg_out.sgl, |
3621 | - src1_len + src2_len + zero_padding, iv); |
3622 | - |
3623 | - /* |
3624 | - print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, |
3625 | - key, key_len, 1); |
3626 | - print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1, |
3627 | - src1, src1_len, 1); |
3628 | - print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1, |
3629 | - src2, src2_len, 1); |
3630 | - print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, |
3631 | - pad, zero_padding, 1); |
3632 | - */ |
3633 | - ret = crypto_skcipher_encrypt(req); |
3634 | - skcipher_request_zero(req); |
3635 | - if (ret < 0) { |
3636 | - pr_err("ceph_aes_crypt2 failed %d\n", ret); |
3637 | - goto out_sg; |
3638 | - } |
3639 | - /* |
3640 | - print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, |
3641 | - dst, *dst_len, 1); |
3642 | - */ |
3643 | - |
3644 | -out_sg: |
3645 | - teardown_sgtable(&sg_out); |
3646 | -out_tfm: |
3647 | - crypto_free_skcipher(tfm); |
3648 | - return ret; |
3649 | -} |
3650 | - |
3651 | -static int ceph_aes_decrypt(const void *key, int key_len, |
3652 | - void *dst, size_t *dst_len, |
3653 | - const void *src, size_t src_len) |
3654 | +static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt, |
3655 | + void *buf, int buf_len, int in_len, int *pout_len) |
3656 | { |
3657 | - struct sg_table sg_in; |
3658 | - struct scatterlist sg_out[2], prealloc_sg; |
3659 | - struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher(); |
3660 | - SKCIPHER_REQUEST_ON_STACK(req, tfm); |
3661 | - char pad[16]; |
3662 | - char iv[AES_BLOCK_SIZE]; |
3663 | + SKCIPHER_REQUEST_ON_STACK(req, key->tfm); |
3664 | + struct sg_table sgt; |
3665 | + struct scatterlist prealloc_sg; |
3666 | + char iv[AES_BLOCK_SIZE] __aligned(8); |
3667 | + int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1)); |
3668 | + int crypt_len = encrypt ? in_len + pad_byte : in_len; |
3669 | int ret; |
3670 | - int last_byte; |
3671 | - |
3672 | - if (IS_ERR(tfm)) |
3673 | - return PTR_ERR(tfm); |
3674 | |
3675 | - sg_init_table(sg_out, 2); |
3676 | - sg_set_buf(&sg_out[0], dst, *dst_len); |
3677 | - sg_set_buf(&sg_out[1], pad, sizeof(pad)); |
3678 | - ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len); |
3679 | + WARN_ON(crypt_len > buf_len); |
3680 | + if (encrypt) |
3681 | + memset(buf + in_len, pad_byte, pad_byte); |
3682 | + ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len); |
3683 | if (ret) |
3684 | - goto out_tfm; |
3685 | + return ret; |
3686 | |
3687 | - crypto_skcipher_setkey((void *)tfm, key, key_len); |
3688 | memcpy(iv, aes_iv, AES_BLOCK_SIZE); |
3689 | - |
3690 | - skcipher_request_set_tfm(req, tfm); |
3691 | + skcipher_request_set_tfm(req, key->tfm); |
3692 | skcipher_request_set_callback(req, 0, NULL, NULL); |
3693 | - skcipher_request_set_crypt(req, sg_in.sgl, sg_out, |
3694 | - src_len, iv); |
3695 | + skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv); |
3696 | |
3697 | /* |
3698 | - print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1, |
3699 | - key, key_len, 1); |
3700 | - print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, |
3701 | - src, src_len, 1); |
3702 | + print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1, |
3703 | + key->key, key->len, 1); |
3704 | + print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1, |
3705 | + buf, crypt_len, 1); |
3706 | */ |
3707 | - ret = crypto_skcipher_decrypt(req); |
3708 | - skcipher_request_zero(req); |
3709 | - if (ret < 0) { |
3710 | - pr_err("ceph_aes_decrypt failed %d\n", ret); |
3711 | - goto out_sg; |
3712 | - } |
3713 | - |
3714 | - if (src_len <= *dst_len) |
3715 | - last_byte = ((char *)dst)[src_len - 1]; |
3716 | + if (encrypt) |
3717 | + ret = crypto_skcipher_encrypt(req); |
3718 | else |
3719 | - last_byte = pad[src_len - *dst_len - 1]; |
3720 | - if (last_byte <= 16 && src_len >= last_byte) { |
3721 | - *dst_len = src_len - last_byte; |
3722 | - } else { |
3723 | - pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n", |
3724 | - last_byte, (int)src_len); |
3725 | - return -EPERM; /* bad padding */ |
3726 | - } |
3727 | - /* |
3728 | - print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1, |
3729 | - dst, *dst_len, 1); |
3730 | - */ |
3731 | - |
3732 | -out_sg: |
3733 | - teardown_sgtable(&sg_in); |
3734 | -out_tfm: |
3735 | - crypto_free_skcipher(tfm); |
3736 | - return ret; |
3737 | -} |
3738 | - |
3739 | -static int ceph_aes_decrypt2(const void *key, int key_len, |
3740 | - void *dst1, size_t *dst1_len, |
3741 | - void *dst2, size_t *dst2_len, |
3742 | - const void *src, size_t src_len) |
3743 | -{ |
3744 | - struct sg_table sg_in; |
3745 | - struct scatterlist sg_out[3], prealloc_sg; |
3746 | - struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher(); |
3747 | - SKCIPHER_REQUEST_ON_STACK(req, tfm); |
3748 | - char pad[16]; |
3749 | - char iv[AES_BLOCK_SIZE]; |
3750 | - int ret; |
3751 | - int last_byte; |
3752 | - |
3753 | - if (IS_ERR(tfm)) |
3754 | - return PTR_ERR(tfm); |
3755 | - |
3756 | - sg_init_table(sg_out, 3); |
3757 | - sg_set_buf(&sg_out[0], dst1, *dst1_len); |
3758 | - sg_set_buf(&sg_out[1], dst2, *dst2_len); |
3759 | - sg_set_buf(&sg_out[2], pad, sizeof(pad)); |
3760 | - ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len); |
3761 | - if (ret) |
3762 | - goto out_tfm; |
3763 | - |
3764 | - crypto_skcipher_setkey((void *)tfm, key, key_len); |
3765 | - memcpy(iv, aes_iv, AES_BLOCK_SIZE); |
3766 | - |
3767 | - skcipher_request_set_tfm(req, tfm); |
3768 | - skcipher_request_set_callback(req, 0, NULL, NULL); |
3769 | - skcipher_request_set_crypt(req, sg_in.sgl, sg_out, |
3770 | - src_len, iv); |
3771 | - |
3772 | - /* |
3773 | - print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1, |
3774 | - key, key_len, 1); |
3775 | - print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, |
3776 | - src, src_len, 1); |
3777 | - */ |
3778 | - ret = crypto_skcipher_decrypt(req); |
3779 | + ret = crypto_skcipher_decrypt(req); |
3780 | skcipher_request_zero(req); |
3781 | - if (ret < 0) { |
3782 | - pr_err("ceph_aes_decrypt failed %d\n", ret); |
3783 | - goto out_sg; |
3784 | - } |
3785 | - |
3786 | - if (src_len <= *dst1_len) |
3787 | - last_byte = ((char *)dst1)[src_len - 1]; |
3788 | - else if (src_len <= *dst1_len + *dst2_len) |
3789 | - last_byte = ((char *)dst2)[src_len - *dst1_len - 1]; |
3790 | - else |
3791 | - last_byte = pad[src_len - *dst1_len - *dst2_len - 1]; |
3792 | - if (last_byte <= 16 && src_len >= last_byte) { |
3793 | - src_len -= last_byte; |
3794 | - } else { |
3795 | - pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n", |
3796 | - last_byte, (int)src_len); |
3797 | - return -EPERM; /* bad padding */ |
3798 | - } |
3799 | - |
3800 | - if (src_len < *dst1_len) { |
3801 | - *dst1_len = src_len; |
3802 | - *dst2_len = 0; |
3803 | - } else { |
3804 | - *dst2_len = src_len - *dst1_len; |
3805 | + if (ret) { |
3806 | + pr_err("%s %scrypt failed: %d\n", __func__, |
3807 | + encrypt ? "en" : "de", ret); |
3808 | + goto out_sgt; |
3809 | } |
3810 | /* |
3811 | - print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1, |
3812 | - dst1, *dst1_len, 1); |
3813 | - print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1, |
3814 | - dst2, *dst2_len, 1); |
3815 | + print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1, |
3816 | + buf, crypt_len, 1); |
3817 | */ |
3818 | |
3819 | -out_sg: |
3820 | - teardown_sgtable(&sg_in); |
3821 | -out_tfm: |
3822 | - crypto_free_skcipher(tfm); |
3823 | - return ret; |
3824 | -} |
3825 | - |
3826 | - |
3827 | -int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, |
3828 | - const void *src, size_t src_len) |
3829 | -{ |
3830 | - switch (secret->type) { |
3831 | - case CEPH_CRYPTO_NONE: |
3832 | - if (*dst_len < src_len) |
3833 | - return -ERANGE; |
3834 | - memcpy(dst, src, src_len); |
3835 | - *dst_len = src_len; |
3836 | - return 0; |
3837 | - |
3838 | - case CEPH_CRYPTO_AES: |
3839 | - return ceph_aes_decrypt(secret->key, secret->len, dst, |
3840 | - dst_len, src, src_len); |
3841 | - |
3842 | - default: |
3843 | - return -EINVAL; |
3844 | - } |
3845 | -} |
3846 | - |
3847 | -int ceph_decrypt2(struct ceph_crypto_key *secret, |
3848 | - void *dst1, size_t *dst1_len, |
3849 | - void *dst2, size_t *dst2_len, |
3850 | - const void *src, size_t src_len) |
3851 | -{ |
3852 | - size_t t; |
3853 | - |
3854 | - switch (secret->type) { |
3855 | - case CEPH_CRYPTO_NONE: |
3856 | - if (*dst1_len + *dst2_len < src_len) |
3857 | - return -ERANGE; |
3858 | - t = min(*dst1_len, src_len); |
3859 | - memcpy(dst1, src, t); |
3860 | - *dst1_len = t; |
3861 | - src += t; |
3862 | - src_len -= t; |
3863 | - if (src_len) { |
3864 | - t = min(*dst2_len, src_len); |
3865 | - memcpy(dst2, src, t); |
3866 | - *dst2_len = t; |
3867 | + if (encrypt) { |
3868 | + *pout_len = crypt_len; |
3869 | + } else { |
3870 | + pad_byte = *(char *)(buf + in_len - 1); |
3871 | + if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE && |
3872 | + in_len >= pad_byte) { |
3873 | + *pout_len = in_len - pad_byte; |
3874 | + } else { |
3875 | + pr_err("%s got bad padding %d on in_len %d\n", |
3876 | + __func__, pad_byte, in_len); |
3877 | + ret = -EPERM; |
3878 | + goto out_sgt; |
3879 | } |
3880 | - return 0; |
3881 | - |
3882 | - case CEPH_CRYPTO_AES: |
3883 | - return ceph_aes_decrypt2(secret->key, secret->len, |
3884 | - dst1, dst1_len, dst2, dst2_len, |
3885 | - src, src_len); |
3886 | - |
3887 | - default: |
3888 | - return -EINVAL; |
3889 | } |
3890 | -} |
3891 | - |
3892 | -int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, |
3893 | - const void *src, size_t src_len) |
3894 | -{ |
3895 | - switch (secret->type) { |
3896 | - case CEPH_CRYPTO_NONE: |
3897 | - if (*dst_len < src_len) |
3898 | - return -ERANGE; |
3899 | - memcpy(dst, src, src_len); |
3900 | - *dst_len = src_len; |
3901 | - return 0; |
3902 | |
3903 | - case CEPH_CRYPTO_AES: |
3904 | - return ceph_aes_encrypt(secret->key, secret->len, dst, |
3905 | - dst_len, src, src_len); |
3906 | - |
3907 | - default: |
3908 | - return -EINVAL; |
3909 | - } |
3910 | +out_sgt: |
3911 | + teardown_sgtable(&sgt); |
3912 | + return ret; |
3913 | } |
3914 | |
3915 | -int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, |
3916 | - const void *src1, size_t src1_len, |
3917 | - const void *src2, size_t src2_len) |
3918 | +int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt, |
3919 | + void *buf, int buf_len, int in_len, int *pout_len) |
3920 | { |
3921 | - switch (secret->type) { |
3922 | + switch (key->type) { |
3923 | case CEPH_CRYPTO_NONE: |
3924 | - if (*dst_len < src1_len + src2_len) |
3925 | - return -ERANGE; |
3926 | - memcpy(dst, src1, src1_len); |
3927 | - memcpy(dst + src1_len, src2, src2_len); |
3928 | - *dst_len = src1_len + src2_len; |
3929 | + *pout_len = in_len; |
3930 | return 0; |
3931 | - |
3932 | case CEPH_CRYPTO_AES: |
3933 | - return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len, |
3934 | - src1, src1_len, src2, src2_len); |
3935 | - |
3936 | + return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len, |
3937 | + pout_len); |
3938 | default: |
3939 | - return -EINVAL; |
3940 | + return -ENOTSUPP; |
3941 | } |
3942 | } |
3943 | |
3944 | diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h |
3945 | index 2e9cab09f37b..58d83aa7740f 100644 |
3946 | --- a/net/ceph/crypto.h |
3947 | +++ b/net/ceph/crypto.h |
3948 | @@ -12,37 +12,19 @@ struct ceph_crypto_key { |
3949 | struct ceph_timespec created; |
3950 | int len; |
3951 | void *key; |
3952 | + struct crypto_skcipher *tfm; |
3953 | }; |
3954 | |
3955 | -static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key) |
3956 | -{ |
3957 | - if (key) { |
3958 | - kfree(key->key); |
3959 | - key->key = NULL; |
3960 | - } |
3961 | -} |
3962 | - |
3963 | int ceph_crypto_key_clone(struct ceph_crypto_key *dst, |
3964 | const struct ceph_crypto_key *src); |
3965 | int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end); |
3966 | int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end); |
3967 | int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in); |
3968 | +void ceph_crypto_key_destroy(struct ceph_crypto_key *key); |
3969 | |
3970 | /* crypto.c */ |
3971 | -int ceph_decrypt(struct ceph_crypto_key *secret, |
3972 | - void *dst, size_t *dst_len, |
3973 | - const void *src, size_t src_len); |
3974 | -int ceph_encrypt(struct ceph_crypto_key *secret, |
3975 | - void *dst, size_t *dst_len, |
3976 | - const void *src, size_t src_len); |
3977 | -int ceph_decrypt2(struct ceph_crypto_key *secret, |
3978 | - void *dst1, size_t *dst1_len, |
3979 | - void *dst2, size_t *dst2_len, |
3980 | - const void *src, size_t src_len); |
3981 | -int ceph_encrypt2(struct ceph_crypto_key *secret, |
3982 | - void *dst, size_t *dst_len, |
3983 | - const void *src1, size_t src1_len, |
3984 | - const void *src2, size_t src2_len); |
3985 | +int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt, |
3986 | + void *buf, int buf_len, int in_len, int *pout_len); |
3987 | int ceph_crypto_init(void); |
3988 | void ceph_crypto_shutdown(void); |
3989 | |
3990 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c |
3991 | index a47bbc973f2d..2384b4aae064 100644 |
3992 | --- a/net/mac80211/rx.c |
3993 | +++ b/net/mac80211/rx.c |
3994 | @@ -3939,21 +3939,31 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, |
3995 | u64_stats_update_end(&stats->syncp); |
3996 | |
3997 | if (fast_rx->internal_forward) { |
3998 | - struct sta_info *dsta = sta_info_get(rx->sdata, skb->data); |
3999 | + struct sk_buff *xmit_skb = NULL; |
4000 | + bool multicast = is_multicast_ether_addr(skb->data); |
4001 | |
4002 | - if (dsta) { |
4003 | + if (multicast) { |
4004 | + xmit_skb = skb_copy(skb, GFP_ATOMIC); |
4005 | + } else if (sta_info_get(rx->sdata, skb->data)) { |
4006 | + xmit_skb = skb; |
4007 | + skb = NULL; |
4008 | + } |
4009 | + |
4010 | + if (xmit_skb) { |
4011 | /* |
4012 | * Send to wireless media and increase priority by 256 |
4013 | * to keep the received priority instead of |
4014 | * reclassifying the frame (see cfg80211_classify8021d). |
4015 | */ |
4016 | - skb->priority += 256; |
4017 | - skb->protocol = htons(ETH_P_802_3); |
4018 | - skb_reset_network_header(skb); |
4019 | - skb_reset_mac_header(skb); |
4020 | - dev_queue_xmit(skb); |
4021 | - return true; |
4022 | + xmit_skb->priority += 256; |
4023 | + xmit_skb->protocol = htons(ETH_P_802_3); |
4024 | + skb_reset_network_header(xmit_skb); |
4025 | + skb_reset_mac_header(xmit_skb); |
4026 | + dev_queue_xmit(xmit_skb); |
4027 | } |
4028 | + |
4029 | + if (!skb) |
4030 | + return true; |
4031 | } |
4032 | |
4033 | /* deliver to local stack */ |
4034 | diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c |
4035 | index 45662d7f0943..6fdffde28733 100644 |
4036 | --- a/net/sunrpc/auth_gss/svcauth_gss.c |
4037 | +++ b/net/sunrpc/auth_gss/svcauth_gss.c |
4038 | @@ -1489,7 +1489,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) |
4039 | case RPC_GSS_PROC_DESTROY: |
4040 | if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq)) |
4041 | goto auth_err; |
4042 | - rsci->h.expiry_time = get_seconds(); |
4043 | + rsci->h.expiry_time = seconds_since_boot(); |
4044 | set_bit(CACHE_NEGATIVE, &rsci->h.flags); |
4045 | if (resv->iov_len + 4 > PAGE_SIZE) |
4046 | goto drop; |
4047 | diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c |
4048 | index 3bc1d61694cb..9c9db55a0c1e 100644 |
4049 | --- a/net/sunrpc/svc_xprt.c |
4050 | +++ b/net/sunrpc/svc_xprt.c |
4051 | @@ -799,6 +799,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt) |
4052 | |
4053 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { |
4054 | dprintk("svc_recv: found XPT_CLOSE\n"); |
4055 | + if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags)) |
4056 | + xprt->xpt_ops->xpo_kill_temp_xprt(xprt); |
4057 | svc_delete_xprt(xprt); |
4058 | /* Leave XPT_BUSY set on the dead xprt: */ |
4059 | goto out; |
4060 | @@ -1020,9 +1022,11 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr) |
4061 | le = to_be_closed.next; |
4062 | list_del_init(le); |
4063 | xprt = list_entry(le, struct svc_xprt, xpt_list); |
4064 | - dprintk("svc_age_temp_xprts_now: closing %p\n", xprt); |
4065 | - xprt->xpt_ops->xpo_kill_temp_xprt(xprt); |
4066 | - svc_close_xprt(xprt); |
4067 | + set_bit(XPT_CLOSE, &xprt->xpt_flags); |
4068 | + set_bit(XPT_KILL_TEMP, &xprt->xpt_flags); |
4069 | + dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n", |
4070 | + xprt); |
4071 | + svc_xprt_enqueue(xprt); |
4072 | } |
4073 | } |
4074 | EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now); |
4075 | diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c |
4076 | index 26b26beef2d4..adbf52c6df83 100644 |
4077 | --- a/net/sunrpc/xprtrdma/frwr_ops.c |
4078 | +++ b/net/sunrpc/xprtrdma/frwr_ops.c |
4079 | @@ -421,7 +421,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, |
4080 | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : |
4081 | IB_ACCESS_REMOTE_READ; |
4082 | |
4083 | - DECR_CQCOUNT(&r_xprt->rx_ep); |
4084 | + rpcrdma_set_signaled(&r_xprt->rx_ep, ®_wr->wr); |
4085 | rc = ib_post_send(ia->ri_id->qp, ®_wr->wr, &bad_wr); |
4086 | if (rc) |
4087 | goto out_senderr; |
4088 | @@ -486,7 +486,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) |
4089 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
4090 | struct rpcrdma_mw *mw, *tmp; |
4091 | struct rpcrdma_frmr *f; |
4092 | - int rc; |
4093 | + int count, rc; |
4094 | |
4095 | dprintk("RPC: %s: req %p\n", __func__, req); |
4096 | |
4097 | @@ -496,6 +496,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) |
4098 | * a single ib_post_send() call. |
4099 | */ |
4100 | f = NULL; |
4101 | + count = 0; |
4102 | invalidate_wrs = pos = prev = NULL; |
4103 | list_for_each_entry(mw, &req->rl_registered, mw_list) { |
4104 | if ((rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) && |
4105 | @@ -505,6 +506,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) |
4106 | } |
4107 | |
4108 | pos = __frwr_prepare_linv_wr(mw); |
4109 | + count++; |
4110 | |
4111 | if (!invalidate_wrs) |
4112 | invalidate_wrs = pos; |
4113 | @@ -523,7 +525,12 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) |
4114 | f->fr_invwr.send_flags = IB_SEND_SIGNALED; |
4115 | f->fr_cqe.done = frwr_wc_localinv_wake; |
4116 | reinit_completion(&f->fr_linv_done); |
4117 | - INIT_CQCOUNT(&r_xprt->rx_ep); |
4118 | + |
4119 | + /* Initialize CQ count, since there is always a signaled |
4120 | + * WR being posted here. The new cqcount depends on how |
4121 | + * many SQEs are about to be consumed. |
4122 | + */ |
4123 | + rpcrdma_init_cqcount(&r_xprt->rx_ep, count); |
4124 | |
4125 | /* Transport disconnect drains the receive CQ before it |
4126 | * replaces the QP. The RPC reply handler won't call us |
4127 | diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c |
4128 | index ad1df979b3f0..a47c9bdef5fa 100644 |
4129 | --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c |
4130 | +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c |
4131 | @@ -348,8 +348,6 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt, |
4132 | atomic_inc(&rdma_stat_read); |
4133 | return ret; |
4134 | err: |
4135 | - ib_dma_unmap_sg(xprt->sc_cm_id->device, |
4136 | - frmr->sg, frmr->sg_nents, frmr->direction); |
4137 | svc_rdma_put_context(ctxt, 0); |
4138 | svc_rdma_put_frmr(xprt, frmr); |
4139 | return ret; |
4140 | diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c |
4141 | index ec74289af7ec..8da7f6a4dfc3 100644 |
4142 | --- a/net/sunrpc/xprtrdma/verbs.c |
4143 | +++ b/net/sunrpc/xprtrdma/verbs.c |
4144 | @@ -223,8 +223,8 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt, |
4145 | cdata->inline_rsize = rsize; |
4146 | if (wsize < cdata->inline_wsize) |
4147 | cdata->inline_wsize = wsize; |
4148 | - pr_info("rpcrdma: max send %u, max recv %u\n", |
4149 | - cdata->inline_wsize, cdata->inline_rsize); |
4150 | + dprintk("RPC: %s: max send %u, max recv %u\n", |
4151 | + __func__, cdata->inline_wsize, cdata->inline_rsize); |
4152 | rpcrdma_set_max_header_sizes(r_xprt); |
4153 | } |
4154 | |
4155 | @@ -532,7 +532,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, |
4156 | ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1; |
4157 | if (ep->rep_cqinit <= 2) |
4158 | ep->rep_cqinit = 0; /* always signal? */ |
4159 | - INIT_CQCOUNT(ep); |
4160 | + rpcrdma_init_cqcount(ep, 0); |
4161 | init_waitqueue_head(&ep->rep_connect_wait); |
4162 | INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker); |
4163 | |
4164 | @@ -1311,13 +1311,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia, |
4165 | dprintk("RPC: %s: posting %d s/g entries\n", |
4166 | __func__, send_wr->num_sge); |
4167 | |
4168 | - if (DECR_CQCOUNT(ep) > 0) |
4169 | - send_wr->send_flags = 0; |
4170 | - else { /* Provider must take a send completion every now and then */ |
4171 | - INIT_CQCOUNT(ep); |
4172 | - send_wr->send_flags = IB_SEND_SIGNALED; |
4173 | - } |
4174 | - |
4175 | + rpcrdma_set_signaled(ep, send_wr); |
4176 | rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail); |
4177 | if (rc) |
4178 | goto out_postsend_err; |
4179 | diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h |
4180 | index 6e1bba358203..f6ae1b22da47 100644 |
4181 | --- a/net/sunrpc/xprtrdma/xprt_rdma.h |
4182 | +++ b/net/sunrpc/xprtrdma/xprt_rdma.h |
4183 | @@ -95,8 +95,24 @@ struct rpcrdma_ep { |
4184 | struct delayed_work rep_connect_worker; |
4185 | }; |
4186 | |
4187 | -#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit) |
4188 | -#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount) |
4189 | +static inline void |
4190 | +rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count) |
4191 | +{ |
4192 | + atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count); |
4193 | +} |
4194 | + |
4195 | +/* To update send queue accounting, provider must take a |
4196 | + * send completion every now and then. |
4197 | + */ |
4198 | +static inline void |
4199 | +rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr) |
4200 | +{ |
4201 | + send_wr->send_flags = 0; |
4202 | + if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) { |
4203 | + rpcrdma_init_cqcount(ep, 0); |
4204 | + send_wr->send_flags = IB_SEND_SIGNALED; |
4205 | + } |
4206 | +} |
4207 | |
4208 | /* Pre-allocate extra Work Requests for handling backward receives |
4209 | * and sends. This is a fixed value because the Work Queues are |
4210 | diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config |
4211 | index 72edf83d76b7..cffdd9cf3ebf 100644 |
4212 | --- a/tools/perf/Makefile.config |
4213 | +++ b/tools/perf/Makefile.config |
4214 | @@ -366,7 +366,7 @@ ifndef NO_SDT |
4215 | endif |
4216 | |
4217 | ifdef PERF_HAVE_JITDUMP |
4218 | - ifndef NO_DWARF |
4219 | + ifndef NO_LIBELF |
4220 | $(call detected,CONFIG_JITDUMP) |
4221 | CFLAGS += -DHAVE_JITDUMP |
4222 | endif |
4223 | diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c |
4224 | index d1ce29be560e..cd7bc4d104e2 100644 |
4225 | --- a/tools/perf/builtin-mem.c |
4226 | +++ b/tools/perf/builtin-mem.c |
4227 | @@ -70,8 +70,8 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem) |
4228 | OPT_UINTEGER(0, "ldlat", &perf_mem_events__loads_ldlat, "mem-loads latency"), |
4229 | OPT_INCR('v', "verbose", &verbose, |
4230 | "be more verbose (show counter open errors, etc)"), |
4231 | - OPT_BOOLEAN('U', "--all-user", &all_user, "collect only user level data"), |
4232 | - OPT_BOOLEAN('K', "--all-kernel", &all_kernel, "collect only kernel level data"), |
4233 | + OPT_BOOLEAN('U', "all-user", &all_user, "collect only user level data"), |
4234 | + OPT_BOOLEAN('K', "all-kernel", &all_kernel, "collect only kernel level data"), |
4235 | OPT_END() |
4236 | }; |
4237 | |
4238 | diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c |
4239 | index c298bd3e1d90..21f8a81797a0 100644 |
4240 | --- a/tools/perf/builtin-trace.c |
4241 | +++ b/tools/perf/builtin-trace.c |
4242 | @@ -1452,7 +1452,7 @@ static int trace__printf_interrupted_entry(struct trace *trace, struct perf_samp |
4243 | |
4244 | duration = sample->time - ttrace->entry_time; |
4245 | |
4246 | - printed = trace__fprintf_entry_head(trace, trace->current, duration, sample->time, trace->output); |
4247 | + printed = trace__fprintf_entry_head(trace, trace->current, duration, ttrace->entry_time, trace->output); |
4248 | printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str); |
4249 | ttrace->entry_pending = false; |
4250 | |
4251 | @@ -1499,7 +1499,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel, |
4252 | |
4253 | if (sc->is_exit) { |
4254 | if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) { |
4255 | - trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output); |
4256 | + trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, trace->output); |
4257 | fprintf(trace->output, "%-70s)\n", ttrace->entry_str); |
4258 | } |
4259 | } else { |
4260 | @@ -1592,7 +1592,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel, |
4261 | if (trace->summary_only) |
4262 | goto out; |
4263 | |
4264 | - trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output); |
4265 | + trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, trace->output); |
4266 | |
4267 | if (ttrace->entry_pending) { |
4268 | fprintf(trace->output, "%-70s", ttrace->entry_str); |
4269 | diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c |
4270 | index fd710ab33684..af1cfde6b97b 100644 |
4271 | --- a/tools/perf/trace/beauty/mmap.c |
4272 | +++ b/tools/perf/trace/beauty/mmap.c |
4273 | @@ -42,7 +42,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size, |
4274 | |
4275 | P_MMAP_FLAG(SHARED); |
4276 | P_MMAP_FLAG(PRIVATE); |
4277 | +#ifdef MAP_32BIT |
4278 | P_MMAP_FLAG(32BIT); |
4279 | +#endif |
4280 | P_MMAP_FLAG(ANONYMOUS); |
4281 | P_MMAP_FLAG(DENYWRITE); |
4282 | P_MMAP_FLAG(EXECUTABLE); |
4283 | diff --git a/tools/perf/util/Build b/tools/perf/util/Build |
4284 | index eb60e613d795..1dc67efad634 100644 |
4285 | --- a/tools/perf/util/Build |
4286 | +++ b/tools/perf/util/Build |
4287 | @@ -120,7 +120,7 @@ libperf-y += demangle-rust.o |
4288 | ifdef CONFIG_JITDUMP |
4289 | libperf-$(CONFIG_LIBELF) += jitdump.o |
4290 | libperf-$(CONFIG_LIBELF) += genelf.o |
4291 | -libperf-$(CONFIG_LIBELF) += genelf_debug.o |
4292 | +libperf-$(CONFIG_DWARF) += genelf_debug.o |
4293 | endif |
4294 | |
4295 | CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" |
4296 | diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c |
4297 | index 07fd30bc2f81..ae58b493af45 100644 |
4298 | --- a/tools/perf/util/callchain.c |
4299 | +++ b/tools/perf/util/callchain.c |
4300 | @@ -193,7 +193,6 @@ int perf_callchain_config(const char *var, const char *value) |
4301 | |
4302 | if (!strcmp(var, "record-mode")) |
4303 | return parse_callchain_record_opt(value, &callchain_param); |
4304 | -#ifdef HAVE_DWARF_UNWIND_SUPPORT |
4305 | if (!strcmp(var, "dump-size")) { |
4306 | unsigned long size = 0; |
4307 | int ret; |
4308 | @@ -203,7 +202,6 @@ int perf_callchain_config(const char *var, const char *value) |
4309 | |
4310 | return ret; |
4311 | } |
4312 | -#endif |
4313 | if (!strcmp(var, "print-type")) |
4314 | return parse_callchain_mode(value); |
4315 | if (!strcmp(var, "order")) |
4316 | diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h |
4317 | index 13e75549c440..47cfd1080975 100644 |
4318 | --- a/tools/perf/util/callchain.h |
4319 | +++ b/tools/perf/util/callchain.h |
4320 | @@ -11,11 +11,7 @@ |
4321 | |
4322 | #define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace):\n\n" |
4323 | |
4324 | -#ifdef HAVE_DWARF_UNWIND_SUPPORT |
4325 | # define RECORD_MODE_HELP HELP_PAD "record_mode:\tcall graph recording mode (fp|dwarf|lbr)\n" |
4326 | -#else |
4327 | -# define RECORD_MODE_HELP HELP_PAD "record_mode:\tcall graph recording mode (fp|lbr)\n" |
4328 | -#endif |
4329 | |
4330 | #define RECORD_SIZE_HELP \ |
4331 | HELP_PAD "record_size:\tif record_mode is 'dwarf', max size of stack recording (<bytes>)\n" \ |
4332 | diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c |
4333 | index c1ef805c6a8f..14a73acc549c 100644 |
4334 | --- a/tools/perf/util/genelf.c |
4335 | +++ b/tools/perf/util/genelf.c |
4336 | @@ -19,7 +19,9 @@ |
4337 | #include <limits.h> |
4338 | #include <fcntl.h> |
4339 | #include <err.h> |
4340 | +#ifdef HAVE_DWARF_SUPPORT |
4341 | #include <dwarf.h> |
4342 | +#endif |
4343 | |
4344 | #include "perf.h" |
4345 | #include "genelf.h" |
4346 | @@ -157,7 +159,7 @@ gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *cod |
4347 | int |
4348 | jit_write_elf(int fd, uint64_t load_addr, const char *sym, |
4349 | const void *code, int csize, |
4350 | - void *debug, int nr_debug_entries) |
4351 | + void *debug __maybe_unused, int nr_debug_entries __maybe_unused) |
4352 | { |
4353 | Elf *e; |
4354 | Elf_Data *d; |
4355 | @@ -386,11 +388,14 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym, |
4356 | shdr->sh_size = sizeof(bnote); |
4357 | shdr->sh_entsize = 0; |
4358 | |
4359 | +#ifdef HAVE_DWARF_SUPPORT |
4360 | if (debug && nr_debug_entries) { |
4361 | retval = jit_add_debug_info(e, load_addr, debug, nr_debug_entries); |
4362 | if (retval) |
4363 | goto error; |
4364 | - } else { |
4365 | + } else |
4366 | +#endif |
4367 | + { |
4368 | if (elf_update(e, ELF_C_WRITE) < 0) { |
4369 | warnx("elf_update 4 failed"); |
4370 | goto error; |
4371 | diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h |
4372 | index 2fbeb59c4bdd..5c933ac71451 100644 |
4373 | --- a/tools/perf/util/genelf.h |
4374 | +++ b/tools/perf/util/genelf.h |
4375 | @@ -4,8 +4,10 @@ |
4376 | /* genelf.c */ |
4377 | int jit_write_elf(int fd, uint64_t code_addr, const char *sym, |
4378 | const void *code, int csize, void *debug, int nr_debug_entries); |
4379 | +#ifdef HAVE_DWARF_SUPPORT |
4380 | /* genelf_debug.c */ |
4381 | int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_entries); |
4382 | +#endif |
4383 | |
4384 | #if defined(__arm__) |
4385 | #define GEN_ELF_ARCH EM_ARM |
4386 | diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c |
4387 | index aecff69a510d..f7b35e178582 100644 |
4388 | --- a/tools/perf/util/symbol.c |
4389 | +++ b/tools/perf/util/symbol.c |
4390 | @@ -1459,7 +1459,8 @@ int dso__load(struct dso *dso, struct map *map) |
4391 | * Read the build id if possible. This is required for |
4392 | * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work |
4393 | */ |
4394 | - if (is_regular_file(dso->long_name) && |
4395 | + if (!dso->has_build_id && |
4396 | + is_regular_file(dso->long_name) && |
4397 | filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0) |
4398 | dso__set_build_id(dso, build_id); |
4399 | |
4400 | diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c |
4401 | index 9df61059a85d..a2fd6e79d5a5 100644 |
4402 | --- a/tools/perf/util/trace-event-scripting.c |
4403 | +++ b/tools/perf/util/trace-event-scripting.c |
4404 | @@ -95,7 +95,8 @@ static void register_python_scripting(struct scripting_ops *scripting_ops) |
4405 | if (err) |
4406 | die("error registering py script extension"); |
4407 | |
4408 | - scripting_context = malloc(sizeof(struct scripting_context)); |
4409 | + if (scripting_context == NULL) |
4410 | + scripting_context = malloc(sizeof(*scripting_context)); |
4411 | } |
4412 | |
4413 | #ifdef NO_LIBPYTHON |
4414 | @@ -159,7 +160,8 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops) |
4415 | if (err) |
4416 | die("error registering pl script extension"); |
4417 | |
4418 | - scripting_context = malloc(sizeof(struct scripting_context)); |
4419 | + if (scripting_context == NULL) |
4420 | + scripting_context = malloc(sizeof(*scripting_context)); |
4421 | } |
4422 | |
4423 | #ifdef NO_LIBPERL |
4424 | diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c |
4425 | index c22860ab9733..30e1ac62e8cb 100644 |
4426 | --- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c |
4427 | +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c |
4428 | @@ -66,7 +66,7 @@ int pmc56_overflow(void) |
4429 | |
4430 | FAIL_IF(ebb_event_enable(&event)); |
4431 | |
4432 | - mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); |
4433 | + mtspr(SPRN_PMC2, pmc_sample_period(sample_period)); |
4434 | mtspr(SPRN_PMC5, 0); |
4435 | mtspr(SPRN_PMC6, 0); |
4436 | |
4437 | diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh |
4438 | index 2e69ca812b4c..29b0d3920bfc 100755 |
4439 | --- a/tools/virtio/ringtest/run-on-all.sh |
4440 | +++ b/tools/virtio/ringtest/run-on-all.sh |
4441 | @@ -1,12 +1,13 @@ |
4442 | #!/bin/sh |
4443 | |
4444 | +CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#') |
4445 | #use last CPU for host. Why not the first? |
4446 | #many devices tend to use cpu0 by default so |
4447 | #it tends to be busier |
4448 | -HOST_AFFINITY=$(lscpu -p=cpu | tail -1) |
4449 | +HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1) |
4450 | |
4451 | #run command on all cpus |
4452 | -for cpu in $(seq 0 $HOST_AFFINITY) |
4453 | +for cpu in $CPUS_ONLINE |
4454 | do |
4455 | #Don't run guest and host on same CPU |
4456 | #It actually works ok if using signalling |
4457 | diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c |
4458 | index 8cebfbc19e90..539d3f5cb619 100644 |
4459 | --- a/virt/kvm/arm/vgic/vgic-init.c |
4460 | +++ b/virt/kvm/arm/vgic/vgic-init.c |
4461 | @@ -268,15 +268,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm) |
4462 | { |
4463 | struct vgic_dist *dist = &kvm->arch.vgic; |
4464 | |
4465 | - mutex_lock(&kvm->lock); |
4466 | - |
4467 | dist->ready = false; |
4468 | dist->initialized = false; |
4469 | |
4470 | kfree(dist->spis); |
4471 | dist->nr_spis = 0; |
4472 | - |
4473 | - mutex_unlock(&kvm->lock); |
4474 | } |
4475 | |
4476 | void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) |
4477 | @@ -286,7 +282,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) |
4478 | INIT_LIST_HEAD(&vgic_cpu->ap_list_head); |
4479 | } |
4480 | |
4481 | -void kvm_vgic_destroy(struct kvm *kvm) |
4482 | +/* To be called with kvm->lock held */ |
4483 | +static void __kvm_vgic_destroy(struct kvm *kvm) |
4484 | { |
4485 | struct kvm_vcpu *vcpu; |
4486 | int i; |
4487 | @@ -297,6 +294,13 @@ void kvm_vgic_destroy(struct kvm *kvm) |
4488 | kvm_vgic_vcpu_destroy(vcpu); |
4489 | } |
4490 | |
4491 | +void kvm_vgic_destroy(struct kvm *kvm) |
4492 | +{ |
4493 | + mutex_lock(&kvm->lock); |
4494 | + __kvm_vgic_destroy(kvm); |
4495 | + mutex_unlock(&kvm->lock); |
4496 | +} |
4497 | + |
4498 | /** |
4499 | * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest |
4500 | * is a GICv2. A GICv3 must be explicitly initialized by the guest using the |
4501 | @@ -348,6 +352,10 @@ int kvm_vgic_map_resources(struct kvm *kvm) |
4502 | ret = vgic_v2_map_resources(kvm); |
4503 | else |
4504 | ret = vgic_v3_map_resources(kvm); |
4505 | + |
4506 | + if (ret) |
4507 | + __kvm_vgic_destroy(kvm); |
4508 | + |
4509 | out: |
4510 | mutex_unlock(&kvm->lock); |
4511 | return ret; |
4512 | diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c |
4513 | index 9bab86757fa4..834137e7b83f 100644 |
4514 | --- a/virt/kvm/arm/vgic/vgic-v2.c |
4515 | +++ b/virt/kvm/arm/vgic/vgic-v2.c |
4516 | @@ -293,8 +293,6 @@ int vgic_v2_map_resources(struct kvm *kvm) |
4517 | dist->ready = true; |
4518 | |
4519 | out: |
4520 | - if (ret) |
4521 | - kvm_vgic_destroy(kvm); |
4522 | return ret; |
4523 | } |
4524 | |
4525 | diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c |
4526 | index 5c9f9745e6ca..e6b03fd8c374 100644 |
4527 | --- a/virt/kvm/arm/vgic/vgic-v3.c |
4528 | +++ b/virt/kvm/arm/vgic/vgic-v3.c |
4529 | @@ -302,8 +302,6 @@ int vgic_v3_map_resources(struct kvm *kvm) |
4530 | dist->ready = true; |
4531 | |
4532 | out: |
4533 | - if (ret) |
4534 | - kvm_vgic_destroy(kvm); |
4535 | return ret; |
4536 | } |
4537 |