Contents of /trunk/kernel-alx/patches-5.4/0174-5.4.75-all-fixes.patch
Parent Directory | Revision Log
Revision 3635 -
(show annotations)
(download)
Mon Oct 24 12:34:12 2022 UTC (18 months, 1 week ago) by niro
File size: 306037 byte(s)
-sync kernel patches
1 | diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt |
2 | index 988a0d2535b25..5b4753e602def 100644 |
3 | --- a/Documentation/admin-guide/kernel-parameters.txt |
4 | +++ b/Documentation/admin-guide/kernel-parameters.txt |
5 | @@ -5462,6 +5462,14 @@ |
6 | as generic guest with no PV drivers. Currently support |
7 | XEN HVM, KVM, HYPER_V and VMWARE guest. |
8 | |
9 | + xen.event_eoi_delay= [XEN] |
10 | + How long to delay EOI handling in case of event |
11 | + storms (jiffies). Default is 10. |
12 | + |
13 | + xen.event_loop_timeout= [XEN] |
14 | + After which time (jiffies) the event handling loop |
15 | + should start to delay EOI handling. Default is 2. |
16 | + |
17 | xirc2ps_cs= [NET,PCMCIA] |
18 | Format: |
19 | <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]] |
20 | diff --git a/Documentation/media/uapi/v4l/colorspaces-defs.rst b/Documentation/media/uapi/v4l/colorspaces-defs.rst |
21 | index e122bbe3d799d..aabb08130354a 100644 |
22 | --- a/Documentation/media/uapi/v4l/colorspaces-defs.rst |
23 | +++ b/Documentation/media/uapi/v4l/colorspaces-defs.rst |
24 | @@ -36,8 +36,7 @@ whole range, 0-255, dividing the angular value by 1.41. The enum |
25 | :c:type:`v4l2_hsv_encoding` specifies which encoding is used. |
26 | |
27 | .. note:: The default R'G'B' quantization is full range for all |
28 | - colorspaces except for BT.2020 which uses limited range R'G'B' |
29 | - quantization. |
30 | + colorspaces. HSV formats are always full range. |
31 | |
32 | .. tabularcolumns:: |p{6.7cm}|p{10.8cm}| |
33 | |
34 | @@ -169,8 +168,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum |
35 | - Details |
36 | * - ``V4L2_QUANTIZATION_DEFAULT`` |
37 | - Use the default quantization encoding as defined by the |
38 | - colorspace. This is always full range for R'G'B' (except for the |
39 | - BT.2020 colorspace) and HSV. It is usually limited range for Y'CbCr. |
40 | + colorspace. This is always full range for R'G'B' and HSV. |
41 | + It is usually limited range for Y'CbCr. |
42 | * - ``V4L2_QUANTIZATION_FULL_RANGE`` |
43 | - Use the full range quantization encoding. I.e. the range [0…1] is |
44 | mapped to [0…255] (with possible clipping to [1…254] to avoid the |
45 | @@ -180,4 +179,4 @@ whole range, 0-255, dividing the angular value by 1.41. The enum |
46 | * - ``V4L2_QUANTIZATION_LIM_RANGE`` |
47 | - Use the limited range quantization encoding. I.e. the range [0…1] |
48 | is mapped to [16…235]. Cb and Cr are mapped from [-0.5…0.5] to |
49 | - [16…240]. |
50 | + [16…240]. Limited Range cannot be used with HSV. |
51 | diff --git a/Documentation/media/uapi/v4l/colorspaces-details.rst b/Documentation/media/uapi/v4l/colorspaces-details.rst |
52 | index 8b0ba3668101d..fd0cf57691d87 100644 |
53 | --- a/Documentation/media/uapi/v4l/colorspaces-details.rst |
54 | +++ b/Documentation/media/uapi/v4l/colorspaces-details.rst |
55 | @@ -377,9 +377,8 @@ Colorspace BT.2020 (V4L2_COLORSPACE_BT2020) |
56 | The :ref:`itu2020` standard defines the colorspace used by Ultra-high |
57 | definition television (UHDTV). The default transfer function is |
58 | ``V4L2_XFER_FUNC_709``. The default Y'CbCr encoding is |
59 | -``V4L2_YCBCR_ENC_BT2020``. The default R'G'B' quantization is limited |
60 | -range (!), and so is the default Y'CbCr quantization. The chromaticities |
61 | -of the primary colors and the white reference are: |
62 | +``V4L2_YCBCR_ENC_BT2020``. The default Y'CbCr quantization is limited range. |
63 | +The chromaticities of the primary colors and the white reference are: |
64 | |
65 | |
66 | |
67 | diff --git a/Makefile b/Makefile |
68 | index 3be5a9c352b9c..d38d0cab8e9aa 100644 |
69 | --- a/Makefile |
70 | +++ b/Makefile |
71 | @@ -1,7 +1,7 @@ |
72 | # SPDX-License-Identifier: GPL-2.0 |
73 | VERSION = 5 |
74 | PATCHLEVEL = 4 |
75 | -SUBLEVEL = 74 |
76 | +SUBLEVEL = 75 |
77 | EXTRAVERSION = |
78 | NAME = Kleptomaniac Octopus |
79 | |
80 | diff --git a/arch/Kconfig b/arch/Kconfig |
81 | index 238dccfa76910..84653a823d3b0 100644 |
82 | --- a/arch/Kconfig |
83 | +++ b/arch/Kconfig |
84 | @@ -405,6 +405,13 @@ config MMU_GATHER_NO_RANGE |
85 | config HAVE_MMU_GATHER_NO_GATHER |
86 | bool |
87 | |
88 | +config ARCH_WANT_IRQS_OFF_ACTIVATE_MM |
89 | + bool |
90 | + help |
91 | + Temporary select until all architectures can be converted to have |
92 | + irqs disabled over activate_mm. Architectures that do IPI based TLB |
93 | + shootdowns should enable this. |
94 | + |
95 | config ARCH_HAVE_NMI_SAFE_CMPXCHG |
96 | bool |
97 | |
98 | diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi |
99 | index 6ec1fcdfc0d7f..92247288d0562 100644 |
100 | --- a/arch/arc/boot/dts/axc001.dtsi |
101 | +++ b/arch/arc/boot/dts/axc001.dtsi |
102 | @@ -85,7 +85,7 @@ |
103 | * avoid duplicating the MB dtsi file given that IRQ from |
104 | * this intc to cpu intc are different for axs101 and axs103 |
105 | */ |
106 | - mb_intc: dw-apb-ictl@e0012000 { |
107 | + mb_intc: interrupt-controller@e0012000 { |
108 | #interrupt-cells = <1>; |
109 | compatible = "snps,dw-apb-ictl"; |
110 | reg = < 0x0 0xe0012000 0x0 0x200 >; |
111 | diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi |
112 | index ac8e1b463a709..cd1edcf4f95ef 100644 |
113 | --- a/arch/arc/boot/dts/axc003.dtsi |
114 | +++ b/arch/arc/boot/dts/axc003.dtsi |
115 | @@ -129,7 +129,7 @@ |
116 | * avoid duplicating the MB dtsi file given that IRQ from |
117 | * this intc to cpu intc are different for axs101 and axs103 |
118 | */ |
119 | - mb_intc: dw-apb-ictl@e0012000 { |
120 | + mb_intc: interrupt-controller@e0012000 { |
121 | #interrupt-cells = <1>; |
122 | compatible = "snps,dw-apb-ictl"; |
123 | reg = < 0x0 0xe0012000 0x0 0x200 >; |
124 | diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi |
125 | index 9da21e7fd246f..70779386ca796 100644 |
126 | --- a/arch/arc/boot/dts/axc003_idu.dtsi |
127 | +++ b/arch/arc/boot/dts/axc003_idu.dtsi |
128 | @@ -135,7 +135,7 @@ |
129 | * avoid duplicating the MB dtsi file given that IRQ from |
130 | * this intc to cpu intc are different for axs101 and axs103 |
131 | */ |
132 | - mb_intc: dw-apb-ictl@e0012000 { |
133 | + mb_intc: interrupt-controller@e0012000 { |
134 | #interrupt-cells = <1>; |
135 | compatible = "snps,dw-apb-ictl"; |
136 | reg = < 0x0 0xe0012000 0x0 0x200 >; |
137 | diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi |
138 | index f8be7ba8dad49..c21d0eb07bf67 100644 |
139 | --- a/arch/arc/boot/dts/vdk_axc003.dtsi |
140 | +++ b/arch/arc/boot/dts/vdk_axc003.dtsi |
141 | @@ -46,7 +46,7 @@ |
142 | |
143 | }; |
144 | |
145 | - mb_intc: dw-apb-ictl@e0012000 { |
146 | + mb_intc: interrupt-controller@e0012000 { |
147 | #interrupt-cells = <1>; |
148 | compatible = "snps,dw-apb-ictl"; |
149 | reg = < 0xe0012000 0x200 >; |
150 | diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi |
151 | index 0afa3e53a4e39..4d348853ac7c5 100644 |
152 | --- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi |
153 | +++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi |
154 | @@ -54,7 +54,7 @@ |
155 | |
156 | }; |
157 | |
158 | - mb_intc: dw-apb-ictl@e0012000 { |
159 | + mb_intc: interrupt-controller@e0012000 { |
160 | #interrupt-cells = <1>; |
161 | compatible = "snps,dw-apb-ictl"; |
162 | reg = < 0xe0012000 0x200 >; |
163 | diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c |
164 | index 79849f37e782c..145722f80c9b7 100644 |
165 | --- a/arch/arc/kernel/perf_event.c |
166 | +++ b/arch/arc/kernel/perf_event.c |
167 | @@ -562,7 +562,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev) |
168 | { |
169 | struct arc_reg_pct_build pct_bcr; |
170 | struct arc_reg_cc_build cc_bcr; |
171 | - int i, has_interrupts, irq; |
172 | + int i, has_interrupts, irq = -1; |
173 | int counter_size; /* in bits */ |
174 | |
175 | union cc_name { |
176 | @@ -637,19 +637,28 @@ static int arc_pmu_device_probe(struct platform_device *pdev) |
177 | .attr_groups = arc_pmu->attr_groups, |
178 | }; |
179 | |
180 | - if (has_interrupts && (irq = platform_get_irq(pdev, 0) >= 0)) { |
181 | + if (has_interrupts) { |
182 | + irq = platform_get_irq(pdev, 0); |
183 | + if (irq >= 0) { |
184 | + int ret; |
185 | |
186 | - arc_pmu->irq = irq; |
187 | + arc_pmu->irq = irq; |
188 | |
189 | - /* intc map function ensures irq_set_percpu_devid() called */ |
190 | - request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters", |
191 | - this_cpu_ptr(&arc_pmu_cpu)); |
192 | + /* intc map function ensures irq_set_percpu_devid() called */ |
193 | + ret = request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters", |
194 | + this_cpu_ptr(&arc_pmu_cpu)); |
195 | + |
196 | + if (!ret) |
197 | + on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1); |
198 | + else |
199 | + irq = -1; |
200 | + } |
201 | |
202 | - on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1); |
203 | - } else { |
204 | - arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; |
205 | } |
206 | |
207 | + if (irq == -1) |
208 | + arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; |
209 | + |
210 | /* |
211 | * perf parser doesn't really like '-' symbol in events name, so let's |
212 | * use '_' in arc pct name as it goes to kernel PMU event prefix. |
213 | diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig |
214 | index 05c9bbfe444df..9aa88715f196c 100644 |
215 | --- a/arch/arm/Kconfig |
216 | +++ b/arch/arm/Kconfig |
217 | @@ -507,8 +507,10 @@ config ARCH_S3C24XX |
218 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
219 | select HAVE_S3C_RTC if RTC_CLASS |
220 | select NEED_MACH_IO_H |
221 | + select S3C2410_WATCHDOG |
222 | select SAMSUNG_ATAGS |
223 | select USE_OF |
224 | + select WATCHDOG |
225 | help |
226 | Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443 |
227 | and S3C2450 SoCs based systems, such as the Simtec Electronics BAST |
228 | diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts |
229 | index 2b760f90f38c8..5375c6699843f 100644 |
230 | --- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts |
231 | +++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts |
232 | @@ -192,6 +192,7 @@ |
233 | fixed-link { |
234 | speed = <1000>; |
235 | full-duplex; |
236 | + pause; |
237 | }; |
238 | }; |
239 | }; |
240 | diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi |
241 | index e5506ab669fc6..904852006b9b1 100644 |
242 | --- a/arch/arm/boot/dts/omap4.dtsi |
243 | +++ b/arch/arm/boot/dts/omap4.dtsi |
244 | @@ -328,7 +328,7 @@ |
245 | status = "disabled"; |
246 | }; |
247 | |
248 | - target-module@56000000 { |
249 | + sgx_module: target-module@56000000 { |
250 | compatible = "ti,sysc-omap4", "ti,sysc"; |
251 | reg = <0x5600fe00 0x4>, |
252 | <0x5600fe10 0x4>; |
253 | diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi |
254 | index cbcdcb4e7d1c2..86b9caf461dfa 100644 |
255 | --- a/arch/arm/boot/dts/omap443x.dtsi |
256 | +++ b/arch/arm/boot/dts/omap443x.dtsi |
257 | @@ -74,3 +74,13 @@ |
258 | }; |
259 | |
260 | /include/ "omap443x-clocks.dtsi" |
261 | + |
262 | +/* |
263 | + * Use dpll_per for sgx at 153.6MHz like droid4 stock v3.0.8 Android kernel |
264 | + */ |
265 | +&sgx_module { |
266 | + assigned-clocks = <&l3_gfx_clkctrl OMAP4_GPU_CLKCTRL 24>, |
267 | + <&dpll_per_m7x2_ck>; |
268 | + assigned-clock-rates = <0>, <153600000>; |
269 | + assigned-clock-parents = <&dpll_per_m7x2_ck>; |
270 | +}; |
271 | diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi |
272 | index 2ad642f51fd92..61822afa30ab3 100644 |
273 | --- a/arch/arm/boot/dts/s5pv210.dtsi |
274 | +++ b/arch/arm/boot/dts/s5pv210.dtsi |
275 | @@ -52,34 +52,26 @@ |
276 | }; |
277 | }; |
278 | |
279 | + xxti: oscillator-0 { |
280 | + compatible = "fixed-clock"; |
281 | + clock-frequency = <0>; |
282 | + clock-output-names = "xxti"; |
283 | + #clock-cells = <0>; |
284 | + }; |
285 | + |
286 | + xusbxti: oscillator-1 { |
287 | + compatible = "fixed-clock"; |
288 | + clock-frequency = <0>; |
289 | + clock-output-names = "xusbxti"; |
290 | + #clock-cells = <0>; |
291 | + }; |
292 | + |
293 | soc { |
294 | compatible = "simple-bus"; |
295 | #address-cells = <1>; |
296 | #size-cells = <1>; |
297 | ranges; |
298 | |
299 | - external-clocks { |
300 | - compatible = "simple-bus"; |
301 | - #address-cells = <1>; |
302 | - #size-cells = <0>; |
303 | - |
304 | - xxti: oscillator@0 { |
305 | - compatible = "fixed-clock"; |
306 | - reg = <0>; |
307 | - clock-frequency = <0>; |
308 | - clock-output-names = "xxti"; |
309 | - #clock-cells = <0>; |
310 | - }; |
311 | - |
312 | - xusbxti: oscillator@1 { |
313 | - compatible = "fixed-clock"; |
314 | - reg = <1>; |
315 | - clock-frequency = <0>; |
316 | - clock-output-names = "xusbxti"; |
317 | - #clock-cells = <0>; |
318 | - }; |
319 | - }; |
320 | - |
321 | onenand: onenand@b0600000 { |
322 | compatible = "samsung,s5pv210-onenand"; |
323 | reg = <0xb0600000 0x2000>, |
324 | @@ -100,19 +92,16 @@ |
325 | }; |
326 | |
327 | clocks: clock-controller@e0100000 { |
328 | - compatible = "samsung,s5pv210-clock", "simple-bus"; |
329 | + compatible = "samsung,s5pv210-clock"; |
330 | reg = <0xe0100000 0x10000>; |
331 | clock-names = "xxti", "xusbxti"; |
332 | clocks = <&xxti>, <&xusbxti>; |
333 | #clock-cells = <1>; |
334 | - #address-cells = <1>; |
335 | - #size-cells = <1>; |
336 | - ranges; |
337 | + }; |
338 | |
339 | - pmu_syscon: syscon@e0108000 { |
340 | - compatible = "samsung-s5pv210-pmu", "syscon"; |
341 | - reg = <0xe0108000 0x8000>; |
342 | - }; |
343 | + pmu_syscon: syscon@e0108000 { |
344 | + compatible = "samsung-s5pv210-pmu", "syscon"; |
345 | + reg = <0xe0108000 0x8000>; |
346 | }; |
347 | |
348 | pinctrl0: pinctrl@e0200000 { |
349 | @@ -128,35 +117,28 @@ |
350 | }; |
351 | }; |
352 | |
353 | - amba { |
354 | - #address-cells = <1>; |
355 | - #size-cells = <1>; |
356 | - compatible = "simple-bus"; |
357 | - ranges; |
358 | - |
359 | - pdma0: dma@e0900000 { |
360 | - compatible = "arm,pl330", "arm,primecell"; |
361 | - reg = <0xe0900000 0x1000>; |
362 | - interrupt-parent = <&vic0>; |
363 | - interrupts = <19>; |
364 | - clocks = <&clocks CLK_PDMA0>; |
365 | - clock-names = "apb_pclk"; |
366 | - #dma-cells = <1>; |
367 | - #dma-channels = <8>; |
368 | - #dma-requests = <32>; |
369 | - }; |
370 | + pdma0: dma@e0900000 { |
371 | + compatible = "arm,pl330", "arm,primecell"; |
372 | + reg = <0xe0900000 0x1000>; |
373 | + interrupt-parent = <&vic0>; |
374 | + interrupts = <19>; |
375 | + clocks = <&clocks CLK_PDMA0>; |
376 | + clock-names = "apb_pclk"; |
377 | + #dma-cells = <1>; |
378 | + #dma-channels = <8>; |
379 | + #dma-requests = <32>; |
380 | + }; |
381 | |
382 | - pdma1: dma@e0a00000 { |
383 | - compatible = "arm,pl330", "arm,primecell"; |
384 | - reg = <0xe0a00000 0x1000>; |
385 | - interrupt-parent = <&vic0>; |
386 | - interrupts = <20>; |
387 | - clocks = <&clocks CLK_PDMA1>; |
388 | - clock-names = "apb_pclk"; |
389 | - #dma-cells = <1>; |
390 | - #dma-channels = <8>; |
391 | - #dma-requests = <32>; |
392 | - }; |
393 | + pdma1: dma@e0a00000 { |
394 | + compatible = "arm,pl330", "arm,primecell"; |
395 | + reg = <0xe0a00000 0x1000>; |
396 | + interrupt-parent = <&vic0>; |
397 | + interrupts = <20>; |
398 | + clocks = <&clocks CLK_PDMA1>; |
399 | + clock-names = "apb_pclk"; |
400 | + #dma-cells = <1>; |
401 | + #dma-channels = <8>; |
402 | + #dma-requests = <32>; |
403 | }; |
404 | |
405 | spi0: spi@e1300000 { |
406 | @@ -229,43 +211,36 @@ |
407 | status = "disabled"; |
408 | }; |
409 | |
410 | - audio-subsystem { |
411 | - compatible = "samsung,s5pv210-audss", "simple-bus"; |
412 | - #address-cells = <1>; |
413 | - #size-cells = <1>; |
414 | - ranges; |
415 | - |
416 | - clk_audss: clock-controller@eee10000 { |
417 | - compatible = "samsung,s5pv210-audss-clock"; |
418 | - reg = <0xeee10000 0x1000>; |
419 | - clock-names = "hclk", "xxti", |
420 | - "fout_epll", |
421 | - "sclk_audio0"; |
422 | - clocks = <&clocks DOUT_HCLKP>, <&xxti>, |
423 | - <&clocks FOUT_EPLL>, |
424 | - <&clocks SCLK_AUDIO0>; |
425 | - #clock-cells = <1>; |
426 | - }; |
427 | + clk_audss: clock-controller@eee10000 { |
428 | + compatible = "samsung,s5pv210-audss-clock"; |
429 | + reg = <0xeee10000 0x1000>; |
430 | + clock-names = "hclk", "xxti", |
431 | + "fout_epll", |
432 | + "sclk_audio0"; |
433 | + clocks = <&clocks DOUT_HCLKP>, <&xxti>, |
434 | + <&clocks FOUT_EPLL>, |
435 | + <&clocks SCLK_AUDIO0>; |
436 | + #clock-cells = <1>; |
437 | + }; |
438 | |
439 | - i2s0: i2s@eee30000 { |
440 | - compatible = "samsung,s5pv210-i2s"; |
441 | - reg = <0xeee30000 0x1000>; |
442 | - interrupt-parent = <&vic2>; |
443 | - interrupts = <16>; |
444 | - dma-names = "rx", "tx", "tx-sec"; |
445 | - dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>; |
446 | - clock-names = "iis", |
447 | - "i2s_opclk0", |
448 | - "i2s_opclk1"; |
449 | - clocks = <&clk_audss CLK_I2S>, |
450 | - <&clk_audss CLK_I2S>, |
451 | - <&clk_audss CLK_DOUT_AUD_BUS>; |
452 | - samsung,idma-addr = <0xc0010000>; |
453 | - pinctrl-names = "default"; |
454 | - pinctrl-0 = <&i2s0_bus>; |
455 | - #sound-dai-cells = <0>; |
456 | - status = "disabled"; |
457 | - }; |
458 | + i2s0: i2s@eee30000 { |
459 | + compatible = "samsung,s5pv210-i2s"; |
460 | + reg = <0xeee30000 0x1000>; |
461 | + interrupt-parent = <&vic2>; |
462 | + interrupts = <16>; |
463 | + dma-names = "rx", "tx", "tx-sec"; |
464 | + dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>; |
465 | + clock-names = "iis", |
466 | + "i2s_opclk0", |
467 | + "i2s_opclk1"; |
468 | + clocks = <&clk_audss CLK_I2S>, |
469 | + <&clk_audss CLK_I2S>, |
470 | + <&clk_audss CLK_DOUT_AUD_BUS>; |
471 | + samsung,idma-addr = <0xc0010000>; |
472 | + pinctrl-names = "default"; |
473 | + pinctrl-0 = <&i2s0_bus>; |
474 | + #sound-dai-cells = <0>; |
475 | + status = "disabled"; |
476 | }; |
477 | |
478 | i2s1: i2s@e2100000 { |
479 | diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c |
480 | index 5f95e4b911a0b..7021ef0b4e71b 100644 |
481 | --- a/arch/arm/kernel/hw_breakpoint.c |
482 | +++ b/arch/arm/kernel/hw_breakpoint.c |
483 | @@ -680,6 +680,40 @@ static void disable_single_step(struct perf_event *bp) |
484 | arch_install_hw_breakpoint(bp); |
485 | } |
486 | |
487 | +/* |
488 | + * Arm32 hardware does not always report a watchpoint hit address that matches |
489 | + * one of the watchpoints set. It can also report an address "near" the |
490 | + * watchpoint if a single instruction access both watched and unwatched |
491 | + * addresses. There is no straight-forward way, short of disassembling the |
492 | + * offending instruction, to map that address back to the watchpoint. This |
493 | + * function computes the distance of the memory access from the watchpoint as a |
494 | + * heuristic for the likelyhood that a given access triggered the watchpoint. |
495 | + * |
496 | + * See this same function in the arm64 platform code, which has the same |
497 | + * problem. |
498 | + * |
499 | + * The function returns the distance of the address from the bytes watched by |
500 | + * the watchpoint. In case of an exact match, it returns 0. |
501 | + */ |
502 | +static u32 get_distance_from_watchpoint(unsigned long addr, u32 val, |
503 | + struct arch_hw_breakpoint_ctrl *ctrl) |
504 | +{ |
505 | + u32 wp_low, wp_high; |
506 | + u32 lens, lene; |
507 | + |
508 | + lens = __ffs(ctrl->len); |
509 | + lene = __fls(ctrl->len); |
510 | + |
511 | + wp_low = val + lens; |
512 | + wp_high = val + lene; |
513 | + if (addr < wp_low) |
514 | + return wp_low - addr; |
515 | + else if (addr > wp_high) |
516 | + return addr - wp_high; |
517 | + else |
518 | + return 0; |
519 | +} |
520 | + |
521 | static int watchpoint_fault_on_uaccess(struct pt_regs *regs, |
522 | struct arch_hw_breakpoint *info) |
523 | { |
524 | @@ -689,23 +723,25 @@ static int watchpoint_fault_on_uaccess(struct pt_regs *regs, |
525 | static void watchpoint_handler(unsigned long addr, unsigned int fsr, |
526 | struct pt_regs *regs) |
527 | { |
528 | - int i, access; |
529 | - u32 val, ctrl_reg, alignment_mask; |
530 | + int i, access, closest_match = 0; |
531 | + u32 min_dist = -1, dist; |
532 | + u32 val, ctrl_reg; |
533 | struct perf_event *wp, **slots; |
534 | struct arch_hw_breakpoint *info; |
535 | struct arch_hw_breakpoint_ctrl ctrl; |
536 | |
537 | slots = this_cpu_ptr(wp_on_reg); |
538 | |
539 | + /* |
540 | + * Find all watchpoints that match the reported address. If no exact |
541 | + * match is found. Attribute the hit to the closest watchpoint. |
542 | + */ |
543 | + rcu_read_lock(); |
544 | for (i = 0; i < core_num_wrps; ++i) { |
545 | - rcu_read_lock(); |
546 | - |
547 | wp = slots[i]; |
548 | - |
549 | if (wp == NULL) |
550 | - goto unlock; |
551 | + continue; |
552 | |
553 | - info = counter_arch_bp(wp); |
554 | /* |
555 | * The DFAR is an unknown value on debug architectures prior |
556 | * to 7.1. Since we only allow a single watchpoint on these |
557 | @@ -714,33 +750,31 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, |
558 | */ |
559 | if (debug_arch < ARM_DEBUG_ARCH_V7_1) { |
560 | BUG_ON(i > 0); |
561 | + info = counter_arch_bp(wp); |
562 | info->trigger = wp->attr.bp_addr; |
563 | } else { |
564 | - if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) |
565 | - alignment_mask = 0x7; |
566 | - else |
567 | - alignment_mask = 0x3; |
568 | - |
569 | - /* Check if the watchpoint value matches. */ |
570 | - val = read_wb_reg(ARM_BASE_WVR + i); |
571 | - if (val != (addr & ~alignment_mask)) |
572 | - goto unlock; |
573 | - |
574 | - /* Possible match, check the byte address select. */ |
575 | - ctrl_reg = read_wb_reg(ARM_BASE_WCR + i); |
576 | - decode_ctrl_reg(ctrl_reg, &ctrl); |
577 | - if (!((1 << (addr & alignment_mask)) & ctrl.len)) |
578 | - goto unlock; |
579 | - |
580 | /* Check that the access type matches. */ |
581 | if (debug_exception_updates_fsr()) { |
582 | access = (fsr & ARM_FSR_ACCESS_MASK) ? |
583 | HW_BREAKPOINT_W : HW_BREAKPOINT_R; |
584 | if (!(access & hw_breakpoint_type(wp))) |
585 | - goto unlock; |
586 | + continue; |
587 | } |
588 | |
589 | + val = read_wb_reg(ARM_BASE_WVR + i); |
590 | + ctrl_reg = read_wb_reg(ARM_BASE_WCR + i); |
591 | + decode_ctrl_reg(ctrl_reg, &ctrl); |
592 | + dist = get_distance_from_watchpoint(addr, val, &ctrl); |
593 | + if (dist < min_dist) { |
594 | + min_dist = dist; |
595 | + closest_match = i; |
596 | + } |
597 | + /* Is this an exact match? */ |
598 | + if (dist != 0) |
599 | + continue; |
600 | + |
601 | /* We have a winner. */ |
602 | + info = counter_arch_bp(wp); |
603 | info->trigger = addr; |
604 | } |
605 | |
606 | @@ -762,13 +796,23 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, |
607 | * we can single-step over the watchpoint trigger. |
608 | */ |
609 | if (!is_default_overflow_handler(wp)) |
610 | - goto unlock; |
611 | - |
612 | + continue; |
613 | step: |
614 | enable_single_step(wp, instruction_pointer(regs)); |
615 | -unlock: |
616 | - rcu_read_unlock(); |
617 | } |
618 | + |
619 | + if (min_dist > 0 && min_dist != -1) { |
620 | + /* No exact match found. */ |
621 | + wp = slots[closest_match]; |
622 | + info = counter_arch_bp(wp); |
623 | + info->trigger = addr; |
624 | + pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); |
625 | + perf_bp_event(wp, regs); |
626 | + if (is_default_overflow_handler(wp)) |
627 | + enable_single_step(wp, instruction_pointer(regs)); |
628 | + } |
629 | + |
630 | + rcu_read_unlock(); |
631 | } |
632 | |
633 | static void watchpoint_single_step_handler(unsigned long pc) |
634 | diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig |
635 | index 301e572651c0f..790c87ee72716 100644 |
636 | --- a/arch/arm/plat-samsung/Kconfig |
637 | +++ b/arch/arm/plat-samsung/Kconfig |
638 | @@ -241,6 +241,7 @@ config SAMSUNG_PM_DEBUG |
639 | depends on PM && DEBUG_KERNEL |
640 | depends on PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 |
641 | depends on DEBUG_EXYNOS_UART || DEBUG_S3C24XX_UART || DEBUG_S3C2410_UART |
642 | + depends on DEBUG_LL && MMU |
643 | help |
644 | Say Y here if you want verbose debugging from the PM Suspend and |
645 | Resume code. See <file:Documentation/arm/samsung-s3c24xx/suspend.rst> |
646 | diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms |
647 | index 16d761475a860..9dccf4db319b1 100644 |
648 | --- a/arch/arm64/Kconfig.platforms |
649 | +++ b/arch/arm64/Kconfig.platforms |
650 | @@ -54,6 +54,7 @@ config ARCH_BCM_IPROC |
651 | config ARCH_BERLIN |
652 | bool "Marvell Berlin SoC Family" |
653 | select DW_APB_ICTL |
654 | + select DW_APB_TIMER_OF |
655 | select GPIOLIB |
656 | select PINCTRL |
657 | help |
658 | diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi |
659 | index 3ef89171538ff..d8fccf3d4987a 100644 |
660 | --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi |
661 | +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi |
662 | @@ -470,6 +470,7 @@ |
663 | mmc-hs200-1_8v; |
664 | mmc-hs400-1_8v; |
665 | non-removable; |
666 | + full-pwr-cycle-in-suspend; |
667 | status = "okay"; |
668 | }; |
669 | |
670 | diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h |
671 | index d719c6b4dd81c..7140701f65f91 100644 |
672 | --- a/arch/arm64/include/asm/kvm_host.h |
673 | +++ b/arch/arm64/include/asm/kvm_host.h |
674 | @@ -209,6 +209,7 @@ enum vcpu_sysreg { |
675 | #define cp14_DBGWCR0 (DBGWCR0_EL1 * 2) |
676 | #define cp14_DBGWVR0 (DBGWVR0_EL1 * 2) |
677 | #define cp14_DBGDCCINT (MDCCINT_EL1 * 2) |
678 | +#define cp14_DBGVCR (DBGVCR32_EL2 * 2) |
679 | |
680 | #define NR_COPRO_REGS (NR_SYS_REGS * 2) |
681 | |
682 | diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h |
683 | index 626ad01e83bf0..dd870390d639f 100644 |
684 | --- a/arch/arm64/include/asm/numa.h |
685 | +++ b/arch/arm64/include/asm/numa.h |
686 | @@ -25,6 +25,9 @@ const struct cpumask *cpumask_of_node(int node); |
687 | /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ |
688 | static inline const struct cpumask *cpumask_of_node(int node) |
689 | { |
690 | + if (node == NUMA_NO_NODE) |
691 | + return cpu_all_mask; |
692 | + |
693 | return node_to_cpumask_map[node]; |
694 | } |
695 | #endif |
696 | diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c |
697 | index fa9528dfd0ce3..113903db666c0 100644 |
698 | --- a/arch/arm64/kernel/topology.c |
699 | +++ b/arch/arm64/kernel/topology.c |
700 | @@ -35,21 +35,23 @@ void store_cpu_topology(unsigned int cpuid) |
701 | if (mpidr & MPIDR_UP_BITMASK) |
702 | return; |
703 | |
704 | - /* Create cpu topology mapping based on MPIDR. */ |
705 | - if (mpidr & MPIDR_MT_BITMASK) { |
706 | - /* Multiprocessor system : Multi-threads per core */ |
707 | - cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
708 | - cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
709 | - cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) | |
710 | - MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8; |
711 | - } else { |
712 | - /* Multiprocessor system : Single-thread per core */ |
713 | - cpuid_topo->thread_id = -1; |
714 | - cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
715 | - cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) | |
716 | - MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 | |
717 | - MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16; |
718 | - } |
719 | + /* |
720 | + * This would be the place to create cpu topology based on MPIDR. |
721 | + * |
722 | + * However, it cannot be trusted to depict the actual topology; some |
723 | + * pieces of the architecture enforce an artificial cap on Aff0 values |
724 | + * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an |
725 | + * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up |
726 | + * having absolutely no relationship to the actual underlying system |
727 | + * topology, and cannot be reasonably used as core / package ID. |
728 | + * |
729 | + * If the MT bit is set, Aff0 *could* be used to define a thread ID, but |
730 | + * we still wouldn't be able to obtain a sane core ID. This means we |
731 | + * need to entirely ignore MPIDR for any topology deduction. |
732 | + */ |
733 | + cpuid_topo->thread_id = -1; |
734 | + cpuid_topo->core_id = cpuid; |
735 | + cpuid_topo->package_id = cpu_to_node(cpuid); |
736 | |
737 | pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", |
738 | cpuid, cpuid_topo->package_id, cpuid_topo->core_id, |
739 | diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c |
740 | index d43f44b3377e0..0ed7598dfa6a0 100644 |
741 | --- a/arch/arm64/kvm/sys_regs.c |
742 | +++ b/arch/arm64/kvm/sys_regs.c |
743 | @@ -1746,9 +1746,9 @@ static const struct sys_reg_desc cp14_regs[] = { |
744 | { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, |
745 | DBG_BCR_BVR_WCR_WVR(1), |
746 | /* DBGDCCINT */ |
747 | - { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 }, |
748 | + { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT }, |
749 | /* DBGDSCRext */ |
750 | - { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 }, |
751 | + { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext }, |
752 | DBG_BCR_BVR_WCR_WVR(2), |
753 | /* DBGDTR[RT]Xint */ |
754 | { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, |
755 | @@ -1763,7 +1763,7 @@ static const struct sys_reg_desc cp14_regs[] = { |
756 | { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, |
757 | DBG_BCR_BVR_WCR_WVR(6), |
758 | /* DBGVCR */ |
759 | - { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 }, |
760 | + { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR }, |
761 | DBG_BCR_BVR_WCR_WVR(7), |
762 | DBG_BCR_BVR_WCR_WVR(8), |
763 | DBG_BCR_BVR_WCR_WVR(9), |
764 | diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c |
765 | index 4decf16597008..53ebb4babf3a7 100644 |
766 | --- a/arch/arm64/mm/numa.c |
767 | +++ b/arch/arm64/mm/numa.c |
768 | @@ -46,7 +46,11 @@ EXPORT_SYMBOL(node_to_cpumask_map); |
769 | */ |
770 | const struct cpumask *cpumask_of_node(int node) |
771 | { |
772 | - if (WARN_ON(node >= nr_node_ids)) |
773 | + |
774 | + if (node == NUMA_NO_NODE) |
775 | + return cpu_all_mask; |
776 | + |
777 | + if (WARN_ON(node < 0 || node >= nr_node_ids)) |
778 | return cpu_none_mask; |
779 | |
780 | if (WARN_ON(node_to_cpumask_map[node] == NULL)) |
781 | diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile |
782 | index 1a8df6669eee6..18d6008b151fd 100644 |
783 | --- a/arch/ia64/kernel/Makefile |
784 | +++ b/arch/ia64/kernel/Makefile |
785 | @@ -41,7 +41,7 @@ obj-y += esi_stub.o # must be in kernel proper |
786 | endif |
787 | obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o |
788 | |
789 | -obj-$(CONFIG_BINFMT_ELF) += elfcore.o |
790 | +obj-$(CONFIG_ELF_CORE) += elfcore.o |
791 | |
792 | # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. |
793 | CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 |
794 | diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c |
795 | index b8356edbde659..b3dc39050c1ad 100644 |
796 | --- a/arch/ia64/kernel/kprobes.c |
797 | +++ b/arch/ia64/kernel/kprobes.c |
798 | @@ -396,83 +396,9 @@ static void kretprobe_trampoline(void) |
799 | { |
800 | } |
801 | |
802 | -/* |
803 | - * At this point the target function has been tricked into |
804 | - * returning into our trampoline. Lookup the associated instance |
805 | - * and then: |
806 | - * - call the handler function |
807 | - * - cleanup by marking the instance as unused |
808 | - * - long jump back to the original return address |
809 | - */ |
810 | int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) |
811 | { |
812 | - struct kretprobe_instance *ri = NULL; |
813 | - struct hlist_head *head, empty_rp; |
814 | - struct hlist_node *tmp; |
815 | - unsigned long flags, orig_ret_address = 0; |
816 | - unsigned long trampoline_address = |
817 | - ((struct fnptr *)kretprobe_trampoline)->ip; |
818 | - |
819 | - INIT_HLIST_HEAD(&empty_rp); |
820 | - kretprobe_hash_lock(current, &head, &flags); |
821 | - |
822 | - /* |
823 | - * It is possible to have multiple instances associated with a given |
824 | - * task either because an multiple functions in the call path |
825 | - * have a return probe installed on them, and/or more than one return |
826 | - * return probe was registered for a target function. |
827 | - * |
828 | - * We can handle this because: |
829 | - * - instances are always inserted at the head of the list |
830 | - * - when multiple return probes are registered for the same |
831 | - * function, the first instance's ret_addr will point to the |
832 | - * real return address, and all the rest will point to |
833 | - * kretprobe_trampoline |
834 | - */ |
835 | - hlist_for_each_entry_safe(ri, tmp, head, hlist) { |
836 | - if (ri->task != current) |
837 | - /* another task is sharing our hash bucket */ |
838 | - continue; |
839 | - |
840 | - orig_ret_address = (unsigned long)ri->ret_addr; |
841 | - if (orig_ret_address != trampoline_address) |
842 | - /* |
843 | - * This is the real return address. Any other |
844 | - * instances associated with this task are for |
845 | - * other calls deeper on the call stack |
846 | - */ |
847 | - break; |
848 | - } |
849 | - |
850 | - regs->cr_iip = orig_ret_address; |
851 | - |
852 | - hlist_for_each_entry_safe(ri, tmp, head, hlist) { |
853 | - if (ri->task != current) |
854 | - /* another task is sharing our hash bucket */ |
855 | - continue; |
856 | - |
857 | - if (ri->rp && ri->rp->handler) |
858 | - ri->rp->handler(ri, regs); |
859 | - |
860 | - orig_ret_address = (unsigned long)ri->ret_addr; |
861 | - recycle_rp_inst(ri, &empty_rp); |
862 | - |
863 | - if (orig_ret_address != trampoline_address) |
864 | - /* |
865 | - * This is the real return address. Any other |
866 | - * instances associated with this task are for |
867 | - * other calls deeper on the call stack |
868 | - */ |
869 | - break; |
870 | - } |
871 | - kretprobe_assert(ri, orig_ret_address, trampoline_address); |
872 | - |
873 | - kretprobe_hash_unlock(current, &flags); |
874 | - |
875 | - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { |
876 | - hlist_del(&ri->hlist); |
877 | - kfree(ri); |
878 | - } |
879 | + regs->cr_iip = __kretprobe_trampoline_handler(regs, kretprobe_trampoline, NULL); |
880 | /* |
881 | * By returning a non-zero value, we are telling |
882 | * kprobe_handler() that we don't want the post_handler |
883 | @@ -485,6 +411,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, |
884 | struct pt_regs *regs) |
885 | { |
886 | ri->ret_addr = (kprobe_opcode_t *)regs->b0; |
887 | + ri->fp = NULL; |
888 | |
889 | /* Replace the return addr with trampoline addr */ |
890 | regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; |
891 | diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c |
892 | index 61a0bf13e3083..1fc8dffa8d1d0 100644 |
893 | --- a/arch/mips/dec/setup.c |
894 | +++ b/arch/mips/dec/setup.c |
895 | @@ -6,7 +6,7 @@ |
896 | * for more details. |
897 | * |
898 | * Copyright (C) 1998 Harald Koerfgen |
899 | - * Copyright (C) 2000, 2001, 2002, 2003, 2005 Maciej W. Rozycki |
900 | + * Copyright (C) 2000, 2001, 2002, 2003, 2005, 2020 Maciej W. Rozycki |
901 | */ |
902 | #include <linux/console.h> |
903 | #include <linux/export.h> |
904 | @@ -15,6 +15,7 @@ |
905 | #include <linux/ioport.h> |
906 | #include <linux/irq.h> |
907 | #include <linux/irqnr.h> |
908 | +#include <linux/memblock.h> |
909 | #include <linux/param.h> |
910 | #include <linux/percpu-defs.h> |
911 | #include <linux/sched.h> |
912 | @@ -22,6 +23,7 @@ |
913 | #include <linux/types.h> |
914 | #include <linux/pm.h> |
915 | |
916 | +#include <asm/addrspace.h> |
917 | #include <asm/bootinfo.h> |
918 | #include <asm/cpu.h> |
919 | #include <asm/cpu-features.h> |
920 | @@ -29,7 +31,9 @@ |
921 | #include <asm/irq.h> |
922 | #include <asm/irq_cpu.h> |
923 | #include <asm/mipsregs.h> |
924 | +#include <asm/page.h> |
925 | #include <asm/reboot.h> |
926 | +#include <asm/sections.h> |
927 | #include <asm/time.h> |
928 | #include <asm/traps.h> |
929 | #include <asm/wbflush.h> |
930 | @@ -166,6 +170,9 @@ void __init plat_mem_setup(void) |
931 | |
932 | ioport_resource.start = ~0UL; |
933 | ioport_resource.end = 0UL; |
934 | + |
935 | + /* Stay away from the firmware working memory area for now. */ |
936 | + memblock_reserve(PHYS_OFFSET, __pa_symbol(&_text) - PHYS_OFFSET); |
937 | } |
938 | |
939 | /* |
940 | diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig |
941 | index ad620637cbd11..cb285e474c880 100644 |
942 | --- a/arch/powerpc/Kconfig |
943 | +++ b/arch/powerpc/Kconfig |
944 | @@ -147,6 +147,7 @@ config PPC |
945 | select ARCH_USE_BUILTIN_BSWAP |
946 | select ARCH_USE_CMPXCHG_LOCKREF if PPC64 |
947 | select ARCH_WANT_IPC_PARSE_VERSION |
948 | + select ARCH_WANT_IRQS_OFF_ACTIVATE_MM |
949 | select ARCH_WEAK_RELEASE_ACQUIRE |
950 | select BINFMT_ELF |
951 | select BUILDTIME_EXTABLE_SORT |
952 | @@ -1023,6 +1024,19 @@ config FSL_RIO |
953 | Include support for RapidIO controller on Freescale embedded |
954 | processors (MPC8548, MPC8641, etc). |
955 | |
956 | +config PPC_RTAS_FILTER |
957 | + bool "Enable filtering of RTAS syscalls" |
958 | + default y |
959 | + depends on PPC_RTAS |
960 | + help |
961 | + The RTAS syscall API has security issues that could be used to |
962 | + compromise system integrity. This option enforces restrictions on the |
963 | + RTAS calls and arguments passed by userspace programs to mitigate |
964 | + these issues. |
965 | + |
966 | + Say Y unless you know what you are doing and the filter is causing |
967 | + problems for you. |
968 | + |
969 | endmenu |
970 | |
971 | config NONSTATIC_KERNEL |
972 | diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h |
973 | index bea7a2405ba5d..ee61542c6c3d9 100644 |
974 | --- a/arch/powerpc/include/asm/drmem.h |
975 | +++ b/arch/powerpc/include/asm/drmem.h |
976 | @@ -20,7 +20,7 @@ struct drmem_lmb { |
977 | struct drmem_lmb_info { |
978 | struct drmem_lmb *lmbs; |
979 | int n_lmbs; |
980 | - u32 lmb_size; |
981 | + u64 lmb_size; |
982 | }; |
983 | |
984 | extern struct drmem_lmb_info *drmem_info; |
985 | @@ -79,7 +79,7 @@ struct of_drconf_cell_v2 { |
986 | #define DRCONF_MEM_AI_INVALID 0x00000040 |
987 | #define DRCONF_MEM_RESERVED 0x00000080 |
988 | |
989 | -static inline u32 drmem_lmb_size(void) |
990 | +static inline u64 drmem_lmb_size(void) |
991 | { |
992 | return drmem_info->lmb_size; |
993 | } |
994 | diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h |
995 | index 58efca9343113..f132b418a8c7a 100644 |
996 | --- a/arch/powerpc/include/asm/mmu_context.h |
997 | +++ b/arch/powerpc/include/asm/mmu_context.h |
998 | @@ -216,7 +216,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
999 | */ |
1000 | static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) |
1001 | { |
1002 | - switch_mm(prev, next, current); |
1003 | + switch_mm_irqs_off(prev, next, current); |
1004 | } |
1005 | |
1006 | /* We don't currently use enter_lazy_tlb() for anything */ |
1007 | diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S |
1008 | index 4a24f8f026c79..5e2f2fd78b94f 100644 |
1009 | --- a/arch/powerpc/kernel/head_32.S |
1010 | +++ b/arch/powerpc/kernel/head_32.S |
1011 | @@ -843,7 +843,7 @@ BEGIN_MMU_FTR_SECTION |
1012 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) |
1013 | blr |
1014 | |
1015 | -load_segment_registers: |
1016 | +_GLOBAL(load_segment_registers) |
1017 | li r0, NUM_USER_SEGMENTS /* load up user segment register values */ |
1018 | mtctr r0 /* for context 0 */ |
1019 | li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */ |
1020 | diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c |
1021 | index 01210593d60c3..c62ff66d44ad9 100644 |
1022 | --- a/arch/powerpc/kernel/rtas.c |
1023 | +++ b/arch/powerpc/kernel/rtas.c |
1024 | @@ -940,6 +940,147 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, |
1025 | return NULL; |
1026 | } |
1027 | |
1028 | +#ifdef CONFIG_PPC_RTAS_FILTER |
1029 | + |
1030 | +/* |
1031 | + * The sys_rtas syscall, as originally designed, allows root to pass |
1032 | + * arbitrary physical addresses to RTAS calls. A number of RTAS calls |
1033 | + * can be abused to write to arbitrary memory and do other things that |
1034 | + * are potentially harmful to system integrity, and thus should only |
1035 | + * be used inside the kernel and not exposed to userspace. |
1036 | + * |
1037 | + * All known legitimate users of the sys_rtas syscall will only ever |
1038 | + * pass addresses that fall within the RMO buffer, and use a known |
1039 | + * subset of RTAS calls. |
1040 | + * |
1041 | + * Accordingly, we filter RTAS requests to check that the call is |
1042 | + * permitted, and that provided pointers fall within the RMO buffer. |
1043 | + * The rtas_filters list contains an entry for each permitted call, |
1044 | + * with the indexes of the parameters which are expected to contain |
1045 | + * addresses and sizes of buffers allocated inside the RMO buffer. |
1046 | + */ |
1047 | +struct rtas_filter { |
1048 | + const char *name; |
1049 | + int token; |
1050 | + /* Indexes into the args buffer, -1 if not used */ |
1051 | + int buf_idx1; |
1052 | + int size_idx1; |
1053 | + int buf_idx2; |
1054 | + int size_idx2; |
1055 | + |
1056 | + int fixed_size; |
1057 | +}; |
1058 | + |
1059 | +static struct rtas_filter rtas_filters[] __ro_after_init = { |
1060 | + { "ibm,activate-firmware", -1, -1, -1, -1, -1 }, |
1061 | + { "ibm,configure-connector", -1, 0, -1, 1, -1, 4096 }, /* Special cased */ |
1062 | + { "display-character", -1, -1, -1, -1, -1 }, |
1063 | + { "ibm,display-message", -1, 0, -1, -1, -1 }, |
1064 | + { "ibm,errinjct", -1, 2, -1, -1, -1, 1024 }, |
1065 | + { "ibm,close-errinjct", -1, -1, -1, -1, -1 }, |
1066 | + { "ibm,open-errinct", -1, -1, -1, -1, -1 }, |
1067 | + { "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 }, |
1068 | + { "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 }, |
1069 | + { "ibm,get-indices", -1, 2, 3, -1, -1 }, |
1070 | + { "get-power-level", -1, -1, -1, -1, -1 }, |
1071 | + { "get-sensor-state", -1, -1, -1, -1, -1 }, |
1072 | + { "ibm,get-system-parameter", -1, 1, 2, -1, -1 }, |
1073 | + { "get-time-of-day", -1, -1, -1, -1, -1 }, |
1074 | + { "ibm,get-vpd", -1, 0, -1, 1, 2 }, |
1075 | + { "ibm,lpar-perftools", -1, 2, 3, -1, -1 }, |
1076 | + { "ibm,platform-dump", -1, 4, 5, -1, -1 }, |
1077 | + { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 }, |
1078 | + { "ibm,scan-log-dump", -1, 0, 1, -1, -1 }, |
1079 | + { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 }, |
1080 | + { "ibm,set-eeh-option", -1, -1, -1, -1, -1 }, |
1081 | + { "set-indicator", -1, -1, -1, -1, -1 }, |
1082 | + { "set-power-level", -1, -1, -1, -1, -1 }, |
1083 | + { "set-time-for-power-on", -1, -1, -1, -1, -1 }, |
1084 | + { "ibm,set-system-parameter", -1, 1, -1, -1, -1 }, |
1085 | + { "set-time-of-day", -1, -1, -1, -1, -1 }, |
1086 | + { "ibm,suspend-me", -1, -1, -1, -1, -1 }, |
1087 | + { "ibm,update-nodes", -1, 0, -1, -1, -1, 4096 }, |
1088 | + { "ibm,update-properties", -1, 0, -1, -1, -1, 4096 }, |
1089 | + { "ibm,physical-attestation", -1, 0, 1, -1, -1 }, |
1090 | +}; |
1091 | + |
1092 | +static bool in_rmo_buf(u32 base, u32 end) |
1093 | +{ |
1094 | + return base >= rtas_rmo_buf && |
1095 | + base < (rtas_rmo_buf + RTAS_RMOBUF_MAX) && |
1096 | + base <= end && |
1097 | + end >= rtas_rmo_buf && |
1098 | + end < (rtas_rmo_buf + RTAS_RMOBUF_MAX); |
1099 | +} |
1100 | + |
1101 | +static bool block_rtas_call(int token, int nargs, |
1102 | + struct rtas_args *args) |
1103 | +{ |
1104 | + int i; |
1105 | + |
1106 | + for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) { |
1107 | + struct rtas_filter *f = &rtas_filters[i]; |
1108 | + u32 base, size, end; |
1109 | + |
1110 | + if (token != f->token) |
1111 | + continue; |
1112 | + |
1113 | + if (f->buf_idx1 != -1) { |
1114 | + base = be32_to_cpu(args->args[f->buf_idx1]); |
1115 | + if (f->size_idx1 != -1) |
1116 | + size = be32_to_cpu(args->args[f->size_idx1]); |
1117 | + else if (f->fixed_size) |
1118 | + size = f->fixed_size; |
1119 | + else |
1120 | + size = 1; |
1121 | + |
1122 | + end = base + size - 1; |
1123 | + if (!in_rmo_buf(base, end)) |
1124 | + goto err; |
1125 | + } |
1126 | + |
1127 | + if (f->buf_idx2 != -1) { |
1128 | + base = be32_to_cpu(args->args[f->buf_idx2]); |
1129 | + if (f->size_idx2 != -1) |
1130 | + size = be32_to_cpu(args->args[f->size_idx2]); |
1131 | + else if (f->fixed_size) |
1132 | + size = f->fixed_size; |
1133 | + else |
1134 | + size = 1; |
1135 | + end = base + size - 1; |
1136 | + |
1137 | + /* |
1138 | + * Special case for ibm,configure-connector where the |
1139 | + * address can be 0 |
1140 | + */ |
1141 | + if (!strcmp(f->name, "ibm,configure-connector") && |
1142 | + base == 0) |
1143 | + return false; |
1144 | + |
1145 | + if (!in_rmo_buf(base, end)) |
1146 | + goto err; |
1147 | + } |
1148 | + |
1149 | + return false; |
1150 | + } |
1151 | + |
1152 | +err: |
1153 | + pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n"); |
1154 | + pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n", |
1155 | + token, nargs, current->comm); |
1156 | + return true; |
1157 | +} |
1158 | + |
1159 | +#else |
1160 | + |
1161 | +static bool block_rtas_call(int token, int nargs, |
1162 | + struct rtas_args *args) |
1163 | +{ |
1164 | + return false; |
1165 | +} |
1166 | + |
1167 | +#endif /* CONFIG_PPC_RTAS_FILTER */ |
1168 | + |
1169 | /* We assume to be passed big endian arguments */ |
1170 | SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) |
1171 | { |
1172 | @@ -977,6 +1118,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) |
1173 | args.rets = &args.args[nargs]; |
1174 | memset(args.rets, 0, nret * sizeof(rtas_arg_t)); |
1175 | |
1176 | + if (block_rtas_call(token, nargs, &args)) |
1177 | + return -EINVAL; |
1178 | + |
1179 | /* Need to handle ibm,suspend_me call specially */ |
1180 | if (token == ibm_suspend_me_token) { |
1181 | |
1182 | @@ -1038,6 +1182,9 @@ void __init rtas_initialize(void) |
1183 | unsigned long rtas_region = RTAS_INSTANTIATE_MAX; |
1184 | u32 base, size, entry; |
1185 | int no_base, no_size, no_entry; |
1186 | +#ifdef CONFIG_PPC_RTAS_FILTER |
1187 | + int i; |
1188 | +#endif |
1189 | |
1190 | /* Get RTAS dev node and fill up our "rtas" structure with infos |
1191 | * about it. |
1192 | @@ -1077,6 +1224,12 @@ void __init rtas_initialize(void) |
1193 | #ifdef CONFIG_RTAS_ERROR_LOGGING |
1194 | rtas_last_error_token = rtas_token("rtas-last-error"); |
1195 | #endif |
1196 | + |
1197 | +#ifdef CONFIG_PPC_RTAS_FILTER |
1198 | + for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) { |
1199 | + rtas_filters[i].token = rtas_token(rtas_filters[i].name); |
1200 | + } |
1201 | +#endif |
1202 | } |
1203 | |
1204 | int __init early_init_dt_scan_rtas(unsigned long node, |
1205 | diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c |
1206 | index 80a676da11cbc..f08ca604a3941 100644 |
1207 | --- a/arch/powerpc/kernel/sysfs.c |
1208 | +++ b/arch/powerpc/kernel/sysfs.c |
1209 | @@ -31,29 +31,27 @@ |
1210 | |
1211 | static DEFINE_PER_CPU(struct cpu, cpu_devices); |
1212 | |
1213 | -/* |
1214 | - * SMT snooze delay stuff, 64-bit only for now |
1215 | - */ |
1216 | - |
1217 | #ifdef CONFIG_PPC64 |
1218 | |
1219 | -/* Time in microseconds we delay before sleeping in the idle loop */ |
1220 | -static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 }; |
1221 | +/* |
1222 | + * Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle: |
1223 | + * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in |
1224 | + * 2014: |
1225 | + * |
1226 | + * "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean |
1227 | + * up the kernel code." |
1228 | + * |
1229 | + * powerpc-utils stopped using it as of 1.3.8. At some point in the future this |
1230 | + * code should be removed. |
1231 | + */ |
1232 | |
1233 | static ssize_t store_smt_snooze_delay(struct device *dev, |
1234 | struct device_attribute *attr, |
1235 | const char *buf, |
1236 | size_t count) |
1237 | { |
1238 | - struct cpu *cpu = container_of(dev, struct cpu, dev); |
1239 | - ssize_t ret; |
1240 | - long snooze; |
1241 | - |
1242 | - ret = sscanf(buf, "%ld", &snooze); |
1243 | - if (ret != 1) |
1244 | - return -EINVAL; |
1245 | - |
1246 | - per_cpu(smt_snooze_delay, cpu->dev.id) = snooze; |
1247 | + pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n", |
1248 | + current->comm, current->pid); |
1249 | return count; |
1250 | } |
1251 | |
1252 | @@ -61,9 +59,9 @@ static ssize_t show_smt_snooze_delay(struct device *dev, |
1253 | struct device_attribute *attr, |
1254 | char *buf) |
1255 | { |
1256 | - struct cpu *cpu = container_of(dev, struct cpu, dev); |
1257 | - |
1258 | - return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id)); |
1259 | + pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n", |
1260 | + current->comm, current->pid); |
1261 | + return sprintf(buf, "100\n"); |
1262 | } |
1263 | |
1264 | static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, |
1265 | @@ -71,16 +69,10 @@ static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, |
1266 | |
1267 | static int __init setup_smt_snooze_delay(char *str) |
1268 | { |
1269 | - unsigned int cpu; |
1270 | - long snooze; |
1271 | - |
1272 | if (!cpu_has_feature(CPU_FTR_SMT)) |
1273 | return 1; |
1274 | |
1275 | - snooze = simple_strtol(str, NULL, 10); |
1276 | - for_each_possible_cpu(cpu) |
1277 | - per_cpu(smt_snooze_delay, cpu) = snooze; |
1278 | - |
1279 | + pr_warn("smt-snooze-delay command line option has no effect\n"); |
1280 | return 1; |
1281 | } |
1282 | __setup("smt-snooze-delay=", setup_smt_snooze_delay); |
1283 | diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c |
1284 | index 9432fc6af28a5..206032c9b5458 100644 |
1285 | --- a/arch/powerpc/kernel/traps.c |
1286 | +++ b/arch/powerpc/kernel/traps.c |
1287 | @@ -877,7 +877,7 @@ static void p9_hmi_special_emu(struct pt_regs *regs) |
1288 | { |
1289 | unsigned int ra, rb, t, i, sel, instr, rc; |
1290 | const void __user *addr; |
1291 | - u8 vbuf[16], *vdst; |
1292 | + u8 vbuf[16] __aligned(16), *vdst; |
1293 | unsigned long ea, msr, msr_mask; |
1294 | bool swap; |
1295 | |
1296 | diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c |
1297 | index e2183fed947d4..dd9b19b1f459a 100644 |
1298 | --- a/arch/powerpc/kvm/book3s_hv.c |
1299 | +++ b/arch/powerpc/kvm/book3s_hv.c |
1300 | @@ -5191,6 +5191,12 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp, |
1301 | case KVM_PPC_ALLOCATE_HTAB: { |
1302 | u32 htab_order; |
1303 | |
1304 | + /* If we're a nested hypervisor, we currently only support radix */ |
1305 | + if (kvmhv_on_pseries()) { |
1306 | + r = -EOPNOTSUPP; |
1307 | + break; |
1308 | + } |
1309 | + |
1310 | r = -EFAULT; |
1311 | if (get_user(htab_order, (u32 __user *)argp)) |
1312 | break; |
1313 | diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S |
1314 | index bd6085b470b7a..935dcac4c02d3 100644 |
1315 | --- a/arch/powerpc/platforms/powermac/sleep.S |
1316 | +++ b/arch/powerpc/platforms/powermac/sleep.S |
1317 | @@ -293,14 +293,7 @@ grackle_wake_up: |
1318 | * we do any r1 memory access as we are not sure they |
1319 | * are in a sane state above the first 256Mb region |
1320 | */ |
1321 | - li r0,16 /* load up segment register values */ |
1322 | - mtctr r0 /* for context 0 */ |
1323 | - lis r3,0x2000 /* Ku = 1, VSID = 0 */ |
1324 | - li r4,0 |
1325 | -3: mtsrin r3,r4 |
1326 | - addi r3,r3,0x111 /* increment VSID */ |
1327 | - addis r4,r4,0x1000 /* address of next segment */ |
1328 | - bdnz 3b |
1329 | + bl load_segment_registers |
1330 | sync |
1331 | isync |
1332 | |
1333 | diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c |
1334 | index 62ef7ad995da3..5e33b1fc67c2b 100644 |
1335 | --- a/arch/powerpc/platforms/powernv/opal-elog.c |
1336 | +++ b/arch/powerpc/platforms/powernv/opal-elog.c |
1337 | @@ -179,14 +179,14 @@ static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj, |
1338 | return count; |
1339 | } |
1340 | |
1341 | -static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type) |
1342 | +static void create_elog_obj(uint64_t id, size_t size, uint64_t type) |
1343 | { |
1344 | struct elog_obj *elog; |
1345 | int rc; |
1346 | |
1347 | elog = kzalloc(sizeof(*elog), GFP_KERNEL); |
1348 | if (!elog) |
1349 | - return NULL; |
1350 | + return; |
1351 | |
1352 | elog->kobj.kset = elog_kset; |
1353 | |
1354 | @@ -219,18 +219,37 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type) |
1355 | rc = kobject_add(&elog->kobj, NULL, "0x%llx", id); |
1356 | if (rc) { |
1357 | kobject_put(&elog->kobj); |
1358 | - return NULL; |
1359 | + return; |
1360 | } |
1361 | |
1362 | + /* |
1363 | + * As soon as the sysfs file for this elog is created/activated there is |
1364 | + * a chance the opal_errd daemon (or any userspace) might read and |
1365 | + * acknowledge the elog before kobject_uevent() is called. If that |
1366 | + * happens then there is a potential race between |
1367 | + * elog_ack_store->kobject_put() and kobject_uevent() which leads to a |
1368 | + * use-after-free of a kernfs object resulting in a kernel crash. |
1369 | + * |
1370 | + * To avoid that, we need to take a reference on behalf of the bin file, |
1371 | + * so that our reference remains valid while we call kobject_uevent(). |
1372 | + * We then drop our reference before exiting the function, leaving the |
1373 | + * bin file to drop the last reference (if it hasn't already). |
1374 | + */ |
1375 | + |
1376 | + /* Take a reference for the bin file */ |
1377 | + kobject_get(&elog->kobj); |
1378 | rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr); |
1379 | - if (rc) { |
1380 | + if (rc == 0) { |
1381 | + kobject_uevent(&elog->kobj, KOBJ_ADD); |
1382 | + } else { |
1383 | + /* Drop the reference taken for the bin file */ |
1384 | kobject_put(&elog->kobj); |
1385 | - return NULL; |
1386 | } |
1387 | |
1388 | - kobject_uevent(&elog->kobj, KOBJ_ADD); |
1389 | + /* Drop our reference */ |
1390 | + kobject_put(&elog->kobj); |
1391 | |
1392 | - return elog; |
1393 | + return; |
1394 | } |
1395 | |
1396 | static irqreturn_t elog_event(int irq, void *data) |
1397 | diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c |
1398 | index b2ba3e95bda73..bbf361f23ae86 100644 |
1399 | --- a/arch/powerpc/platforms/powernv/smp.c |
1400 | +++ b/arch/powerpc/platforms/powernv/smp.c |
1401 | @@ -43,7 +43,7 @@ |
1402 | #include <asm/udbg.h> |
1403 | #define DBG(fmt...) udbg_printf(fmt) |
1404 | #else |
1405 | -#define DBG(fmt...) |
1406 | +#define DBG(fmt...) do { } while (0) |
1407 | #endif |
1408 | |
1409 | static void pnv_smp_setup_cpu(int cpu) |
1410 | diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c |
1411 | index 66b32f46702de..f364909d0c08d 100644 |
1412 | --- a/arch/powerpc/platforms/pseries/hotplug-memory.c |
1413 | +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c |
1414 | @@ -279,7 +279,7 @@ static int dlpar_offline_lmb(struct drmem_lmb *lmb) |
1415 | return dlpar_change_lmb_state(lmb, false); |
1416 | } |
1417 | |
1418 | -static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) |
1419 | +static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size) |
1420 | { |
1421 | unsigned long block_sz, start_pfn; |
1422 | int sections_per_block; |
1423 | @@ -310,10 +310,11 @@ out: |
1424 | |
1425 | static int pseries_remove_mem_node(struct device_node *np) |
1426 | { |
1427 | - const __be32 *regs; |
1428 | + const __be32 *prop; |
1429 | unsigned long base; |
1430 | - unsigned int lmb_size; |
1431 | + unsigned long lmb_size; |
1432 | int ret = -EINVAL; |
1433 | + int addr_cells, size_cells; |
1434 | |
1435 | /* |
1436 | * Check to see if we are actually removing memory |
1437 | @@ -324,12 +325,19 @@ static int pseries_remove_mem_node(struct device_node *np) |
1438 | /* |
1439 | * Find the base address and size of the memblock |
1440 | */ |
1441 | - regs = of_get_property(np, "reg", NULL); |
1442 | - if (!regs) |
1443 | + prop = of_get_property(np, "reg", NULL); |
1444 | + if (!prop) |
1445 | return ret; |
1446 | |
1447 | - base = be64_to_cpu(*(unsigned long *)regs); |
1448 | - lmb_size = be32_to_cpu(regs[3]); |
1449 | + addr_cells = of_n_addr_cells(np); |
1450 | + size_cells = of_n_size_cells(np); |
1451 | + |
1452 | + /* |
1453 | + * "reg" property represents (addr,size) tuple. |
1454 | + */ |
1455 | + base = of_read_number(prop, addr_cells); |
1456 | + prop += addr_cells; |
1457 | + lmb_size = of_read_number(prop, size_cells); |
1458 | |
1459 | pseries_remove_memblock(base, lmb_size); |
1460 | return 0; |
1461 | @@ -620,7 +628,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index) |
1462 | |
1463 | #else |
1464 | static inline int pseries_remove_memblock(unsigned long base, |
1465 | - unsigned int memblock_size) |
1466 | + unsigned long memblock_size) |
1467 | { |
1468 | return -EOPNOTSUPP; |
1469 | } |
1470 | @@ -953,10 +961,11 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) |
1471 | |
1472 | static int pseries_add_mem_node(struct device_node *np) |
1473 | { |
1474 | - const __be32 *regs; |
1475 | + const __be32 *prop; |
1476 | unsigned long base; |
1477 | - unsigned int lmb_size; |
1478 | + unsigned long lmb_size; |
1479 | int ret = -EINVAL; |
1480 | + int addr_cells, size_cells; |
1481 | |
1482 | /* |
1483 | * Check to see if we are actually adding memory |
1484 | @@ -967,12 +976,18 @@ static int pseries_add_mem_node(struct device_node *np) |
1485 | /* |
1486 | * Find the base and size of the memblock |
1487 | */ |
1488 | - regs = of_get_property(np, "reg", NULL); |
1489 | - if (!regs) |
1490 | + prop = of_get_property(np, "reg", NULL); |
1491 | + if (!prop) |
1492 | return ret; |
1493 | |
1494 | - base = be64_to_cpu(*(unsigned long *)regs); |
1495 | - lmb_size = be32_to_cpu(regs[3]); |
1496 | + addr_cells = of_n_addr_cells(np); |
1497 | + size_cells = of_n_size_cells(np); |
1498 | + /* |
1499 | + * "reg" property represents (addr,size) tuple. |
1500 | + */ |
1501 | + base = of_read_number(prop, addr_cells); |
1502 | + prop += addr_cells; |
1503 | + lmb_size = of_read_number(prop, size_cells); |
1504 | |
1505 | /* |
1506 | * Update memory region to represent the memory add |
1507 | diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h |
1508 | index d86cb17bbabe6..22e0ae8884061 100644 |
1509 | --- a/arch/riscv/include/uapi/asm/auxvec.h |
1510 | +++ b/arch/riscv/include/uapi/asm/auxvec.h |
1511 | @@ -10,4 +10,7 @@ |
1512 | /* vDSO location */ |
1513 | #define AT_SYSINFO_EHDR 33 |
1514 | |
1515 | +/* entries in ARCH_DLINFO */ |
1516 | +#define AT_VECTOR_SIZE_ARCH 1 |
1517 | + |
1518 | #endif /* _UAPI_ASM_RISCV_AUXVEC_H */ |
1519 | diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S |
1520 | index 4b86a8d3c1219..e6bf5f40bff34 100644 |
1521 | --- a/arch/s390/boot/head.S |
1522 | +++ b/arch/s390/boot/head.S |
1523 | @@ -360,22 +360,23 @@ ENTRY(startup_kdump) |
1524 | # the save area and does disabled wait with a faulty address. |
1525 | # |
1526 | ENTRY(startup_pgm_check_handler) |
1527 | - stmg %r0,%r15,__LC_SAVE_AREA_SYNC |
1528 | - la %r1,4095 |
1529 | - stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r1) |
1530 | - mvc __LC_GPREGS_SAVE_AREA-4095(128,%r1),__LC_SAVE_AREA_SYNC |
1531 | - mvc __LC_PSW_SAVE_AREA-4095(16,%r1),__LC_PGM_OLD_PSW |
1532 | + stmg %r8,%r15,__LC_SAVE_AREA_SYNC |
1533 | + la %r8,4095 |
1534 | + stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8) |
1535 | + stmg %r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8) |
1536 | + mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC |
1537 | + mvc __LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW |
1538 | mvc __LC_RETURN_PSW(16),__LC_PGM_OLD_PSW |
1539 | ni __LC_RETURN_PSW,0xfc # remove IO and EX bits |
1540 | ni __LC_RETURN_PSW+1,0xfb # remove MCHK bit |
1541 | oi __LC_RETURN_PSW+1,0x2 # set wait state bit |
1542 | - larl %r2,.Lold_psw_disabled_wait |
1543 | - stg %r2,__LC_PGM_NEW_PSW+8 |
1544 | - l %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r2) |
1545 | + larl %r9,.Lold_psw_disabled_wait |
1546 | + stg %r9,__LC_PGM_NEW_PSW+8 |
1547 | + l %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r9) |
1548 | brasl %r14,print_pgm_check_info |
1549 | .Lold_psw_disabled_wait: |
1550 | - la %r1,4095 |
1551 | - lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) |
1552 | + la %r8,4095 |
1553 | + lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8) |
1554 | lpswe __LC_RETURN_PSW # disabled wait |
1555 | .Ldump_info_stack: |
1556 | .long 0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD |
1557 | diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c |
1558 | index 8ea9db599d38d..11c32b228f518 100644 |
1559 | --- a/arch/s390/kernel/time.c |
1560 | +++ b/arch/s390/kernel/time.c |
1561 | @@ -354,8 +354,9 @@ static DEFINE_PER_CPU(atomic_t, clock_sync_word); |
1562 | static DEFINE_MUTEX(clock_sync_mutex); |
1563 | static unsigned long clock_sync_flags; |
1564 | |
1565 | -#define CLOCK_SYNC_HAS_STP 0 |
1566 | -#define CLOCK_SYNC_STP 1 |
1567 | +#define CLOCK_SYNC_HAS_STP 0 |
1568 | +#define CLOCK_SYNC_STP 1 |
1569 | +#define CLOCK_SYNC_STPINFO_VALID 2 |
1570 | |
1571 | /* |
1572 | * The get_clock function for the physical clock. It will get the current |
1573 | @@ -592,6 +593,22 @@ void stp_queue_work(void) |
1574 | queue_work(time_sync_wq, &stp_work); |
1575 | } |
1576 | |
1577 | +static int __store_stpinfo(void) |
1578 | +{ |
1579 | + int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); |
1580 | + |
1581 | + if (rc) |
1582 | + clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags); |
1583 | + else |
1584 | + set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags); |
1585 | + return rc; |
1586 | +} |
1587 | + |
1588 | +static int stpinfo_valid(void) |
1589 | +{ |
1590 | + return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags); |
1591 | +} |
1592 | + |
1593 | static int stp_sync_clock(void *data) |
1594 | { |
1595 | struct clock_sync_data *sync = data; |
1596 | @@ -613,8 +630,7 @@ static int stp_sync_clock(void *data) |
1597 | if (rc == 0) { |
1598 | sync->clock_delta = clock_delta; |
1599 | clock_sync_global(clock_delta); |
1600 | - rc = chsc_sstpi(stp_page, &stp_info, |
1601 | - sizeof(struct stp_sstpi)); |
1602 | + rc = __store_stpinfo(); |
1603 | if (rc == 0 && stp_info.tmd != 2) |
1604 | rc = -EAGAIN; |
1605 | } |
1606 | @@ -659,7 +675,7 @@ static void stp_work_fn(struct work_struct *work) |
1607 | if (rc) |
1608 | goto out_unlock; |
1609 | |
1610 | - rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); |
1611 | + rc = __store_stpinfo(); |
1612 | if (rc || stp_info.c == 0) |
1613 | goto out_unlock; |
1614 | |
1615 | @@ -696,10 +712,14 @@ static ssize_t stp_ctn_id_show(struct device *dev, |
1616 | struct device_attribute *attr, |
1617 | char *buf) |
1618 | { |
1619 | - if (!stp_online) |
1620 | - return -ENODATA; |
1621 | - return sprintf(buf, "%016llx\n", |
1622 | - *(unsigned long long *) stp_info.ctnid); |
1623 | + ssize_t ret = -ENODATA; |
1624 | + |
1625 | + mutex_lock(&stp_work_mutex); |
1626 | + if (stpinfo_valid()) |
1627 | + ret = sprintf(buf, "%016llx\n", |
1628 | + *(unsigned long long *) stp_info.ctnid); |
1629 | + mutex_unlock(&stp_work_mutex); |
1630 | + return ret; |
1631 | } |
1632 | |
1633 | static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL); |
1634 | @@ -708,9 +728,13 @@ static ssize_t stp_ctn_type_show(struct device *dev, |
1635 | struct device_attribute *attr, |
1636 | char *buf) |
1637 | { |
1638 | - if (!stp_online) |
1639 | - return -ENODATA; |
1640 | - return sprintf(buf, "%i\n", stp_info.ctn); |
1641 | + ssize_t ret = -ENODATA; |
1642 | + |
1643 | + mutex_lock(&stp_work_mutex); |
1644 | + if (stpinfo_valid()) |
1645 | + ret = sprintf(buf, "%i\n", stp_info.ctn); |
1646 | + mutex_unlock(&stp_work_mutex); |
1647 | + return ret; |
1648 | } |
1649 | |
1650 | static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL); |
1651 | @@ -719,9 +743,13 @@ static ssize_t stp_dst_offset_show(struct device *dev, |
1652 | struct device_attribute *attr, |
1653 | char *buf) |
1654 | { |
1655 | - if (!stp_online || !(stp_info.vbits & 0x2000)) |
1656 | - return -ENODATA; |
1657 | - return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto); |
1658 | + ssize_t ret = -ENODATA; |
1659 | + |
1660 | + mutex_lock(&stp_work_mutex); |
1661 | + if (stpinfo_valid() && (stp_info.vbits & 0x2000)) |
1662 | + ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto); |
1663 | + mutex_unlock(&stp_work_mutex); |
1664 | + return ret; |
1665 | } |
1666 | |
1667 | static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL); |
1668 | @@ -730,9 +758,13 @@ static ssize_t stp_leap_seconds_show(struct device *dev, |
1669 | struct device_attribute *attr, |
1670 | char *buf) |
1671 | { |
1672 | - if (!stp_online || !(stp_info.vbits & 0x8000)) |
1673 | - return -ENODATA; |
1674 | - return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps); |
1675 | + ssize_t ret = -ENODATA; |
1676 | + |
1677 | + mutex_lock(&stp_work_mutex); |
1678 | + if (stpinfo_valid() && (stp_info.vbits & 0x8000)) |
1679 | + ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps); |
1680 | + mutex_unlock(&stp_work_mutex); |
1681 | + return ret; |
1682 | } |
1683 | |
1684 | static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL); |
1685 | @@ -741,9 +773,13 @@ static ssize_t stp_stratum_show(struct device *dev, |
1686 | struct device_attribute *attr, |
1687 | char *buf) |
1688 | { |
1689 | - if (!stp_online) |
1690 | - return -ENODATA; |
1691 | - return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum); |
1692 | + ssize_t ret = -ENODATA; |
1693 | + |
1694 | + mutex_lock(&stp_work_mutex); |
1695 | + if (stpinfo_valid()) |
1696 | + ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum); |
1697 | + mutex_unlock(&stp_work_mutex); |
1698 | + return ret; |
1699 | } |
1700 | |
1701 | static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL); |
1702 | @@ -752,9 +788,13 @@ static ssize_t stp_time_offset_show(struct device *dev, |
1703 | struct device_attribute *attr, |
1704 | char *buf) |
1705 | { |
1706 | - if (!stp_online || !(stp_info.vbits & 0x0800)) |
1707 | - return -ENODATA; |
1708 | - return sprintf(buf, "%i\n", (int) stp_info.tto); |
1709 | + ssize_t ret = -ENODATA; |
1710 | + |
1711 | + mutex_lock(&stp_work_mutex); |
1712 | + if (stpinfo_valid() && (stp_info.vbits & 0x0800)) |
1713 | + ret = sprintf(buf, "%i\n", (int) stp_info.tto); |
1714 | + mutex_unlock(&stp_work_mutex); |
1715 | + return ret; |
1716 | } |
1717 | |
1718 | static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL); |
1719 | @@ -763,9 +803,13 @@ static ssize_t stp_time_zone_offset_show(struct device *dev, |
1720 | struct device_attribute *attr, |
1721 | char *buf) |
1722 | { |
1723 | - if (!stp_online || !(stp_info.vbits & 0x4000)) |
1724 | - return -ENODATA; |
1725 | - return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo); |
1726 | + ssize_t ret = -ENODATA; |
1727 | + |
1728 | + mutex_lock(&stp_work_mutex); |
1729 | + if (stpinfo_valid() && (stp_info.vbits & 0x4000)) |
1730 | + ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo); |
1731 | + mutex_unlock(&stp_work_mutex); |
1732 | + return ret; |
1733 | } |
1734 | |
1735 | static DEVICE_ATTR(time_zone_offset, 0400, |
1736 | @@ -775,9 +819,13 @@ static ssize_t stp_timing_mode_show(struct device *dev, |
1737 | struct device_attribute *attr, |
1738 | char *buf) |
1739 | { |
1740 | - if (!stp_online) |
1741 | - return -ENODATA; |
1742 | - return sprintf(buf, "%i\n", stp_info.tmd); |
1743 | + ssize_t ret = -ENODATA; |
1744 | + |
1745 | + mutex_lock(&stp_work_mutex); |
1746 | + if (stpinfo_valid()) |
1747 | + ret = sprintf(buf, "%i\n", stp_info.tmd); |
1748 | + mutex_unlock(&stp_work_mutex); |
1749 | + return ret; |
1750 | } |
1751 | |
1752 | static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL); |
1753 | @@ -786,9 +834,13 @@ static ssize_t stp_timing_state_show(struct device *dev, |
1754 | struct device_attribute *attr, |
1755 | char *buf) |
1756 | { |
1757 | - if (!stp_online) |
1758 | - return -ENODATA; |
1759 | - return sprintf(buf, "%i\n", stp_info.tst); |
1760 | + ssize_t ret = -ENODATA; |
1761 | + |
1762 | + mutex_lock(&stp_work_mutex); |
1763 | + if (stpinfo_valid()) |
1764 | + ret = sprintf(buf, "%i\n", stp_info.tst); |
1765 | + mutex_unlock(&stp_work_mutex); |
1766 | + return ret; |
1767 | } |
1768 | |
1769 | static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL); |
1770 | diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c |
1771 | index a8275fea4b70c..aa81c25b44cf3 100644 |
1772 | --- a/arch/sparc/kernel/smp_64.c |
1773 | +++ b/arch/sparc/kernel/smp_64.c |
1774 | @@ -1039,38 +1039,9 @@ void smp_fetch_global_pmu(void) |
1775 | * are flush_tlb_*() routines, and these run after flush_cache_*() |
1776 | * which performs the flushw. |
1777 | * |
1778 | - * The SMP TLB coherency scheme we use works as follows: |
1779 | - * |
1780 | - * 1) mm->cpu_vm_mask is a bit mask of which cpus an address |
1781 | - * space has (potentially) executed on, this is the heuristic |
1782 | - * we use to avoid doing cross calls. |
1783 | - * |
1784 | - * Also, for flushing from kswapd and also for clones, we |
1785 | - * use cpu_vm_mask as the list of cpus to make run the TLB. |
1786 | - * |
1787 | - * 2) TLB context numbers are shared globally across all processors |
1788 | - * in the system, this allows us to play several games to avoid |
1789 | - * cross calls. |
1790 | - * |
1791 | - * One invariant is that when a cpu switches to a process, and |
1792 | - * that processes tsk->active_mm->cpu_vm_mask does not have the |
1793 | - * current cpu's bit set, that tlb context is flushed locally. |
1794 | - * |
1795 | - * If the address space is non-shared (ie. mm->count == 1) we avoid |
1796 | - * cross calls when we want to flush the currently running process's |
1797 | - * tlb state. This is done by clearing all cpu bits except the current |
1798 | - * processor's in current->mm->cpu_vm_mask and performing the |
1799 | - * flush locally only. This will force any subsequent cpus which run |
1800 | - * this task to flush the context from the local tlb if the process |
1801 | - * migrates to another cpu (again). |
1802 | - * |
1803 | - * 3) For shared address spaces (threads) and swapping we bite the |
1804 | - * bullet for most cases and perform the cross call (but only to |
1805 | - * the cpus listed in cpu_vm_mask). |
1806 | - * |
1807 | - * The performance gain from "optimizing" away the cross call for threads is |
1808 | - * questionable (in theory the big win for threads is the massive sharing of |
1809 | - * address space state across processors). |
1810 | + * mm->cpu_vm_mask is a bit mask of which cpus an address |
1811 | + * space has (potentially) executed on, this is the heuristic |
1812 | + * we use to limit cross calls. |
1813 | */ |
1814 | |
1815 | /* This currently is only used by the hugetlb arch pre-fault |
1816 | @@ -1080,18 +1051,13 @@ void smp_fetch_global_pmu(void) |
1817 | void smp_flush_tlb_mm(struct mm_struct *mm) |
1818 | { |
1819 | u32 ctx = CTX_HWBITS(mm->context); |
1820 | - int cpu = get_cpu(); |
1821 | |
1822 | - if (atomic_read(&mm->mm_users) == 1) { |
1823 | - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); |
1824 | - goto local_flush_and_out; |
1825 | - } |
1826 | + get_cpu(); |
1827 | |
1828 | smp_cross_call_masked(&xcall_flush_tlb_mm, |
1829 | ctx, 0, 0, |
1830 | mm_cpumask(mm)); |
1831 | |
1832 | -local_flush_and_out: |
1833 | __flush_tlb_mm(ctx, SECONDARY_CONTEXT); |
1834 | |
1835 | put_cpu(); |
1836 | @@ -1114,17 +1080,15 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long |
1837 | { |
1838 | u32 ctx = CTX_HWBITS(mm->context); |
1839 | struct tlb_pending_info info; |
1840 | - int cpu = get_cpu(); |
1841 | + |
1842 | + get_cpu(); |
1843 | |
1844 | info.ctx = ctx; |
1845 | info.nr = nr; |
1846 | info.vaddrs = vaddrs; |
1847 | |
1848 | - if (mm == current->mm && atomic_read(&mm->mm_users) == 1) |
1849 | - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); |
1850 | - else |
1851 | - smp_call_function_many(mm_cpumask(mm), tlb_pending_func, |
1852 | - &info, 1); |
1853 | + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, |
1854 | + &info, 1); |
1855 | |
1856 | __flush_tlb_pending(ctx, nr, vaddrs); |
1857 | |
1858 | @@ -1134,14 +1098,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long |
1859 | void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) |
1860 | { |
1861 | unsigned long context = CTX_HWBITS(mm->context); |
1862 | - int cpu = get_cpu(); |
1863 | |
1864 | - if (mm == current->mm && atomic_read(&mm->mm_users) == 1) |
1865 | - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); |
1866 | - else |
1867 | - smp_cross_call_masked(&xcall_flush_tlb_page, |
1868 | - context, vaddr, 0, |
1869 | - mm_cpumask(mm)); |
1870 | + get_cpu(); |
1871 | + |
1872 | + smp_cross_call_masked(&xcall_flush_tlb_page, |
1873 | + context, vaddr, 0, |
1874 | + mm_cpumask(mm)); |
1875 | + |
1876 | __flush_tlb_page(context, vaddr); |
1877 | |
1878 | put_cpu(); |
1879 | diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c |
1880 | index 10c99e058fcae..d1cffc2a7f212 100644 |
1881 | --- a/arch/um/kernel/sigio.c |
1882 | +++ b/arch/um/kernel/sigio.c |
1883 | @@ -35,14 +35,14 @@ int write_sigio_irq(int fd) |
1884 | } |
1885 | |
1886 | /* These are called from os-Linux/sigio.c to protect its pollfds arrays. */ |
1887 | -static DEFINE_SPINLOCK(sigio_spinlock); |
1888 | +static DEFINE_MUTEX(sigio_mutex); |
1889 | |
1890 | void sigio_lock(void) |
1891 | { |
1892 | - spin_lock(&sigio_spinlock); |
1893 | + mutex_lock(&sigio_mutex); |
1894 | } |
1895 | |
1896 | void sigio_unlock(void) |
1897 | { |
1898 | - spin_unlock(&sigio_spinlock); |
1899 | + mutex_unlock(&sigio_mutex); |
1900 | } |
1901 | diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c |
1902 | index a023cbe21230a..39169885adfa8 100644 |
1903 | --- a/arch/x86/events/amd/ibs.c |
1904 | +++ b/arch/x86/events/amd/ibs.c |
1905 | @@ -335,11 +335,15 @@ static u64 get_ibs_op_count(u64 config) |
1906 | { |
1907 | u64 count = 0; |
1908 | |
1909 | + /* |
1910 | + * If the internal 27-bit counter rolled over, the count is MaxCnt |
1911 | + * and the lower 7 bits of CurCnt are randomized. |
1912 | + * Otherwise CurCnt has the full 27-bit current counter value. |
1913 | + */ |
1914 | if (config & IBS_OP_VAL) |
1915 | - count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */ |
1916 | - |
1917 | - if (ibs_caps & IBS_CAPS_RDWROPCNT) |
1918 | - count += (config & IBS_OP_CUR_CNT) >> 32; |
1919 | + count = (config & IBS_OP_MAX_CNT) << 4; |
1920 | + else if (ibs_caps & IBS_CAPS_RDWROPCNT) |
1921 | + count = (config & IBS_OP_CUR_CNT) >> 32; |
1922 | |
1923 | return count; |
1924 | } |
1925 | @@ -632,18 +636,24 @@ fail: |
1926 | perf_ibs->offset_max, |
1927 | offset + 1); |
1928 | } while (offset < offset_max); |
1929 | + /* |
1930 | + * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately |
1931 | + * depending on their availability. |
1932 | + * Can't add to offset_max as they are staggered |
1933 | + */ |
1934 | if (event->attr.sample_type & PERF_SAMPLE_RAW) { |
1935 | - /* |
1936 | - * Read IbsBrTarget and IbsOpData4 separately |
1937 | - * depending on their availability. |
1938 | - * Can't add to offset_max as they are staggered |
1939 | - */ |
1940 | - if (ibs_caps & IBS_CAPS_BRNTRGT) { |
1941 | - rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); |
1942 | - size++; |
1943 | + if (perf_ibs == &perf_ibs_op) { |
1944 | + if (ibs_caps & IBS_CAPS_BRNTRGT) { |
1945 | + rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); |
1946 | + size++; |
1947 | + } |
1948 | + if (ibs_caps & IBS_CAPS_OPDATA4) { |
1949 | + rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); |
1950 | + size++; |
1951 | + } |
1952 | } |
1953 | - if (ibs_caps & IBS_CAPS_OPDATA4) { |
1954 | - rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); |
1955 | + if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) { |
1956 | + rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++); |
1957 | size++; |
1958 | } |
1959 | } |
1960 | diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c |
1961 | index 0461ab257df61..c4def90777475 100644 |
1962 | --- a/arch/x86/events/intel/core.c |
1963 | +++ b/arch/x86/events/intel/core.c |
1964 | @@ -243,7 +243,7 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = { |
1965 | |
1966 | static struct event_constraint intel_icl_event_constraints[] = { |
1967 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
1968 | - INTEL_UEVENT_CONSTRAINT(0x1c0, 0), /* INST_RETIRED.PREC_DIST */ |
1969 | + FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* INST_RETIRED.PREC_DIST */ |
1970 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
1971 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ |
1972 | FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ |
1973 | diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h |
1974 | index 391812e0384ec..f312b6f6ac481 100644 |
1975 | --- a/arch/x86/include/asm/msr-index.h |
1976 | +++ b/arch/x86/include/asm/msr-index.h |
1977 | @@ -432,6 +432,7 @@ |
1978 | #define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1) |
1979 | #define MSR_AMD64_IBSCTL 0xc001103a |
1980 | #define MSR_AMD64_IBSBRTARGET 0xc001103b |
1981 | +#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c |
1982 | #define MSR_AMD64_IBSOPDATA4 0xc001103d |
1983 | #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ |
1984 | #define MSR_AMD64_SEV 0xc0010131 |
1985 | diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c |
1986 | index 187a86e0e7531..f29f015a5e7f3 100644 |
1987 | --- a/arch/x86/kernel/unwind_orc.c |
1988 | +++ b/arch/x86/kernel/unwind_orc.c |
1989 | @@ -311,19 +311,12 @@ EXPORT_SYMBOL_GPL(unwind_get_return_address); |
1990 | |
1991 | unsigned long *unwind_get_return_address_ptr(struct unwind_state *state) |
1992 | { |
1993 | - struct task_struct *task = state->task; |
1994 | - |
1995 | if (unwind_done(state)) |
1996 | return NULL; |
1997 | |
1998 | if (state->regs) |
1999 | return &state->regs->ip; |
2000 | |
2001 | - if (task != current && state->sp == task->thread.sp) { |
2002 | - struct inactive_task_frame *frame = (void *)task->thread.sp; |
2003 | - return &frame->ret_addr; |
2004 | - } |
2005 | - |
2006 | if (state->sp) |
2007 | return (unsigned long *)state->sp - 1; |
2008 | |
2009 | @@ -653,7 +646,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, |
2010 | } else { |
2011 | struct inactive_task_frame *frame = (void *)task->thread.sp; |
2012 | |
2013 | - state->sp = task->thread.sp; |
2014 | + state->sp = task->thread.sp + sizeof(*frame); |
2015 | state->bp = READ_ONCE_NOCHECK(frame->bp); |
2016 | state->ip = READ_ONCE_NOCHECK(frame->ret_addr); |
2017 | state->signal = (void *)state->ip == ret_from_fork; |
2018 | diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c |
2019 | index 7a265c2171c08..cc4b509bad94d 100644 |
2020 | --- a/drivers/acpi/acpi_dbg.c |
2021 | +++ b/drivers/acpi/acpi_dbg.c |
2022 | @@ -749,6 +749,9 @@ int __init acpi_aml_init(void) |
2023 | { |
2024 | int ret; |
2025 | |
2026 | + if (acpi_disabled) |
2027 | + return -ENODEV; |
2028 | + |
2029 | /* Initialize AML IO interface */ |
2030 | mutex_init(&acpi_aml_io.lock); |
2031 | init_waitqueue_head(&acpi_aml_io.wait); |
2032 | diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c |
2033 | index 8596a106a933c..91d0b0fc392b1 100644 |
2034 | --- a/drivers/acpi/acpi_extlog.c |
2035 | +++ b/drivers/acpi/acpi_extlog.c |
2036 | @@ -223,9 +223,9 @@ static int __init extlog_init(void) |
2037 | u64 cap; |
2038 | int rc; |
2039 | |
2040 | - rdmsrl(MSR_IA32_MCG_CAP, cap); |
2041 | - |
2042 | - if (!(cap & MCG_ELOG_P) || !extlog_get_l1addr()) |
2043 | + if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) || |
2044 | + !(cap & MCG_ELOG_P) || |
2045 | + !extlog_get_l1addr()) |
2046 | return -ENODEV; |
2047 | |
2048 | if (edac_get_report_status() == EDAC_REPORTING_FORCE) { |
2049 | diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c |
2050 | index 985afc62da82a..690bfe67e643d 100644 |
2051 | --- a/drivers/acpi/button.c |
2052 | +++ b/drivers/acpi/button.c |
2053 | @@ -136,6 +136,7 @@ struct acpi_button { |
2054 | int last_state; |
2055 | ktime_t last_time; |
2056 | bool suspended; |
2057 | + bool lid_state_initialized; |
2058 | }; |
2059 | |
2060 | static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); |
2061 | @@ -391,6 +392,8 @@ static int acpi_lid_update_state(struct acpi_device *device, |
2062 | |
2063 | static void acpi_lid_initialize_state(struct acpi_device *device) |
2064 | { |
2065 | + struct acpi_button *button = acpi_driver_data(device); |
2066 | + |
2067 | switch (lid_init_state) { |
2068 | case ACPI_BUTTON_LID_INIT_OPEN: |
2069 | (void)acpi_lid_notify_state(device, 1); |
2070 | @@ -402,13 +405,14 @@ static void acpi_lid_initialize_state(struct acpi_device *device) |
2071 | default: |
2072 | break; |
2073 | } |
2074 | + |
2075 | + button->lid_state_initialized = true; |
2076 | } |
2077 | |
2078 | static void acpi_button_notify(struct acpi_device *device, u32 event) |
2079 | { |
2080 | struct acpi_button *button = acpi_driver_data(device); |
2081 | struct input_dev *input; |
2082 | - int users; |
2083 | |
2084 | switch (event) { |
2085 | case ACPI_FIXED_HARDWARE_EVENT: |
2086 | @@ -417,10 +421,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) |
2087 | case ACPI_BUTTON_NOTIFY_STATUS: |
2088 | input = button->input; |
2089 | if (button->type == ACPI_BUTTON_TYPE_LID) { |
2090 | - mutex_lock(&button->input->mutex); |
2091 | - users = button->input->users; |
2092 | - mutex_unlock(&button->input->mutex); |
2093 | - if (users) |
2094 | + if (button->lid_state_initialized) |
2095 | acpi_lid_update_state(device, true); |
2096 | } else { |
2097 | int keycode; |
2098 | @@ -465,7 +466,7 @@ static int acpi_button_resume(struct device *dev) |
2099 | struct acpi_button *button = acpi_driver_data(device); |
2100 | |
2101 | button->suspended = false; |
2102 | - if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) { |
2103 | + if (button->type == ACPI_BUTTON_TYPE_LID) { |
2104 | button->last_state = !!acpi_lid_evaluate_state(device); |
2105 | button->last_time = ktime_get(); |
2106 | acpi_lid_initialize_state(device); |
2107 | diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c |
2108 | index 1ec55345252b6..c64001e789ed7 100644 |
2109 | --- a/drivers/acpi/ec.c |
2110 | +++ b/drivers/acpi/ec.c |
2111 | @@ -1968,20 +1968,16 @@ bool acpi_ec_dispatch_gpe(void) |
2112 | if (acpi_any_gpe_status_set(first_ec->gpe)) |
2113 | return true; |
2114 | |
2115 | - if (ec_no_wakeup) |
2116 | - return false; |
2117 | - |
2118 | /* |
2119 | * Dispatch the EC GPE in-band, but do not report wakeup in any case |
2120 | * to allow the caller to process events properly after that. |
2121 | */ |
2122 | ret = acpi_dispatch_gpe(NULL, first_ec->gpe); |
2123 | - if (ret == ACPI_INTERRUPT_HANDLED) { |
2124 | + if (ret == ACPI_INTERRUPT_HANDLED) |
2125 | pm_pr_dbg("EC GPE dispatched\n"); |
2126 | |
2127 | - /* Flush the event and query workqueues. */ |
2128 | - acpi_ec_flush_work(); |
2129 | - } |
2130 | + /* Flush the event and query workqueues. */ |
2131 | + acpi_ec_flush_work(); |
2132 | |
2133 | return false; |
2134 | } |
2135 | diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c |
2136 | index 8b0de8a3c6470..0f1c939b7e901 100644 |
2137 | --- a/drivers/acpi/hmat/hmat.c |
2138 | +++ b/drivers/acpi/hmat/hmat.c |
2139 | @@ -403,7 +403,8 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade |
2140 | pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n", |
2141 | p->flags, p->processor_PD, p->memory_PD); |
2142 | |
2143 | - if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) { |
2144 | + if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) || |
2145 | + hmat_revision > 1) { |
2146 | target = find_mem_target(p->memory_PD); |
2147 | if (!target) { |
2148 | pr_debug("HMAT: Memory Domain missing from SRAT\n"); |
2149 | diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c |
2150 | index eadbf90e65d14..85e01752fbe47 100644 |
2151 | --- a/drivers/acpi/numa.c |
2152 | +++ b/drivers/acpi/numa.c |
2153 | @@ -31,7 +31,7 @@ int acpi_numa __initdata; |
2154 | |
2155 | int pxm_to_node(int pxm) |
2156 | { |
2157 | - if (pxm < 0) |
2158 | + if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off) |
2159 | return NUMA_NO_NODE; |
2160 | return pxm_to_node_map[pxm]; |
2161 | } |
2162 | diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c |
2163 | index 5bcb4c01ec5f0..55af78b55c513 100644 |
2164 | --- a/drivers/acpi/video_detect.c |
2165 | +++ b/drivers/acpi/video_detect.c |
2166 | @@ -282,6 +282,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = { |
2167 | DMI_MATCH(DMI_PRODUCT_NAME, "530U4E/540U4E"), |
2168 | }, |
2169 | }, |
2170 | + /* https://bugs.launchpad.net/bugs/1894667 */ |
2171 | + { |
2172 | + .callback = video_detect_force_video, |
2173 | + .ident = "HP 635 Notebook", |
2174 | + .matches = { |
2175 | + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), |
2176 | + DMI_MATCH(DMI_PRODUCT_NAME, "HP 635 Notebook PC"), |
2177 | + }, |
2178 | + }, |
2179 | |
2180 | /* Non win8 machines which need native backlight nevertheless */ |
2181 | { |
2182 | diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c |
2183 | index 18b147c182b96..0514aa7e80e39 100644 |
2184 | --- a/drivers/ata/sata_nv.c |
2185 | +++ b/drivers/ata/sata_nv.c |
2186 | @@ -2100,7 +2100,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap) |
2187 | pp->dhfis_bits &= ~done_mask; |
2188 | pp->dmafis_bits &= ~done_mask; |
2189 | pp->sdbfis_bits |= done_mask; |
2190 | - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); |
2191 | + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); |
2192 | |
2193 | if (!ap->qc_active) { |
2194 | DPRINTK("over\n"); |
2195 | diff --git a/drivers/base/core.c b/drivers/base/core.c |
2196 | index 94df2ba1bbed7..0fde3e9e63ee3 100644 |
2197 | --- a/drivers/base/core.c |
2198 | +++ b/drivers/base/core.c |
2199 | @@ -3400,6 +3400,7 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) |
2200 | */ |
2201 | void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) |
2202 | { |
2203 | + struct device *parent = dev->parent; |
2204 | struct fwnode_handle *fn = dev->fwnode; |
2205 | |
2206 | if (fwnode) { |
2207 | @@ -3414,7 +3415,8 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) |
2208 | } else { |
2209 | if (fwnode_is_primary(fn)) { |
2210 | dev->fwnode = fn->secondary; |
2211 | - fn->secondary = NULL; |
2212 | + if (!(parent && fn == parent->fwnode)) |
2213 | + fn->secondary = ERR_PTR(-ENODEV); |
2214 | } else { |
2215 | dev->fwnode = NULL; |
2216 | } |
2217 | diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c |
2218 | index 48616f358854a..4244e22e4b403 100644 |
2219 | --- a/drivers/base/power/runtime.c |
2220 | +++ b/drivers/base/power/runtime.c |
2221 | @@ -291,8 +291,7 @@ static int rpm_get_suppliers(struct device *dev) |
2222 | device_links_read_lock_held()) { |
2223 | int retval; |
2224 | |
2225 | - if (!(link->flags & DL_FLAG_PM_RUNTIME) || |
2226 | - READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) |
2227 | + if (!(link->flags & DL_FLAG_PM_RUNTIME)) |
2228 | continue; |
2229 | |
2230 | retval = pm_runtime_get_sync(link->supplier); |
2231 | @@ -312,8 +311,6 @@ static void rpm_put_suppliers(struct device *dev) |
2232 | |
2233 | list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, |
2234 | device_links_read_lock_held()) { |
2235 | - if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) |
2236 | - continue; |
2237 | |
2238 | while (refcount_dec_not_one(&link->rpm_active)) |
2239 | pm_runtime_put(link->supplier); |
2240 | diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c |
2241 | index 7c577cabb9c3b..742f8160b6e28 100644 |
2242 | --- a/drivers/block/nbd.c |
2243 | +++ b/drivers/block/nbd.c |
2244 | @@ -787,9 +787,9 @@ static void recv_work(struct work_struct *work) |
2245 | |
2246 | blk_mq_complete_request(blk_mq_rq_from_pdu(cmd)); |
2247 | } |
2248 | + nbd_config_put(nbd); |
2249 | atomic_dec(&config->recv_threads); |
2250 | wake_up(&config->recv_wq); |
2251 | - nbd_config_put(nbd); |
2252 | kfree(args); |
2253 | } |
2254 | |
2255 | diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c |
2256 | index 3666afa639d1a..b18f0162cb9c4 100644 |
2257 | --- a/drivers/block/xen-blkback/blkback.c |
2258 | +++ b/drivers/block/xen-blkback/blkback.c |
2259 | @@ -202,7 +202,7 @@ static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num) |
2260 | |
2261 | #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) |
2262 | |
2263 | -static int do_block_io_op(struct xen_blkif_ring *ring); |
2264 | +static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags); |
2265 | static int dispatch_rw_block_io(struct xen_blkif_ring *ring, |
2266 | struct blkif_request *req, |
2267 | struct pending_req *pending_req); |
2268 | @@ -615,6 +615,8 @@ int xen_blkif_schedule(void *arg) |
2269 | struct xen_vbd *vbd = &blkif->vbd; |
2270 | unsigned long timeout; |
2271 | int ret; |
2272 | + bool do_eoi; |
2273 | + unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS; |
2274 | |
2275 | set_freezable(); |
2276 | while (!kthread_should_stop()) { |
2277 | @@ -639,16 +641,23 @@ int xen_blkif_schedule(void *arg) |
2278 | if (timeout == 0) |
2279 | goto purge_gnt_list; |
2280 | |
2281 | + do_eoi = ring->waiting_reqs; |
2282 | + |
2283 | ring->waiting_reqs = 0; |
2284 | smp_mb(); /* clear flag *before* checking for work */ |
2285 | |
2286 | - ret = do_block_io_op(ring); |
2287 | + ret = do_block_io_op(ring, &eoi_flags); |
2288 | if (ret > 0) |
2289 | ring->waiting_reqs = 1; |
2290 | if (ret == -EACCES) |
2291 | wait_event_interruptible(ring->shutdown_wq, |
2292 | kthread_should_stop()); |
2293 | |
2294 | + if (do_eoi && !ring->waiting_reqs) { |
2295 | + xen_irq_lateeoi(ring->irq, eoi_flags); |
2296 | + eoi_flags |= XEN_EOI_FLAG_SPURIOUS; |
2297 | + } |
2298 | + |
2299 | purge_gnt_list: |
2300 | if (blkif->vbd.feature_gnt_persistent && |
2301 | time_after(jiffies, ring->next_lru)) { |
2302 | @@ -1121,7 +1130,7 @@ static void end_block_io_op(struct bio *bio) |
2303 | * and transmute it to the block API to hand it over to the proper block disk. |
2304 | */ |
2305 | static int |
2306 | -__do_block_io_op(struct xen_blkif_ring *ring) |
2307 | +__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) |
2308 | { |
2309 | union blkif_back_rings *blk_rings = &ring->blk_rings; |
2310 | struct blkif_request req; |
2311 | @@ -1144,6 +1153,9 @@ __do_block_io_op(struct xen_blkif_ring *ring) |
2312 | if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) |
2313 | break; |
2314 | |
2315 | + /* We've seen a request, so clear spurious eoi flag. */ |
2316 | + *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS; |
2317 | + |
2318 | if (kthread_should_stop()) { |
2319 | more_to_do = 1; |
2320 | break; |
2321 | @@ -1202,13 +1214,13 @@ done: |
2322 | } |
2323 | |
2324 | static int |
2325 | -do_block_io_op(struct xen_blkif_ring *ring) |
2326 | +do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) |
2327 | { |
2328 | union blkif_back_rings *blk_rings = &ring->blk_rings; |
2329 | int more_to_do; |
2330 | |
2331 | do { |
2332 | - more_to_do = __do_block_io_op(ring); |
2333 | + more_to_do = __do_block_io_op(ring, eoi_flags); |
2334 | if (more_to_do) |
2335 | break; |
2336 | |
2337 | diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c |
2338 | index c4cd68116e7fc..192ca58cc3c7f 100644 |
2339 | --- a/drivers/block/xen-blkback/xenbus.c |
2340 | +++ b/drivers/block/xen-blkback/xenbus.c |
2341 | @@ -229,9 +229,8 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, |
2342 | BUG(); |
2343 | } |
2344 | |
2345 | - err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, |
2346 | - xen_blkif_be_int, 0, |
2347 | - "blkif-backend", ring); |
2348 | + err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid, |
2349 | + evtchn, xen_blkif_be_int, 0, "blkif-backend", ring); |
2350 | if (err < 0) { |
2351 | xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); |
2352 | ring->blk_rings.common.sring = NULL; |
2353 | diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c |
2354 | index d9629fc13a155..0a4a387b615d5 100644 |
2355 | --- a/drivers/bus/fsl-mc/mc-io.c |
2356 | +++ b/drivers/bus/fsl-mc/mc-io.c |
2357 | @@ -129,7 +129,12 @@ error_destroy_mc_io: |
2358 | */ |
2359 | void fsl_destroy_mc_io(struct fsl_mc_io *mc_io) |
2360 | { |
2361 | - struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; |
2362 | + struct fsl_mc_device *dpmcp_dev; |
2363 | + |
2364 | + if (!mc_io) |
2365 | + return; |
2366 | + |
2367 | + dpmcp_dev = mc_io->dpmcp_dev; |
2368 | |
2369 | if (dpmcp_dev) |
2370 | fsl_mc_io_unset_dpmcp(mc_io); |
2371 | diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c |
2372 | index 423a99b9f10c7..8d0dea188a284 100644 |
2373 | --- a/drivers/clk/ti/clockdomain.c |
2374 | +++ b/drivers/clk/ti/clockdomain.c |
2375 | @@ -146,10 +146,12 @@ static void __init of_ti_clockdomain_setup(struct device_node *node) |
2376 | if (!omap2_clk_is_hw_omap(clk_hw)) { |
2377 | pr_warn("can't setup clkdm for basic clk %s\n", |
2378 | __clk_get_name(clk)); |
2379 | + clk_put(clk); |
2380 | continue; |
2381 | } |
2382 | to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name; |
2383 | omap2_init_clk_clkdm(clk_hw); |
2384 | + clk_put(clk); |
2385 | } |
2386 | } |
2387 | |
2388 | diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c |
2389 | index d6f7df33ab8ce..4195834a45912 100644 |
2390 | --- a/drivers/cpufreq/acpi-cpufreq.c |
2391 | +++ b/drivers/cpufreq/acpi-cpufreq.c |
2392 | @@ -688,7 +688,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) |
2393 | cpumask_copy(policy->cpus, topology_core_cpumask(cpu)); |
2394 | } |
2395 | |
2396 | - if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { |
2397 | + if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 && |
2398 | + !acpi_pstate_strict) { |
2399 | cpumask_clear(policy->cpus); |
2400 | cpumask_set_cpu(cpu, policy->cpus); |
2401 | cpumask_copy(data->freqdomain_cpus, |
2402 | diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c |
2403 | index 8f16bbb164b84..2855b7878a204 100644 |
2404 | --- a/drivers/cpufreq/sti-cpufreq.c |
2405 | +++ b/drivers/cpufreq/sti-cpufreq.c |
2406 | @@ -141,7 +141,8 @@ static const struct reg_field sti_stih407_dvfs_regfields[DVFS_MAX_REGFIELDS] = { |
2407 | static const struct reg_field *sti_cpufreq_match(void) |
2408 | { |
2409 | if (of_machine_is_compatible("st,stih407") || |
2410 | - of_machine_is_compatible("st,stih410")) |
2411 | + of_machine_is_compatible("st,stih410") || |
2412 | + of_machine_is_compatible("st,stih418")) |
2413 | return sti_stih407_dvfs_regfields; |
2414 | |
2415 | return NULL; |
2416 | @@ -258,7 +259,8 @@ static int sti_cpufreq_init(void) |
2417 | int ret; |
2418 | |
2419 | if ((!of_machine_is_compatible("st,stih407")) && |
2420 | - (!of_machine_is_compatible("st,stih410"))) |
2421 | + (!of_machine_is_compatible("st,stih410")) && |
2422 | + (!of_machine_is_compatible("st,stih418"))) |
2423 | return -ENODEV; |
2424 | |
2425 | ddata.cpu = get_cpu_device(0); |
2426 | diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c |
2427 | index 0ecb724b394f5..e27043427653f 100644 |
2428 | --- a/drivers/dma/dma-jz4780.c |
2429 | +++ b/drivers/dma/dma-jz4780.c |
2430 | @@ -639,11 +639,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan, |
2431 | unsigned long flags; |
2432 | unsigned long residue = 0; |
2433 | |
2434 | + spin_lock_irqsave(&jzchan->vchan.lock, flags); |
2435 | + |
2436 | status = dma_cookie_status(chan, cookie, txstate); |
2437 | if ((status == DMA_COMPLETE) || (txstate == NULL)) |
2438 | - return status; |
2439 | - |
2440 | - spin_lock_irqsave(&jzchan->vchan.lock, flags); |
2441 | + goto out_unlock_irqrestore; |
2442 | |
2443 | vdesc = vchan_find_desc(&jzchan->vchan, cookie); |
2444 | if (vdesc) { |
2445 | @@ -660,6 +660,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan, |
2446 | && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) |
2447 | status = DMA_ERROR; |
2448 | |
2449 | +out_unlock_irqrestore: |
2450 | spin_unlock_irqrestore(&jzchan->vchan.lock, flags); |
2451 | return status; |
2452 | } |
2453 | diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c |
2454 | index d1c997599390a..5f52527526441 100644 |
2455 | --- a/drivers/extcon/extcon-ptn5150.c |
2456 | +++ b/drivers/extcon/extcon-ptn5150.c |
2457 | @@ -127,7 +127,7 @@ static void ptn5150_irq_work(struct work_struct *work) |
2458 | case PTN5150_DFP_ATTACHED: |
2459 | extcon_set_state_sync(info->edev, |
2460 | EXTCON_USB_HOST, false); |
2461 | - gpiod_set_value(info->vbus_gpiod, 0); |
2462 | + gpiod_set_value_cansleep(info->vbus_gpiod, 0); |
2463 | extcon_set_state_sync(info->edev, EXTCON_USB, |
2464 | true); |
2465 | break; |
2466 | @@ -138,9 +138,9 @@ static void ptn5150_irq_work(struct work_struct *work) |
2467 | PTN5150_REG_CC_VBUS_DETECTION_MASK) >> |
2468 | PTN5150_REG_CC_VBUS_DETECTION_SHIFT); |
2469 | if (vbus) |
2470 | - gpiod_set_value(info->vbus_gpiod, 0); |
2471 | + gpiod_set_value_cansleep(info->vbus_gpiod, 0); |
2472 | else |
2473 | - gpiod_set_value(info->vbus_gpiod, 1); |
2474 | + gpiod_set_value_cansleep(info->vbus_gpiod, 1); |
2475 | |
2476 | extcon_set_state_sync(info->edev, |
2477 | EXTCON_USB_HOST, true); |
2478 | @@ -156,7 +156,7 @@ static void ptn5150_irq_work(struct work_struct *work) |
2479 | EXTCON_USB_HOST, false); |
2480 | extcon_set_state_sync(info->edev, |
2481 | EXTCON_USB, false); |
2482 | - gpiod_set_value(info->vbus_gpiod, 0); |
2483 | + gpiod_set_value_cansleep(info->vbus_gpiod, 0); |
2484 | } |
2485 | } |
2486 | |
2487 | diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c |
2488 | index f804e8af6521b..f986ee8919f03 100644 |
2489 | --- a/drivers/firmware/arm_scmi/base.c |
2490 | +++ b/drivers/firmware/arm_scmi/base.c |
2491 | @@ -173,6 +173,8 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle, |
2492 | protocols_imp[tot_num_ret + loop] = *(list + loop); |
2493 | |
2494 | tot_num_ret += loop_num_ret; |
2495 | + |
2496 | + scmi_reset_rx_to_maxsz(handle, t); |
2497 | } while (loop_num_ret); |
2498 | |
2499 | scmi_xfer_put(handle, t); |
2500 | diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c |
2501 | index 32526a793f3ac..38400a8d0ca89 100644 |
2502 | --- a/drivers/firmware/arm_scmi/clock.c |
2503 | +++ b/drivers/firmware/arm_scmi/clock.c |
2504 | @@ -177,6 +177,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id, |
2505 | } |
2506 | |
2507 | tot_rate_cnt += num_returned; |
2508 | + |
2509 | + scmi_reset_rx_to_maxsz(handle, t); |
2510 | /* |
2511 | * check for both returned and remaining to avoid infinite |
2512 | * loop due to buggy firmware |
2513 | diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h |
2514 | index 5237c2ff79fea..9a680b9af9e58 100644 |
2515 | --- a/drivers/firmware/arm_scmi/common.h |
2516 | +++ b/drivers/firmware/arm_scmi/common.h |
2517 | @@ -103,6 +103,8 @@ int scmi_do_xfer_with_response(const struct scmi_handle *h, |
2518 | struct scmi_xfer *xfer); |
2519 | int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id, |
2520 | size_t tx_size, size_t rx_size, struct scmi_xfer **p); |
2521 | +void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle, |
2522 | + struct scmi_xfer *xfer); |
2523 | int scmi_handle_put(const struct scmi_handle *handle); |
2524 | struct scmi_handle *scmi_handle_get(struct device *dev); |
2525 | void scmi_set_handle(struct scmi_device *scmi_dev); |
2526 | diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c |
2527 | index 3eb0382491ceb..11078199abed3 100644 |
2528 | --- a/drivers/firmware/arm_scmi/driver.c |
2529 | +++ b/drivers/firmware/arm_scmi/driver.c |
2530 | @@ -481,6 +481,14 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) |
2531 | return ret; |
2532 | } |
2533 | |
2534 | +void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle, |
2535 | + struct scmi_xfer *xfer) |
2536 | +{ |
2537 | + struct scmi_info *info = handle_to_scmi_info(handle); |
2538 | + |
2539 | + xfer->rx.len = info->desc->max_msg_size; |
2540 | +} |
2541 | + |
2542 | #define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC) |
2543 | |
2544 | /** |
2545 | diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c |
2546 | index 601af4edad5e6..129a2887e964f 100644 |
2547 | --- a/drivers/firmware/arm_scmi/perf.c |
2548 | +++ b/drivers/firmware/arm_scmi/perf.c |
2549 | @@ -281,6 +281,8 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain, |
2550 | } |
2551 | |
2552 | tot_opp_cnt += num_returned; |
2553 | + |
2554 | + scmi_reset_rx_to_maxsz(handle, t); |
2555 | /* |
2556 | * check for both returned and remaining to avoid infinite |
2557 | * loop due to buggy firmware |
2558 | diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c |
2559 | index ab42c21c55175..6d223f345b6c9 100644 |
2560 | --- a/drivers/firmware/arm_scmi/reset.c |
2561 | +++ b/drivers/firmware/arm_scmi/reset.c |
2562 | @@ -35,9 +35,7 @@ struct scmi_msg_reset_domain_reset { |
2563 | #define EXPLICIT_RESET_ASSERT BIT(1) |
2564 | #define ASYNCHRONOUS_RESET BIT(2) |
2565 | __le32 reset_state; |
2566 | -#define ARCH_RESET_TYPE BIT(31) |
2567 | -#define COLD_RESET_STATE BIT(0) |
2568 | -#define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE) |
2569 | +#define ARCH_COLD_RESET 0 |
2570 | }; |
2571 | |
2572 | struct reset_dom_info { |
2573 | diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c |
2574 | index a400ea805fc23..931208bc48f12 100644 |
2575 | --- a/drivers/firmware/arm_scmi/sensors.c |
2576 | +++ b/drivers/firmware/arm_scmi/sensors.c |
2577 | @@ -154,6 +154,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle, |
2578 | } |
2579 | |
2580 | desc_index += num_returned; |
2581 | + |
2582 | + scmi_reset_rx_to_maxsz(handle, t); |
2583 | /* |
2584 | * check for both returned and remaining to avoid infinite |
2585 | * loop due to buggy firmware |
2586 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |
2587 | index 4105fbf571674..29141bff4b572 100644 |
2588 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |
2589 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |
2590 | @@ -3890,7 +3890,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ |
2591 | |
2592 | amdgpu_device_lock_adev(tmp_adev, false); |
2593 | r = amdgpu_device_pre_asic_reset(tmp_adev, |
2594 | - NULL, |
2595 | + (tmp_adev == adev) ? job : NULL, |
2596 | &need_full_reset); |
2597 | /*TODO Should we stop ?*/ |
2598 | if (r) { |
2599 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c |
2600 | index 5fa5158d18ee3..9d61d1b7a5691 100644 |
2601 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c |
2602 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c |
2603 | @@ -561,6 +561,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, |
2604 | struct ww_acquire_ctx ticket; |
2605 | struct list_head list, duplicates; |
2606 | uint64_t va_flags; |
2607 | + uint64_t vm_size; |
2608 | int r = 0; |
2609 | |
2610 | if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { |
2611 | @@ -581,6 +582,15 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, |
2612 | |
2613 | args->va_address &= AMDGPU_GMC_HOLE_MASK; |
2614 | |
2615 | + vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; |
2616 | + vm_size -= AMDGPU_VA_RESERVED_SIZE; |
2617 | + if (args->va_address + args->map_size > vm_size) { |
2618 | + dev_dbg(&dev->pdev->dev, |
2619 | + "va_address 0x%llx is in top reserved area 0x%llx\n", |
2620 | + args->va_address + args->map_size, vm_size); |
2621 | + return -EINVAL; |
2622 | + } |
2623 | + |
2624 | if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) { |
2625 | dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n", |
2626 | args->flags); |
2627 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |
2628 | index 2eda3a8c330d3..4a64825b53cbd 100644 |
2629 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |
2630 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |
2631 | @@ -105,8 +105,8 @@ struct amdgpu_bo_list_entry; |
2632 | #define AMDGPU_MMHUB_0 1 |
2633 | #define AMDGPU_MMHUB_1 2 |
2634 | |
2635 | -/* hardcode that limit for now */ |
2636 | -#define AMDGPU_VA_RESERVED_SIZE (1ULL << 20) |
2637 | +/* Reserve 2MB at top/bottom of address space for kernel use */ |
2638 | +#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20) |
2639 | |
2640 | /* max vmids dedicated for process */ |
2641 | #define AMDGPU_VM_MAX_RESERVED_VMID 1 |
2642 | diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c |
2643 | index 72e4d61ac7522..ad05933423337 100644 |
2644 | --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c |
2645 | +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c |
2646 | @@ -58,8 +58,9 @@ static int update_qpd_v10(struct device_queue_manager *dqm, |
2647 | /* check if sh_mem_config register already configured */ |
2648 | if (qpd->sh_mem_config == 0) { |
2649 | qpd->sh_mem_config = |
2650 | - SH_MEM_ALIGNMENT_MODE_UNALIGNED << |
2651 | - SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; |
2652 | + (SH_MEM_ALIGNMENT_MODE_UNALIGNED << |
2653 | + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | |
2654 | + (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT); |
2655 | #if 0 |
2656 | /* TODO: |
2657 | * This shouldn't be an issue with Navi10. Verify. |
2658 | diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
2659 | index 7c58085031732..d2dd387c95d86 100644 |
2660 | --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
2661 | +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
2662 | @@ -3956,6 +3956,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) |
2663 | struct amdgpu_device *adev = connector->dev->dev_private; |
2664 | struct amdgpu_display_manager *dm = &adev->dm; |
2665 | |
2666 | + /* |
2667 | + * Call only if mst_mgr was iniitalized before since it's not done |
2668 | + * for all connector types. |
2669 | + */ |
2670 | + if (aconnector->mst_mgr.dev) |
2671 | + drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); |
2672 | + |
2673 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ |
2674 | defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) |
2675 | |
2676 | diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c |
2677 | index 3efee7b3378a3..47cefc05fd3f5 100644 |
2678 | --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c |
2679 | +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c |
2680 | @@ -2268,7 +2268,7 @@ enum dc_status dc_link_validate_mode_timing( |
2681 | /* A hack to avoid failing any modes for EDID override feature on |
2682 | * topology change such as lower quality cable for DP or different dongle |
2683 | */ |
2684 | - if (link->remote_sinks[0]) |
2685 | + if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL) |
2686 | return DC_OK; |
2687 | |
2688 | /* Passive Dongle */ |
2689 | diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c |
2690 | index ddf66046616d6..6718777c826dc 100644 |
2691 | --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c |
2692 | +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c |
2693 | @@ -898,10 +898,10 @@ void enc1_stream_encoder_dp_blank( |
2694 | */ |
2695 | REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2); |
2696 | /* Larger delay to wait until VBLANK - use max retry of |
2697 | - * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode + |
2698 | + * 10us*10200=102ms. This covers 100.0ms of minimum 10 Hz mode + |
2699 | * a little more because we may not trust delay accuracy. |
2700 | */ |
2701 | - max_retries = DP_BLANK_MAX_RETRY * 250; |
2702 | + max_retries = DP_BLANK_MAX_RETRY * 501; |
2703 | |
2704 | /* disable DP stream */ |
2705 | REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); |
2706 | diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c |
2707 | index f8f85490e77e2..8a6fb58d01994 100644 |
2708 | --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c |
2709 | +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c |
2710 | @@ -63,13 +63,13 @@ enum gpio_result dal_gpio_open_ex( |
2711 | enum gpio_mode mode) |
2712 | { |
2713 | if (gpio->pin) { |
2714 | - ASSERT_CRITICAL(false); |
2715 | + BREAK_TO_DEBUGGER(); |
2716 | return GPIO_RESULT_ALREADY_OPENED; |
2717 | } |
2718 | |
2719 | // No action if allocation failed during gpio construct |
2720 | if (!gpio->hw_container.ddc) { |
2721 | - ASSERT_CRITICAL(false); |
2722 | + BREAK_TO_DEBUGGER(); |
2723 | return GPIO_RESULT_NON_SPECIFIC_ERROR; |
2724 | } |
2725 | gpio->mode = mode; |
2726 | diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h |
2727 | index 30ec80ac6fc81..8da322582b683 100644 |
2728 | --- a/drivers/gpu/drm/amd/display/dc/os_types.h |
2729 | +++ b/drivers/gpu/drm/amd/display/dc/os_types.h |
2730 | @@ -57,7 +57,7 @@ |
2731 | * general debug capabilities |
2732 | * |
2733 | */ |
2734 | -#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB) |
2735 | +#if defined(CONFIG_DEBUG_KERNEL_DC) && (defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)) |
2736 | #define ASSERT_CRITICAL(expr) do { \ |
2737 | if (WARN_ON(!(expr))) { \ |
2738 | kgdb_breakpoint(); \ |
2739 | diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c |
2740 | index 1b55f037ba4a7..35e6cbe805eb4 100644 |
2741 | --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c |
2742 | +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c |
2743 | @@ -2864,7 +2864,7 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, |
2744 | if (hwmgr->is_kicker) |
2745 | switch_limit_us = data->is_memory_gddr5 ? 450 : 150; |
2746 | else |
2747 | - switch_limit_us = data->is_memory_gddr5 ? 190 : 150; |
2748 | + switch_limit_us = data->is_memory_gddr5 ? 200 : 150; |
2749 | break; |
2750 | case CHIP_VEGAM: |
2751 | switch_limit_us = 30; |
2752 | diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c |
2753 | index 6e81e5db57f25..b050fd1f3d201 100644 |
2754 | --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c |
2755 | +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c |
2756 | @@ -295,8 +295,12 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, |
2757 | const struct i2c_device_id *id) |
2758 | { |
2759 | struct device *dev = &stdp4028_i2c->dev; |
2760 | + int ret; |
2761 | + |
2762 | + ret = ge_b850v3_lvds_init(dev); |
2763 | |
2764 | - ge_b850v3_lvds_init(dev); |
2765 | + if (ret) |
2766 | + return ret; |
2767 | |
2768 | ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c; |
2769 | i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr); |
2770 | @@ -354,8 +358,12 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c, |
2771 | const struct i2c_device_id *id) |
2772 | { |
2773 | struct device *dev = &stdp2690_i2c->dev; |
2774 | + int ret; |
2775 | + |
2776 | + ret = ge_b850v3_lvds_init(dev); |
2777 | |
2778 | - ge_b850v3_lvds_init(dev); |
2779 | + if (ret) |
2780 | + return ret; |
2781 | |
2782 | ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c; |
2783 | i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr); |
2784 | diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c |
2785 | index 675442bfc1bd7..77384c49fb8dd 100644 |
2786 | --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c |
2787 | +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c |
2788 | @@ -365,7 +365,6 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi, |
2789 | if (lpm) |
2790 | val |= CMD_MODE_ALL_LP; |
2791 | |
2792 | - dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS); |
2793 | dsi_write(dsi, DSI_CMD_MODE_CFG, val); |
2794 | } |
2795 | |
2796 | @@ -541,16 +540,22 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi) |
2797 | static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi, |
2798 | unsigned long mode_flags) |
2799 | { |
2800 | + u32 val; |
2801 | + |
2802 | dsi_write(dsi, DSI_PWR_UP, RESET); |
2803 | |
2804 | if (mode_flags & MIPI_DSI_MODE_VIDEO) { |
2805 | dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE); |
2806 | dw_mipi_dsi_video_mode_config(dsi); |
2807 | - dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS); |
2808 | } else { |
2809 | dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE); |
2810 | } |
2811 | |
2812 | + val = PHY_TXREQUESTCLKHS; |
2813 | + if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) |
2814 | + val |= AUTO_CLKLANE_CTRL; |
2815 | + dsi_write(dsi, DSI_LPCLK_CTRL, val); |
2816 | + |
2817 | dsi_write(dsi, DSI_PWR_UP, POWERUP); |
2818 | } |
2819 | |
2820 | diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h |
2821 | index 89b6112bd66b0..126a0eb6e0542 100644 |
2822 | --- a/drivers/gpu/drm/i915/i915_drv.h |
2823 | +++ b/drivers/gpu/drm/i915/i915_drv.h |
2824 | @@ -33,6 +33,8 @@ |
2825 | #include <uapi/drm/i915_drm.h> |
2826 | #include <uapi/drm/drm_fourcc.h> |
2827 | |
2828 | +#include <asm/hypervisor.h> |
2829 | + |
2830 | #include <linux/io-mapping.h> |
2831 | #include <linux/i2c.h> |
2832 | #include <linux/i2c-algo-bit.h> |
2833 | @@ -2197,7 +2199,9 @@ static inline bool intel_vtd_active(void) |
2834 | if (intel_iommu_gfx_mapped) |
2835 | return true; |
2836 | #endif |
2837 | - return false; |
2838 | + |
2839 | + /* Running as a guest, we assume the host is enforcing VT'd */ |
2840 | + return !hypervisor_is_type(X86_HYPER_NATIVE); |
2841 | } |
2842 | |
2843 | static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) |
2844 | diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c |
2845 | index 3ce8ad7603c7f..3ffcbaa138788 100644 |
2846 | --- a/drivers/gpu/drm/ttm/ttm_bo.c |
2847 | +++ b/drivers/gpu/drm/ttm/ttm_bo.c |
2848 | @@ -761,7 +761,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, |
2849 | /* Don't evict this BO if it's outside of the |
2850 | * requested placement range |
2851 | */ |
2852 | - if (place->fpfn >= (bo->mem.start + bo->mem.size) || |
2853 | + if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) || |
2854 | (place->lpfn && place->lpfn <= bo->mem.start)) |
2855 | return false; |
2856 | |
2857 | diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c |
2858 | index 1c96809b51c90..b74acbd5997b5 100644 |
2859 | --- a/drivers/hid/wacom_wac.c |
2860 | +++ b/drivers/hid/wacom_wac.c |
2861 | @@ -2773,7 +2773,9 @@ static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *repo |
2862 | if (report->type != HID_INPUT_REPORT) |
2863 | return -1; |
2864 | |
2865 | - if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input) |
2866 | + if (WACOM_PAD_FIELD(field)) |
2867 | + return 0; |
2868 | + else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input) |
2869 | wacom_wac_pen_report(hdev, report); |
2870 | else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input) |
2871 | wacom_wac_finger_report(hdev, report); |
2872 | diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h |
2873 | index 82e563cdc8794..dfd24b85a5775 100644 |
2874 | --- a/drivers/hwtracing/coresight/coresight-priv.h |
2875 | +++ b/drivers/hwtracing/coresight/coresight-priv.h |
2876 | @@ -147,7 +147,8 @@ static inline void coresight_write_reg_pair(void __iomem *addr, u64 val, |
2877 | void coresight_disable_path(struct list_head *path); |
2878 | int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data); |
2879 | struct coresight_device *coresight_get_sink(struct list_head *path); |
2880 | -struct coresight_device *coresight_get_enabled_sink(bool reset); |
2881 | +struct coresight_device * |
2882 | +coresight_get_enabled_sink(struct coresight_device *source); |
2883 | struct coresight_device *coresight_get_sink_by_id(u32 id); |
2884 | struct list_head *coresight_build_path(struct coresight_device *csdev, |
2885 | struct coresight_device *sink); |
2886 | diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c |
2887 | index 0bbce0d291582..90ecd04a2f20b 100644 |
2888 | --- a/drivers/hwtracing/coresight/coresight.c |
2889 | +++ b/drivers/hwtracing/coresight/coresight.c |
2890 | @@ -481,50 +481,46 @@ struct coresight_device *coresight_get_sink(struct list_head *path) |
2891 | return csdev; |
2892 | } |
2893 | |
2894 | -static int coresight_enabled_sink(struct device *dev, const void *data) |
2895 | +static struct coresight_device * |
2896 | +coresight_find_enabled_sink(struct coresight_device *csdev) |
2897 | { |
2898 | - const bool *reset = data; |
2899 | - struct coresight_device *csdev = to_coresight_device(dev); |
2900 | + int i; |
2901 | + struct coresight_device *sink; |
2902 | |
2903 | if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || |
2904 | csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && |
2905 | - csdev->activated) { |
2906 | - /* |
2907 | - * Now that we have a handle on the sink for this session, |
2908 | - * disable the sysFS "enable_sink" flag so that possible |
2909 | - * concurrent perf session that wish to use another sink don't |
2910 | - * trip on it. Doing so has no ramification for the current |
2911 | - * session. |
2912 | - */ |
2913 | - if (*reset) |
2914 | - csdev->activated = false; |
2915 | + csdev->activated) |
2916 | + return csdev; |
2917 | |
2918 | - return 1; |
2919 | + /* |
2920 | + * Recursively explore each port found on this element. |
2921 | + */ |
2922 | + for (i = 0; i < csdev->pdata->nr_outport; i++) { |
2923 | + struct coresight_device *child_dev; |
2924 | + |
2925 | + child_dev = csdev->pdata->conns[i].child_dev; |
2926 | + if (child_dev) |
2927 | + sink = coresight_find_enabled_sink(child_dev); |
2928 | + if (sink) |
2929 | + return sink; |
2930 | } |
2931 | |
2932 | - return 0; |
2933 | + return NULL; |
2934 | } |
2935 | |
2936 | /** |
2937 | - * coresight_get_enabled_sink - returns the first enabled sink found on the bus |
2938 | - * @deactivate: Whether the 'enable_sink' flag should be reset |
2939 | + * coresight_get_enabled_sink - returns the first enabled sink using |
2940 | + * connection based search starting from the source reference |
2941 | * |
2942 | - * When operated from perf the deactivate parameter should be set to 'true'. |
2943 | - * That way the "enabled_sink" flag of the sink that was selected can be reset, |
2944 | - * allowing for other concurrent perf sessions to choose a different sink. |
2945 | - * |
2946 | - * When operated from sysFS users have full control and as such the deactivate |
2947 | - * parameter should be set to 'false', hence mandating users to explicitly |
2948 | - * clear the flag. |
2949 | + * @source: Coresight source device reference |
2950 | */ |
2951 | -struct coresight_device *coresight_get_enabled_sink(bool deactivate) |
2952 | +struct coresight_device * |
2953 | +coresight_get_enabled_sink(struct coresight_device *source) |
2954 | { |
2955 | - struct device *dev = NULL; |
2956 | - |
2957 | - dev = bus_find_device(&coresight_bustype, NULL, &deactivate, |
2958 | - coresight_enabled_sink); |
2959 | + if (!source) |
2960 | + return NULL; |
2961 | |
2962 | - return dev ? to_coresight_device(dev) : NULL; |
2963 | + return coresight_find_enabled_sink(source); |
2964 | } |
2965 | |
2966 | static int coresight_sink_by_id(struct device *dev, const void *data) |
2967 | @@ -764,11 +760,7 @@ int coresight_enable(struct coresight_device *csdev) |
2968 | goto out; |
2969 | } |
2970 | |
2971 | - /* |
2972 | - * Search for a valid sink for this session but don't reset the |
2973 | - * "enable_sink" flag in sysFS. Users get to do that explicitly. |
2974 | - */ |
2975 | - sink = coresight_get_enabled_sink(false); |
2976 | + sink = coresight_get_enabled_sink(csdev); |
2977 | if (!sink) { |
2978 | ret = -EINVAL; |
2979 | goto out; |
2980 | diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c |
2981 | index a3b61336fe557..9543c9816eed9 100644 |
2982 | --- a/drivers/i2c/busses/i2c-imx.c |
2983 | +++ b/drivers/i2c/busses/i2c-imx.c |
2984 | @@ -1112,14 +1112,6 @@ static int i2c_imx_probe(struct platform_device *pdev) |
2985 | return ret; |
2986 | } |
2987 | |
2988 | - /* Request IRQ */ |
2989 | - ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED, |
2990 | - pdev->name, i2c_imx); |
2991 | - if (ret) { |
2992 | - dev_err(&pdev->dev, "can't claim irq %d\n", irq); |
2993 | - goto clk_disable; |
2994 | - } |
2995 | - |
2996 | /* Init queue */ |
2997 | init_waitqueue_head(&i2c_imx->queue); |
2998 | |
2999 | @@ -1138,6 +1130,14 @@ static int i2c_imx_probe(struct platform_device *pdev) |
3000 | if (ret < 0) |
3001 | goto rpm_disable; |
3002 | |
3003 | + /* Request IRQ */ |
3004 | + ret = request_threaded_irq(irq, i2c_imx_isr, NULL, IRQF_SHARED, |
3005 | + pdev->name, i2c_imx); |
3006 | + if (ret) { |
3007 | + dev_err(&pdev->dev, "can't claim irq %d\n", irq); |
3008 | + goto rpm_disable; |
3009 | + } |
3010 | + |
3011 | /* Set up clock divider */ |
3012 | i2c_imx->bitrate = IMX_I2C_BIT_RATE; |
3013 | ret = of_property_read_u32(pdev->dev.of_node, |
3014 | @@ -1180,13 +1180,12 @@ static int i2c_imx_probe(struct platform_device *pdev) |
3015 | |
3016 | clk_notifier_unregister: |
3017 | clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); |
3018 | + free_irq(irq, i2c_imx); |
3019 | rpm_disable: |
3020 | pm_runtime_put_noidle(&pdev->dev); |
3021 | pm_runtime_disable(&pdev->dev); |
3022 | pm_runtime_set_suspended(&pdev->dev); |
3023 | pm_runtime_dont_use_autosuspend(&pdev->dev); |
3024 | - |
3025 | -clk_disable: |
3026 | clk_disable_unprepare(i2c_imx->clk); |
3027 | return ret; |
3028 | } |
3029 | @@ -1194,7 +1193,7 @@ clk_disable: |
3030 | static int i2c_imx_remove(struct platform_device *pdev) |
3031 | { |
3032 | struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev); |
3033 | - int ret; |
3034 | + int irq, ret; |
3035 | |
3036 | ret = pm_runtime_get_sync(&pdev->dev); |
3037 | if (ret < 0) |
3038 | @@ -1214,6 +1213,9 @@ static int i2c_imx_remove(struct platform_device *pdev) |
3039 | imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR); |
3040 | |
3041 | clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); |
3042 | + irq = platform_get_irq(pdev, 0); |
3043 | + if (irq >= 0) |
3044 | + free_irq(irq, i2c_imx); |
3045 | clk_disable_unprepare(i2c_imx->clk); |
3046 | |
3047 | pm_runtime_put_noidle(&pdev->dev); |
3048 | diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c |
3049 | index c37f201294b2a..b1fb1fd763fcd 100644 |
3050 | --- a/drivers/iio/adc/rcar-gyroadc.c |
3051 | +++ b/drivers/iio/adc/rcar-gyroadc.c |
3052 | @@ -357,7 +357,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) |
3053 | num_channels = ARRAY_SIZE(rcar_gyroadc_iio_channels_3); |
3054 | break; |
3055 | default: |
3056 | - return -EINVAL; |
3057 | + goto err_e_inval; |
3058 | } |
3059 | |
3060 | /* |
3061 | @@ -374,7 +374,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) |
3062 | dev_err(dev, |
3063 | "Failed to get child reg property of ADC \"%pOFn\".\n", |
3064 | child); |
3065 | - return ret; |
3066 | + goto err_of_node_put; |
3067 | } |
3068 | |
3069 | /* Channel number is too high. */ |
3070 | @@ -382,7 +382,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) |
3071 | dev_err(dev, |
3072 | "Only %i channels supported with %pOFn, but reg = <%i>.\n", |
3073 | num_channels, child, reg); |
3074 | - return -EINVAL; |
3075 | + goto err_e_inval; |
3076 | } |
3077 | } |
3078 | |
3079 | @@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) |
3080 | dev_err(dev, |
3081 | "Channel %i uses different ADC mode than the rest.\n", |
3082 | reg); |
3083 | - return -EINVAL; |
3084 | + goto err_e_inval; |
3085 | } |
3086 | |
3087 | /* Channel is valid, grab the regulator. */ |
3088 | @@ -401,7 +401,8 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) |
3089 | if (IS_ERR(vref)) { |
3090 | dev_dbg(dev, "Channel %i 'vref' supply not connected.\n", |
3091 | reg); |
3092 | - return PTR_ERR(vref); |
3093 | + ret = PTR_ERR(vref); |
3094 | + goto err_of_node_put; |
3095 | } |
3096 | |
3097 | priv->vref[reg] = vref; |
3098 | @@ -425,8 +426,10 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) |
3099 | * attached to the GyroADC at a time, so if we found it, |
3100 | * we can stop parsing here. |
3101 | */ |
3102 | - if (childmode == RCAR_GYROADC_MODE_SELECT_1_MB88101A) |
3103 | + if (childmode == RCAR_GYROADC_MODE_SELECT_1_MB88101A) { |
3104 | + of_node_put(child); |
3105 | break; |
3106 | + } |
3107 | } |
3108 | |
3109 | if (first) { |
3110 | @@ -435,6 +438,12 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) |
3111 | } |
3112 | |
3113 | return 0; |
3114 | + |
3115 | +err_e_inval: |
3116 | + ret = -EINVAL; |
3117 | +err_of_node_put: |
3118 | + of_node_put(child); |
3119 | + return ret; |
3120 | } |
3121 | |
3122 | static void rcar_gyroadc_deinit_supplies(struct iio_dev *indio_dev) |
3123 | diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c |
3124 | index 6ea39f4bbb370..55abd2fd2ccc8 100644 |
3125 | --- a/drivers/iio/adc/ti-adc0832.c |
3126 | +++ b/drivers/iio/adc/ti-adc0832.c |
3127 | @@ -28,6 +28,12 @@ struct adc0832 { |
3128 | struct regulator *reg; |
3129 | struct mutex lock; |
3130 | u8 mux_bits; |
3131 | + /* |
3132 | + * Max size needed: 16x 1 byte ADC data + 8 bytes timestamp |
3133 | + * May be shorter if not all channels are enabled subject |
3134 | + * to the timestamp remaining 8 byte aligned. |
3135 | + */ |
3136 | + u8 data[24] __aligned(8); |
3137 | |
3138 | u8 tx_buf[2] ____cacheline_aligned; |
3139 | u8 rx_buf[2]; |
3140 | @@ -199,7 +205,6 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p) |
3141 | struct iio_poll_func *pf = p; |
3142 | struct iio_dev *indio_dev = pf->indio_dev; |
3143 | struct adc0832 *adc = iio_priv(indio_dev); |
3144 | - u8 data[24] = { }; /* 16x 1 byte ADC data + 8 bytes timestamp */ |
3145 | int scan_index; |
3146 | int i = 0; |
3147 | |
3148 | @@ -217,10 +222,10 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p) |
3149 | goto out; |
3150 | } |
3151 | |
3152 | - data[i] = ret; |
3153 | + adc->data[i] = ret; |
3154 | i++; |
3155 | } |
3156 | - iio_push_to_buffers_with_timestamp(indio_dev, data, |
3157 | + iio_push_to_buffers_with_timestamp(indio_dev, adc->data, |
3158 | iio_get_time_ns(indio_dev)); |
3159 | out: |
3160 | mutex_unlock(&adc->lock); |
3161 | diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c |
3162 | index 68a9dcb8faa2f..db476482cc755 100644 |
3163 | --- a/drivers/iio/adc/ti-adc12138.c |
3164 | +++ b/drivers/iio/adc/ti-adc12138.c |
3165 | @@ -47,6 +47,12 @@ struct adc12138 { |
3166 | struct completion complete; |
3167 | /* The number of cclk periods for the S/H's acquisition time */ |
3168 | unsigned int acquisition_time; |
3169 | + /* |
3170 | + * Maximum size needed: 16x 2 bytes ADC data + 8 bytes timestamp. |
3171 | + * Less may be need if not all channels are enabled, as long as |
3172 | + * the 8 byte alignment of the timestamp is maintained. |
3173 | + */ |
3174 | + __be16 data[20] __aligned(8); |
3175 | |
3176 | u8 tx_buf[2] ____cacheline_aligned; |
3177 | u8 rx_buf[2]; |
3178 | @@ -329,7 +335,6 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p) |
3179 | struct iio_poll_func *pf = p; |
3180 | struct iio_dev *indio_dev = pf->indio_dev; |
3181 | struct adc12138 *adc = iio_priv(indio_dev); |
3182 | - __be16 data[20] = { }; /* 16x 2 bytes ADC data + 8 bytes timestamp */ |
3183 | __be16 trash; |
3184 | int ret; |
3185 | int scan_index; |
3186 | @@ -345,7 +350,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p) |
3187 | reinit_completion(&adc->complete); |
3188 | |
3189 | ret = adc12138_start_and_read_conv(adc, scan_chan, |
3190 | - i ? &data[i - 1] : &trash); |
3191 | + i ? &adc->data[i - 1] : &trash); |
3192 | if (ret) { |
3193 | dev_warn(&adc->spi->dev, |
3194 | "failed to start conversion\n"); |
3195 | @@ -362,7 +367,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p) |
3196 | } |
3197 | |
3198 | if (i) { |
3199 | - ret = adc12138_read_conv_data(adc, &data[i - 1]); |
3200 | + ret = adc12138_read_conv_data(adc, &adc->data[i - 1]); |
3201 | if (ret) { |
3202 | dev_warn(&adc->spi->dev, |
3203 | "failed to get conversion data\n"); |
3204 | @@ -370,7 +375,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p) |
3205 | } |
3206 | } |
3207 | |
3208 | - iio_push_to_buffers_with_timestamp(indio_dev, data, |
3209 | + iio_push_to_buffers_with_timestamp(indio_dev, adc->data, |
3210 | iio_get_time_ns(indio_dev)); |
3211 | out: |
3212 | mutex_unlock(&adc->lock); |
3213 | diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c |
3214 | index d3fbe9d86467c..1c3c1bd53374a 100644 |
3215 | --- a/drivers/iio/gyro/itg3200_buffer.c |
3216 | +++ b/drivers/iio/gyro/itg3200_buffer.c |
3217 | @@ -46,13 +46,20 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p) |
3218 | struct iio_poll_func *pf = p; |
3219 | struct iio_dev *indio_dev = pf->indio_dev; |
3220 | struct itg3200 *st = iio_priv(indio_dev); |
3221 | - __be16 buf[ITG3200_SCAN_ELEMENTS + sizeof(s64)/sizeof(u16)]; |
3222 | - |
3223 | - int ret = itg3200_read_all_channels(st->i2c, buf); |
3224 | + /* |
3225 | + * Ensure correct alignment and padding including for the |
3226 | + * timestamp that may be inserted. |
3227 | + */ |
3228 | + struct { |
3229 | + __be16 buf[ITG3200_SCAN_ELEMENTS]; |
3230 | + s64 ts __aligned(8); |
3231 | + } scan; |
3232 | + |
3233 | + int ret = itg3200_read_all_channels(st->i2c, scan.buf); |
3234 | if (ret < 0) |
3235 | goto error_ret; |
3236 | |
3237 | - iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp); |
3238 | + iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp); |
3239 | |
3240 | iio_trigger_notify_done(indio_dev->trig); |
3241 | |
3242 | diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c |
3243 | index 982bba0c54e7d..cb40345e7b51d 100644 |
3244 | --- a/drivers/iio/light/si1145.c |
3245 | +++ b/drivers/iio/light/si1145.c |
3246 | @@ -169,6 +169,7 @@ struct si1145_part_info { |
3247 | * @part_info: Part information |
3248 | * @trig: Pointer to iio trigger |
3249 | * @meas_rate: Value of MEAS_RATE register. Only set in HW in auto mode |
3250 | + * @buffer: Used to pack data read from sensor. |
3251 | */ |
3252 | struct si1145_data { |
3253 | struct i2c_client *client; |
3254 | @@ -180,6 +181,14 @@ struct si1145_data { |
3255 | bool autonomous; |
3256 | struct iio_trigger *trig; |
3257 | int meas_rate; |
3258 | + /* |
3259 | + * Ensure timestamp will be naturally aligned if present. |
3260 | + * Maximum buffer size (may be only partly used if not all |
3261 | + * channels are enabled): |
3262 | + * 6*2 bytes channels data + 4 bytes alignment + |
3263 | + * 8 bytes timestamp |
3264 | + */ |
3265 | + u8 buffer[24] __aligned(8); |
3266 | }; |
3267 | |
3268 | /** |
3269 | @@ -441,12 +450,6 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private) |
3270 | struct iio_poll_func *pf = private; |
3271 | struct iio_dev *indio_dev = pf->indio_dev; |
3272 | struct si1145_data *data = iio_priv(indio_dev); |
3273 | - /* |
3274 | - * Maximum buffer size: |
3275 | - * 6*2 bytes channels data + 4 bytes alignment + |
3276 | - * 8 bytes timestamp |
3277 | - */ |
3278 | - u8 buffer[24]; |
3279 | int i, j = 0; |
3280 | int ret; |
3281 | u8 irq_status = 0; |
3282 | @@ -479,7 +482,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private) |
3283 | |
3284 | ret = i2c_smbus_read_i2c_block_data_or_emulated( |
3285 | data->client, indio_dev->channels[i].address, |
3286 | - sizeof(u16) * run, &buffer[j]); |
3287 | + sizeof(u16) * run, &data->buffer[j]); |
3288 | if (ret < 0) |
3289 | goto done; |
3290 | j += run * sizeof(u16); |
3291 | @@ -494,7 +497,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private) |
3292 | goto done; |
3293 | } |
3294 | |
3295 | - iio_push_to_buffers_with_timestamp(indio_dev, buffer, |
3296 | + iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, |
3297 | iio_get_time_ns(indio_dev)); |
3298 | |
3299 | done: |
3300 | diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c |
3301 | index e521f3c3dbbf1..653ddf30973ec 100644 |
3302 | --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c |
3303 | +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c |
3304 | @@ -727,6 +727,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id) |
3305 | listener->qed_handle); |
3306 | |
3307 | cm_id->rem_ref(cm_id); |
3308 | + kfree(listener); |
3309 | return rc; |
3310 | } |
3311 | |
3312 | diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c |
3313 | index e1423f7648d61..4c039e4125d92 100644 |
3314 | --- a/drivers/input/serio/hil_mlc.c |
3315 | +++ b/drivers/input/serio/hil_mlc.c |
3316 | @@ -74,7 +74,7 @@ EXPORT_SYMBOL(hil_mlc_unregister); |
3317 | static LIST_HEAD(hil_mlcs); |
3318 | static DEFINE_RWLOCK(hil_mlcs_lock); |
3319 | static struct timer_list hil_mlcs_kicker; |
3320 | -static int hil_mlcs_probe; |
3321 | +static int hil_mlcs_probe, hil_mlc_stop; |
3322 | |
3323 | static void hil_mlcs_process(unsigned long unused); |
3324 | static DECLARE_TASKLET_DISABLED(hil_mlcs_tasklet, hil_mlcs_process, 0); |
3325 | @@ -702,9 +702,13 @@ static int hilse_donode(hil_mlc *mlc) |
3326 | if (!mlc->ostarted) { |
3327 | mlc->ostarted = 1; |
3328 | mlc->opacket = pack; |
3329 | - mlc->out(mlc); |
3330 | + rc = mlc->out(mlc); |
3331 | nextidx = HILSEN_DOZE; |
3332 | write_unlock_irqrestore(&mlc->lock, flags); |
3333 | + if (rc) { |
3334 | + hil_mlc_stop = 1; |
3335 | + return 1; |
3336 | + } |
3337 | break; |
3338 | } |
3339 | mlc->ostarted = 0; |
3340 | @@ -715,8 +719,13 @@ static int hilse_donode(hil_mlc *mlc) |
3341 | |
3342 | case HILSE_CTS: |
3343 | write_lock_irqsave(&mlc->lock, flags); |
3344 | - nextidx = mlc->cts(mlc) ? node->bad : node->good; |
3345 | + rc = mlc->cts(mlc); |
3346 | + nextidx = rc ? node->bad : node->good; |
3347 | write_unlock_irqrestore(&mlc->lock, flags); |
3348 | + if (rc) { |
3349 | + hil_mlc_stop = 1; |
3350 | + return 1; |
3351 | + } |
3352 | break; |
3353 | |
3354 | default: |
3355 | @@ -780,6 +789,12 @@ static void hil_mlcs_process(unsigned long unused) |
3356 | |
3357 | static void hil_mlcs_timer(struct timer_list *unused) |
3358 | { |
3359 | + if (hil_mlc_stop) { |
3360 | + /* could not send packet - stop immediately. */ |
3361 | + pr_warn(PREFIX "HIL seems stuck - Disabling HIL MLC.\n"); |
3362 | + return; |
3363 | + } |
3364 | + |
3365 | hil_mlcs_probe = 1; |
3366 | tasklet_schedule(&hil_mlcs_tasklet); |
3367 | /* Re-insert the periodic task. */ |
3368 | diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c |
3369 | index 232d30c825bd1..3e85e90393746 100644 |
3370 | --- a/drivers/input/serio/hp_sdc_mlc.c |
3371 | +++ b/drivers/input/serio/hp_sdc_mlc.c |
3372 | @@ -210,7 +210,7 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc) |
3373 | priv->tseq[2] = 1; |
3374 | priv->tseq[3] = 0; |
3375 | priv->tseq[4] = 0; |
3376 | - __hp_sdc_enqueue_transaction(&priv->trans); |
3377 | + return __hp_sdc_enqueue_transaction(&priv->trans); |
3378 | busy: |
3379 | return 1; |
3380 | done: |
3381 | @@ -219,7 +219,7 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc) |
3382 | return 0; |
3383 | } |
3384 | |
3385 | -static void hp_sdc_mlc_out(hil_mlc *mlc) |
3386 | +static int hp_sdc_mlc_out(hil_mlc *mlc) |
3387 | { |
3388 | struct hp_sdc_mlc_priv_s *priv; |
3389 | |
3390 | @@ -234,7 +234,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc) |
3391 | do_data: |
3392 | if (priv->emtestmode) { |
3393 | up(&mlc->osem); |
3394 | - return; |
3395 | + return 0; |
3396 | } |
3397 | /* Shouldn't be sending commands when loop may be busy */ |
3398 | BUG_ON(down_trylock(&mlc->csem)); |
3399 | @@ -296,7 +296,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc) |
3400 | BUG_ON(down_trylock(&mlc->csem)); |
3401 | } |
3402 | enqueue: |
3403 | - hp_sdc_enqueue_transaction(&priv->trans); |
3404 | + return hp_sdc_enqueue_transaction(&priv->trans); |
3405 | } |
3406 | |
3407 | static int __init hp_sdc_mlc_init(void) |
3408 | diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c |
3409 | index c50d34e2b0983..de932755bcb34 100644 |
3410 | --- a/drivers/leds/leds-bcm6328.c |
3411 | +++ b/drivers/leds/leds-bcm6328.c |
3412 | @@ -332,7 +332,7 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg, |
3413 | led->cdev.brightness_set = bcm6328_led_set; |
3414 | led->cdev.blink_set = bcm6328_blink_set; |
3415 | |
3416 | - rc = led_classdev_register(dev, &led->cdev); |
3417 | + rc = devm_led_classdev_register(dev, &led->cdev); |
3418 | if (rc < 0) |
3419 | return rc; |
3420 | |
3421 | diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c |
3422 | index aec285fd21c05..dbb8953334d90 100644 |
3423 | --- a/drivers/leds/leds-bcm6358.c |
3424 | +++ b/drivers/leds/leds-bcm6358.c |
3425 | @@ -137,7 +137,7 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg, |
3426 | |
3427 | led->cdev.brightness_set = bcm6358_led_set; |
3428 | |
3429 | - rc = led_classdev_register(dev, &led->cdev); |
3430 | + rc = devm_led_classdev_register(dev, &led->cdev); |
3431 | if (rc < 0) |
3432 | return rc; |
3433 | |
3434 | diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c |
3435 | index 7227d03dbbea7..0a6c200e3dcb2 100644 |
3436 | --- a/drivers/md/md-bitmap.c |
3437 | +++ b/drivers/md/md-bitmap.c |
3438 | @@ -1372,7 +1372,7 @@ __acquires(bitmap->lock) |
3439 | if (bitmap->bp[page].hijacked || |
3440 | bitmap->bp[page].map == NULL) |
3441 | csize = ((sector_t)1) << (bitmap->chunkshift + |
3442 | - PAGE_COUNTER_SHIFT - 1); |
3443 | + PAGE_COUNTER_SHIFT); |
3444 | else |
3445 | csize = ((sector_t)1) << bitmap->chunkshift; |
3446 | *blocks = csize - (offset & (csize - 1)); |
3447 | diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c |
3448 | index 02acd5d5a8488..08a7f97750f7a 100644 |
3449 | --- a/drivers/md/raid5.c |
3450 | +++ b/drivers/md/raid5.c |
3451 | @@ -2407,8 +2407,6 @@ static int resize_stripes(struct r5conf *conf, int newsize) |
3452 | } else |
3453 | err = -ENOMEM; |
3454 | |
3455 | - mutex_unlock(&conf->cache_size_mutex); |
3456 | - |
3457 | conf->slab_cache = sc; |
3458 | conf->active_name = 1-conf->active_name; |
3459 | |
3460 | @@ -2431,6 +2429,8 @@ static int resize_stripes(struct r5conf *conf, int newsize) |
3461 | |
3462 | if (!err) |
3463 | conf->pool_size = newsize; |
3464 | + mutex_unlock(&conf->cache_size_mutex); |
3465 | + |
3466 | return err; |
3467 | } |
3468 | |
3469 | diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c |
3470 | index 6011cec5e351d..e6aa9f32b6a83 100644 |
3471 | --- a/drivers/media/i2c/imx274.c |
3472 | +++ b/drivers/media/i2c/imx274.c |
3473 | @@ -1235,6 +1235,8 @@ static int imx274_s_frame_interval(struct v4l2_subdev *sd, |
3474 | ret = imx274_set_frame_interval(imx274, fi->interval); |
3475 | |
3476 | if (!ret) { |
3477 | + fi->interval = imx274->frame_interval; |
3478 | + |
3479 | /* |
3480 | * exposure time range is decided by frame interval |
3481 | * need to update it after frame interval changes |
3482 | @@ -1730,9 +1732,9 @@ static int imx274_set_frame_interval(struct stimx274 *priv, |
3483 | __func__, frame_interval.numerator, |
3484 | frame_interval.denominator); |
3485 | |
3486 | - if (frame_interval.numerator == 0) { |
3487 | - err = -EINVAL; |
3488 | - goto fail; |
3489 | + if (frame_interval.numerator == 0 || frame_interval.denominator == 0) { |
3490 | + frame_interval.denominator = IMX274_DEF_FRAME_RATE; |
3491 | + frame_interval.numerator = 1; |
3492 | } |
3493 | |
3494 | req_frame_rate = (u32)(frame_interval.denominator |
3495 | diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c |
3496 | index 09732eed7eb4f..656142c7a2cc7 100644 |
3497 | --- a/drivers/media/pci/tw5864/tw5864-video.c |
3498 | +++ b/drivers/media/pci/tw5864/tw5864-video.c |
3499 | @@ -767,6 +767,9 @@ static int tw5864_enum_frameintervals(struct file *file, void *priv, |
3500 | fintv->type = V4L2_FRMIVAL_TYPE_STEPWISE; |
3501 | |
3502 | ret = tw5864_frameinterval_get(input, &frameinterval); |
3503 | + if (ret) |
3504 | + return ret; |
3505 | + |
3506 | fintv->stepwise.step = frameinterval; |
3507 | fintv->stepwise.min = frameinterval; |
3508 | fintv->stepwise.max = frameinterval; |
3509 | @@ -785,6 +788,9 @@ static int tw5864_g_parm(struct file *file, void *priv, |
3510 | cp->capability = V4L2_CAP_TIMEPERFRAME; |
3511 | |
3512 | ret = tw5864_frameinterval_get(input, &cp->timeperframe); |
3513 | + if (ret) |
3514 | + return ret; |
3515 | + |
3516 | cp->timeperframe.numerator *= input->frame_interval; |
3517 | cp->capturemode = 0; |
3518 | cp->readbuffers = 2; |
3519 | diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c |
3520 | index ee802fc3bcdfc..9fa1bc5514f3e 100644 |
3521 | --- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c |
3522 | +++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c |
3523 | @@ -571,6 +571,13 @@ static int mtk_jpeg_queue_setup(struct vb2_queue *q, |
3524 | if (!q_data) |
3525 | return -EINVAL; |
3526 | |
3527 | + if (*num_planes) { |
3528 | + for (i = 0; i < *num_planes; i++) |
3529 | + if (sizes[i] < q_data->sizeimage[i]) |
3530 | + return -EINVAL; |
3531 | + return 0; |
3532 | + } |
3533 | + |
3534 | *num_planes = q_data->fmt->colplanes; |
3535 | for (i = 0; i < q_data->fmt->colplanes; i++) { |
3536 | sizes[i] = q_data->sizeimage[i]; |
3537 | diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c |
3538 | index a30a8a731eda8..36abe47997b01 100644 |
3539 | --- a/drivers/media/usb/uvc/uvc_ctrl.c |
3540 | +++ b/drivers/media/usb/uvc/uvc_ctrl.c |
3541 | @@ -1848,30 +1848,35 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain, |
3542 | { |
3543 | struct uvc_entity *entity; |
3544 | struct uvc_control *ctrl; |
3545 | - unsigned int i, found = 0; |
3546 | + unsigned int i; |
3547 | + bool found; |
3548 | u32 reqflags; |
3549 | u16 size; |
3550 | u8 *data = NULL; |
3551 | int ret; |
3552 | |
3553 | /* Find the extension unit. */ |
3554 | + found = false; |
3555 | list_for_each_entry(entity, &chain->entities, chain) { |
3556 | if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT && |
3557 | - entity->id == xqry->unit) |
3558 | + entity->id == xqry->unit) { |
3559 | + found = true; |
3560 | break; |
3561 | + } |
3562 | } |
3563 | |
3564 | - if (entity->id != xqry->unit) { |
3565 | + if (!found) { |
3566 | uvc_trace(UVC_TRACE_CONTROL, "Extension unit %u not found.\n", |
3567 | xqry->unit); |
3568 | return -ENOENT; |
3569 | } |
3570 | |
3571 | /* Find the control and perform delayed initialization if needed. */ |
3572 | + found = false; |
3573 | for (i = 0; i < entity->ncontrols; ++i) { |
3574 | ctrl = &entity->controls[i]; |
3575 | if (ctrl->index == xqry->selector - 1) { |
3576 | - found = 1; |
3577 | + found = true; |
3578 | break; |
3579 | } |
3580 | } |
3581 | @@ -2028,13 +2033,6 @@ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl, |
3582 | goto done; |
3583 | } |
3584 | |
3585 | - /* |
3586 | - * Retrieve control flags from the device. Ignore errors and work with |
3587 | - * default flag values from the uvc_ctrl array when the device doesn't |
3588 | - * properly implement GET_INFO on standard controls. |
3589 | - */ |
3590 | - uvc_ctrl_get_flags(dev, ctrl, &ctrl->info); |
3591 | - |
3592 | ctrl->initialized = 1; |
3593 | |
3594 | uvc_trace(UVC_TRACE_CONTROL, "Added control %pUl/%u to device %s " |
3595 | @@ -2257,6 +2255,13 @@ static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl) |
3596 | if (uvc_entity_match_guid(ctrl->entity, info->entity) && |
3597 | ctrl->index == info->index) { |
3598 | uvc_ctrl_add_info(dev, ctrl, info); |
3599 | + /* |
3600 | + * Retrieve control flags from the device. Ignore errors |
3601 | + * and work with default flag values from the uvc_ctrl |
3602 | + * array when the device doesn't properly implement |
3603 | + * GET_INFO on standard controls. |
3604 | + */ |
3605 | + uvc_ctrl_get_flags(dev, ctrl, &ctrl->info); |
3606 | break; |
3607 | } |
3608 | } |
3609 | diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c |
3610 | index 402c6bc8e621d..af296b6fcbbdc 100644 |
3611 | --- a/drivers/memory/emif.c |
3612 | +++ b/drivers/memory/emif.c |
3613 | @@ -163,35 +163,12 @@ static const struct file_operations emif_mr4_fops = { |
3614 | |
3615 | static int __init_or_module emif_debugfs_init(struct emif_data *emif) |
3616 | { |
3617 | - struct dentry *dentry; |
3618 | - int ret; |
3619 | - |
3620 | - dentry = debugfs_create_dir(dev_name(emif->dev), NULL); |
3621 | - if (!dentry) { |
3622 | - ret = -ENOMEM; |
3623 | - goto err0; |
3624 | - } |
3625 | - emif->debugfs_root = dentry; |
3626 | - |
3627 | - dentry = debugfs_create_file("regcache_dump", S_IRUGO, |
3628 | - emif->debugfs_root, emif, &emif_regdump_fops); |
3629 | - if (!dentry) { |
3630 | - ret = -ENOMEM; |
3631 | - goto err1; |
3632 | - } |
3633 | - |
3634 | - dentry = debugfs_create_file("mr4", S_IRUGO, |
3635 | - emif->debugfs_root, emif, &emif_mr4_fops); |
3636 | - if (!dentry) { |
3637 | - ret = -ENOMEM; |
3638 | - goto err1; |
3639 | - } |
3640 | - |
3641 | + emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL); |
3642 | + debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif, |
3643 | + &emif_regdump_fops); |
3644 | + debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif, |
3645 | + &emif_mr4_fops); |
3646 | return 0; |
3647 | -err1: |
3648 | - debugfs_remove_recursive(emif->debugfs_root); |
3649 | -err0: |
3650 | - return ret; |
3651 | } |
3652 | |
3653 | static void __exit emif_debugfs_exit(struct emif_data *emif) |
3654 | diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c |
3655 | index 1491561d2e5c9..4bc43d8eeb9e8 100644 |
3656 | --- a/drivers/message/fusion/mptscsih.c |
3657 | +++ b/drivers/message/fusion/mptscsih.c |
3658 | @@ -1176,8 +1176,10 @@ mptscsih_remove(struct pci_dev *pdev) |
3659 | MPT_SCSI_HOST *hd; |
3660 | int sz1; |
3661 | |
3662 | - if((hd = shost_priv(host)) == NULL) |
3663 | - return; |
3664 | + if (host == NULL) |
3665 | + hd = NULL; |
3666 | + else |
3667 | + hd = shost_priv(host); |
3668 | |
3669 | mptscsih_shutdown(pdev); |
3670 | |
3671 | @@ -1193,14 +1195,15 @@ mptscsih_remove(struct pci_dev *pdev) |
3672 | "Free'd ScsiLookup (%d) memory\n", |
3673 | ioc->name, sz1)); |
3674 | |
3675 | - kfree(hd->info_kbuf); |
3676 | + if (hd) |
3677 | + kfree(hd->info_kbuf); |
3678 | |
3679 | /* NULL the Scsi_Host pointer |
3680 | */ |
3681 | ioc->sh = NULL; |
3682 | |
3683 | - scsi_host_put(host); |
3684 | - |
3685 | + if (host) |
3686 | + scsi_host_put(host); |
3687 | mpt_detach(pdev); |
3688 | |
3689 | } |
3690 | diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c |
3691 | index b2d924c5e82ee..0dc8eafdc81d0 100644 |
3692 | --- a/drivers/mmc/host/sdhci-acpi.c |
3693 | +++ b/drivers/mmc/host/sdhci-acpi.c |
3694 | @@ -658,6 +658,43 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev, |
3695 | (host->mmc->caps & MMC_CAP_1_8V_DDR)) |
3696 | host->mmc->caps2 = MMC_CAP2_HS400_1_8V; |
3697 | |
3698 | + /* |
3699 | + * There are two types of presets out in the wild: |
3700 | + * 1) Default/broken presets. |
3701 | + * These presets have two sets of problems: |
3702 | + * a) The clock divisor for SDR12, SDR25, and SDR50 is too small. |
3703 | + * This results in clock frequencies that are 2x higher than |
3704 | + * acceptable. i.e., SDR12 = 25 MHz, SDR25 = 50 MHz, SDR50 = |
3705 | + * 100 MHz.x |
3706 | + * b) The HS200 and HS400 driver strengths don't match. |
3707 | + * By default, the SDR104 preset register has a driver strength of |
3708 | + * A, but the (internal) HS400 preset register has a driver |
3709 | + * strength of B. As part of initializing HS400, HS200 tuning |
3710 | + * needs to be performed. Having different driver strengths |
3711 | + * between tuning and operation is wrong. It results in different |
3712 | + * rise/fall times that lead to incorrect sampling. |
3713 | + * 2) Firmware with properly initialized presets. |
3714 | + * These presets have proper clock divisors. i.e., SDR12 => 12MHz, |
3715 | + * SDR25 => 25 MHz, SDR50 => 50 MHz. Additionally the HS200 and |
3716 | + * HS400 preset driver strengths match. |
3717 | + * |
3718 | + * Enabling presets for HS400 doesn't work for the following reasons: |
3719 | + * 1) sdhci_set_ios has a hard coded list of timings that are used |
3720 | + * to determine if presets should be enabled. |
3721 | + * 2) sdhci_get_preset_value is using a non-standard register to |
3722 | + * read out HS400 presets. The AMD controller doesn't support this |
3723 | + * non-standard register. In fact, it doesn't expose the HS400 |
3724 | + * preset register anywhere in the SDHCI memory map. This results |
3725 | + * in reading a garbage value and using the wrong presets. |
3726 | + * |
3727 | + * Since HS400 and HS200 presets must be identical, we could |
3728 | + * instead use the the SDR104 preset register. |
3729 | + * |
3730 | + * If the above issues are resolved we could remove this quirk for |
3731 | + * firmware that that has valid presets (i.e., SDR12 <= 12 MHz). |
3732 | + */ |
3733 | + host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; |
3734 | + |
3735 | host->mmc_host_ops.select_drive_strength = amd_select_drive_strength; |
3736 | host->mmc_host_ops.set_ios = amd_set_ios; |
3737 | host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning; |
3738 | diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c |
3739 | index 66ad46d0ba88b..64196c1b1c8f0 100644 |
3740 | --- a/drivers/mmc/host/sdhci-of-esdhc.c |
3741 | +++ b/drivers/mmc/host/sdhci-of-esdhc.c |
3742 | @@ -1004,6 +1004,17 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) |
3743 | |
3744 | esdhc_tuning_block_enable(host, true); |
3745 | |
3746 | + /* |
3747 | + * The eSDHC controller takes the data timeout value into account |
3748 | + * during tuning. If the SD card is too slow sending the response, the |
3749 | + * timer will expire and a "Buffer Read Ready" interrupt without data |
3750 | + * is triggered. This leads to tuning errors. |
3751 | + * |
3752 | + * Just set the timeout to the maximum value because the core will |
3753 | + * already take care of it in sdhci_send_tuning(). |
3754 | + */ |
3755 | + sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL); |
3756 | + |
3757 | hs400_tuning = host->flags & SDHCI_HS400_TUNING; |
3758 | |
3759 | do { |
3760 | diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c |
3761 | index 91d0cb08238cf..ddea4621cda10 100644 |
3762 | --- a/drivers/mmc/host/sdhci-pci-core.c |
3763 | +++ b/drivers/mmc/host/sdhci-pci-core.c |
3764 | @@ -24,6 +24,8 @@ |
3765 | #include <linux/iopoll.h> |
3766 | #include <linux/gpio.h> |
3767 | #include <linux/pm_runtime.h> |
3768 | +#include <linux/pm_qos.h> |
3769 | +#include <linux/debugfs.h> |
3770 | #include <linux/mmc/slot-gpio.h> |
3771 | #include <linux/mmc/sdhci-pci-data.h> |
3772 | #include <linux/acpi.h> |
3773 | @@ -520,6 +522,8 @@ struct intel_host { |
3774 | bool rpm_retune_ok; |
3775 | u32 glk_rx_ctrl1; |
3776 | u32 glk_tun_val; |
3777 | + u32 active_ltr; |
3778 | + u32 idle_ltr; |
3779 | }; |
3780 | |
3781 | static const guid_t intel_dsm_guid = |
3782 | @@ -764,6 +768,108 @@ static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode) |
3783 | return 0; |
3784 | } |
3785 | |
3786 | +#define INTEL_ACTIVELTR 0x804 |
3787 | +#define INTEL_IDLELTR 0x808 |
3788 | + |
3789 | +#define INTEL_LTR_REQ BIT(15) |
3790 | +#define INTEL_LTR_SCALE_MASK GENMASK(11, 10) |
3791 | +#define INTEL_LTR_SCALE_1US (2 << 10) |
3792 | +#define INTEL_LTR_SCALE_32US (3 << 10) |
3793 | +#define INTEL_LTR_VALUE_MASK GENMASK(9, 0) |
3794 | + |
3795 | +static void intel_cache_ltr(struct sdhci_pci_slot *slot) |
3796 | +{ |
3797 | + struct intel_host *intel_host = sdhci_pci_priv(slot); |
3798 | + struct sdhci_host *host = slot->host; |
3799 | + |
3800 | + intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR); |
3801 | + intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR); |
3802 | +} |
3803 | + |
3804 | +static void intel_ltr_set(struct device *dev, s32 val) |
3805 | +{ |
3806 | + struct sdhci_pci_chip *chip = dev_get_drvdata(dev); |
3807 | + struct sdhci_pci_slot *slot = chip->slots[0]; |
3808 | + struct intel_host *intel_host = sdhci_pci_priv(slot); |
3809 | + struct sdhci_host *host = slot->host; |
3810 | + u32 ltr; |
3811 | + |
3812 | + pm_runtime_get_sync(dev); |
3813 | + |
3814 | + /* |
3815 | + * Program latency tolerance (LTR) accordingly what has been asked |
3816 | + * by the PM QoS layer or disable it in case we were passed |
3817 | + * negative value or PM_QOS_LATENCY_ANY. |
3818 | + */ |
3819 | + ltr = readl(host->ioaddr + INTEL_ACTIVELTR); |
3820 | + |
3821 | + if (val == PM_QOS_LATENCY_ANY || val < 0) { |
3822 | + ltr &= ~INTEL_LTR_REQ; |
3823 | + } else { |
3824 | + ltr |= INTEL_LTR_REQ; |
3825 | + ltr &= ~INTEL_LTR_SCALE_MASK; |
3826 | + ltr &= ~INTEL_LTR_VALUE_MASK; |
3827 | + |
3828 | + if (val > INTEL_LTR_VALUE_MASK) { |
3829 | + val >>= 5; |
3830 | + if (val > INTEL_LTR_VALUE_MASK) |
3831 | + val = INTEL_LTR_VALUE_MASK; |
3832 | + ltr |= INTEL_LTR_SCALE_32US | val; |
3833 | + } else { |
3834 | + ltr |= INTEL_LTR_SCALE_1US | val; |
3835 | + } |
3836 | + } |
3837 | + |
3838 | + if (ltr == intel_host->active_ltr) |
3839 | + goto out; |
3840 | + |
3841 | + writel(ltr, host->ioaddr + INTEL_ACTIVELTR); |
3842 | + writel(ltr, host->ioaddr + INTEL_IDLELTR); |
3843 | + |
3844 | + /* Cache the values into lpss structure */ |
3845 | + intel_cache_ltr(slot); |
3846 | +out: |
3847 | + pm_runtime_put_autosuspend(dev); |
3848 | +} |
3849 | + |
3850 | +static bool intel_use_ltr(struct sdhci_pci_chip *chip) |
3851 | +{ |
3852 | + switch (chip->pdev->device) { |
3853 | + case PCI_DEVICE_ID_INTEL_BYT_EMMC: |
3854 | + case PCI_DEVICE_ID_INTEL_BYT_EMMC2: |
3855 | + case PCI_DEVICE_ID_INTEL_BYT_SDIO: |
3856 | + case PCI_DEVICE_ID_INTEL_BYT_SD: |
3857 | + case PCI_DEVICE_ID_INTEL_BSW_EMMC: |
3858 | + case PCI_DEVICE_ID_INTEL_BSW_SDIO: |
3859 | + case PCI_DEVICE_ID_INTEL_BSW_SD: |
3860 | + return false; |
3861 | + default: |
3862 | + return true; |
3863 | + } |
3864 | +} |
3865 | + |
3866 | +static void intel_ltr_expose(struct sdhci_pci_chip *chip) |
3867 | +{ |
3868 | + struct device *dev = &chip->pdev->dev; |
3869 | + |
3870 | + if (!intel_use_ltr(chip)) |
3871 | + return; |
3872 | + |
3873 | + dev->power.set_latency_tolerance = intel_ltr_set; |
3874 | + dev_pm_qos_expose_latency_tolerance(dev); |
3875 | +} |
3876 | + |
3877 | +static void intel_ltr_hide(struct sdhci_pci_chip *chip) |
3878 | +{ |
3879 | + struct device *dev = &chip->pdev->dev; |
3880 | + |
3881 | + if (!intel_use_ltr(chip)) |
3882 | + return; |
3883 | + |
3884 | + dev_pm_qos_hide_latency_tolerance(dev); |
3885 | + dev->power.set_latency_tolerance = NULL; |
3886 | +} |
3887 | + |
3888 | static void byt_probe_slot(struct sdhci_pci_slot *slot) |
3889 | { |
3890 | struct mmc_host_ops *ops = &slot->host->mmc_host_ops; |
3891 | @@ -778,6 +884,43 @@ static void byt_probe_slot(struct sdhci_pci_slot *slot) |
3892 | ops->start_signal_voltage_switch = intel_start_signal_voltage_switch; |
3893 | |
3894 | device_property_read_u32(dev, "max-frequency", &mmc->f_max); |
3895 | + |
3896 | + if (!mmc->slotno) { |
3897 | + slot->chip->slots[mmc->slotno] = slot; |
3898 | + intel_ltr_expose(slot->chip); |
3899 | + } |
3900 | +} |
3901 | + |
3902 | +static void byt_add_debugfs(struct sdhci_pci_slot *slot) |
3903 | +{ |
3904 | + struct intel_host *intel_host = sdhci_pci_priv(slot); |
3905 | + struct mmc_host *mmc = slot->host->mmc; |
3906 | + struct dentry *dir = mmc->debugfs_root; |
3907 | + |
3908 | + if (!intel_use_ltr(slot->chip)) |
3909 | + return; |
3910 | + |
3911 | + debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr); |
3912 | + debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr); |
3913 | + |
3914 | + intel_cache_ltr(slot); |
3915 | +} |
3916 | + |
3917 | +static int byt_add_host(struct sdhci_pci_slot *slot) |
3918 | +{ |
3919 | + int ret = sdhci_add_host(slot->host); |
3920 | + |
3921 | + if (!ret) |
3922 | + byt_add_debugfs(slot); |
3923 | + return ret; |
3924 | +} |
3925 | + |
3926 | +static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead) |
3927 | +{ |
3928 | + struct mmc_host *mmc = slot->host->mmc; |
3929 | + |
3930 | + if (!mmc->slotno) |
3931 | + intel_ltr_hide(slot->chip); |
3932 | } |
3933 | |
3934 | static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) |
3935 | @@ -859,6 +1002,8 @@ static int glk_emmc_add_host(struct sdhci_pci_slot *slot) |
3936 | if (ret) |
3937 | goto cleanup; |
3938 | |
3939 | + byt_add_debugfs(slot); |
3940 | + |
3941 | return 0; |
3942 | |
3943 | cleanup: |
3944 | @@ -1036,6 +1181,8 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { |
3945 | #endif |
3946 | .allow_runtime_pm = true, |
3947 | .probe_slot = byt_emmc_probe_slot, |
3948 | + .add_host = byt_add_host, |
3949 | + .remove_slot = byt_remove_slot, |
3950 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | |
3951 | SDHCI_QUIRK_NO_LED, |
3952 | .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | |
3953 | @@ -1049,6 +1196,7 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = { |
3954 | .allow_runtime_pm = true, |
3955 | .probe_slot = glk_emmc_probe_slot, |
3956 | .add_host = glk_emmc_add_host, |
3957 | + .remove_slot = byt_remove_slot, |
3958 | #ifdef CONFIG_PM_SLEEP |
3959 | .suspend = sdhci_cqhci_suspend, |
3960 | .resume = sdhci_cqhci_resume, |
3961 | @@ -1079,6 +1227,8 @@ static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = { |
3962 | SDHCI_QUIRK2_PRESET_VALUE_BROKEN, |
3963 | .allow_runtime_pm = true, |
3964 | .probe_slot = ni_byt_sdio_probe_slot, |
3965 | + .add_host = byt_add_host, |
3966 | + .remove_slot = byt_remove_slot, |
3967 | .ops = &sdhci_intel_byt_ops, |
3968 | .priv_size = sizeof(struct intel_host), |
3969 | }; |
3970 | @@ -1096,6 +1246,8 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { |
3971 | SDHCI_QUIRK2_PRESET_VALUE_BROKEN, |
3972 | .allow_runtime_pm = true, |
3973 | .probe_slot = byt_sdio_probe_slot, |
3974 | + .add_host = byt_add_host, |
3975 | + .remove_slot = byt_remove_slot, |
3976 | .ops = &sdhci_intel_byt_ops, |
3977 | .priv_size = sizeof(struct intel_host), |
3978 | }; |
3979 | @@ -1115,6 +1267,8 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { |
3980 | .allow_runtime_pm = true, |
3981 | .own_cd_for_runtime_pm = true, |
3982 | .probe_slot = byt_sd_probe_slot, |
3983 | + .add_host = byt_add_host, |
3984 | + .remove_slot = byt_remove_slot, |
3985 | .ops = &sdhci_intel_byt_ops, |
3986 | .priv_size = sizeof(struct intel_host), |
3987 | }; |
3988 | diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c |
3989 | index 136f9737713d8..a1aeb2e105641 100644 |
3990 | --- a/drivers/mmc/host/sdhci.c |
3991 | +++ b/drivers/mmc/host/sdhci.c |
3992 | @@ -1162,9 +1162,11 @@ static inline void sdhci_auto_cmd_select(struct sdhci_host *host, |
3993 | /* |
3994 | * In case of Version 4.10 or later, use of 'Auto CMD Auto |
3995 | * Select' is recommended rather than use of 'Auto CMD12 |
3996 | - * Enable' or 'Auto CMD23 Enable'. |
3997 | + * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode |
3998 | + * here because some controllers (e.g sdhci-of-dwmshc) expect it. |
3999 | */ |
4000 | - if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) { |
4001 | + if (host->version >= SDHCI_SPEC_410 && host->v4_mode && |
4002 | + (use_cmd12 || use_cmd23)) { |
4003 | *mode |= SDHCI_TRNS_AUTO_SEL; |
4004 | |
4005 | ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
4006 | diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c |
4007 | index 8d96ecba1b553..d12a068b0f9ed 100644 |
4008 | --- a/drivers/mmc/host/via-sdmmc.c |
4009 | +++ b/drivers/mmc/host/via-sdmmc.c |
4010 | @@ -1259,11 +1259,14 @@ static void via_init_sdc_pm(struct via_crdr_mmc_host *host) |
4011 | static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state) |
4012 | { |
4013 | struct via_crdr_mmc_host *host; |
4014 | + unsigned long flags; |
4015 | |
4016 | host = pci_get_drvdata(pcidev); |
4017 | |
4018 | + spin_lock_irqsave(&host->lock, flags); |
4019 | via_save_pcictrlreg(host); |
4020 | via_save_sdcreg(host); |
4021 | + spin_unlock_irqrestore(&host->lock, flags); |
4022 | |
4023 | pci_save_state(pcidev); |
4024 | pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0); |
4025 | diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c |
4026 | index 5d77a38dba542..7def041bbe484 100644 |
4027 | --- a/drivers/mtd/ubi/wl.c |
4028 | +++ b/drivers/mtd/ubi/wl.c |
4029 | @@ -1629,6 +1629,19 @@ int ubi_thread(void *u) |
4030 | !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) { |
4031 | set_current_state(TASK_INTERRUPTIBLE); |
4032 | spin_unlock(&ubi->wl_lock); |
4033 | + |
4034 | + /* |
4035 | + * Check kthread_should_stop() after we set the task |
4036 | + * state to guarantee that we either see the stop bit |
4037 | + * and exit or the task state is reset to runnable such |
4038 | + * that it's not scheduled out indefinitely and detects |
4039 | + * the stop bit at kthread_should_stop(). |
4040 | + */ |
4041 | + if (kthread_should_stop()) { |
4042 | + set_current_state(TASK_RUNNING); |
4043 | + break; |
4044 | + } |
4045 | + |
4046 | schedule(); |
4047 | continue; |
4048 | } |
4049 | diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c |
4050 | index aaa7ed1dc97ee..d59c6c87164f4 100644 |
4051 | --- a/drivers/net/can/flexcan.c |
4052 | +++ b/drivers/net/can/flexcan.c |
4053 | @@ -1703,8 +1703,6 @@ static int __maybe_unused flexcan_suspend(struct device *device) |
4054 | err = flexcan_chip_disable(priv); |
4055 | if (err) |
4056 | return err; |
4057 | - |
4058 | - err = pm_runtime_force_suspend(device); |
4059 | } |
4060 | netif_stop_queue(dev); |
4061 | netif_device_detach(dev); |
4062 | @@ -1730,10 +1728,6 @@ static int __maybe_unused flexcan_resume(struct device *device) |
4063 | if (err) |
4064 | return err; |
4065 | } else { |
4066 | - err = pm_runtime_force_resume(device); |
4067 | - if (err) |
4068 | - return err; |
4069 | - |
4070 | err = flexcan_chip_enable(priv); |
4071 | } |
4072 | } |
4073 | @@ -1764,8 +1758,16 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device) |
4074 | struct net_device *dev = dev_get_drvdata(device); |
4075 | struct flexcan_priv *priv = netdev_priv(dev); |
4076 | |
4077 | - if (netif_running(dev) && device_may_wakeup(device)) |
4078 | - flexcan_enable_wakeup_irq(priv, true); |
4079 | + if (netif_running(dev)) { |
4080 | + int err; |
4081 | + |
4082 | + if (device_may_wakeup(device)) |
4083 | + flexcan_enable_wakeup_irq(priv, true); |
4084 | + |
4085 | + err = pm_runtime_force_suspend(device); |
4086 | + if (err) |
4087 | + return err; |
4088 | + } |
4089 | |
4090 | return 0; |
4091 | } |
4092 | @@ -1775,8 +1777,16 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) |
4093 | struct net_device *dev = dev_get_drvdata(device); |
4094 | struct flexcan_priv *priv = netdev_priv(dev); |
4095 | |
4096 | - if (netif_running(dev) && device_may_wakeup(device)) |
4097 | - flexcan_enable_wakeup_irq(priv, false); |
4098 | + if (netif_running(dev)) { |
4099 | + int err; |
4100 | + |
4101 | + err = pm_runtime_force_resume(device); |
4102 | + if (err) |
4103 | + return err; |
4104 | + |
4105 | + if (device_may_wakeup(device)) |
4106 | + flexcan_enable_wakeup_irq(priv, false); |
4107 | + } |
4108 | |
4109 | return 0; |
4110 | } |
4111 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c |
4112 | index cdd3764760ed9..6f777e9b4b936 100644 |
4113 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c |
4114 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c |
4115 | @@ -8375,6 +8375,11 @@ static void bnxt_report_link(struct bnxt *bp) |
4116 | u16 fec; |
4117 | |
4118 | netif_carrier_on(bp->dev); |
4119 | + speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); |
4120 | + if (speed == SPEED_UNKNOWN) { |
4121 | + netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); |
4122 | + return; |
4123 | + } |
4124 | if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) |
4125 | duplex = "full"; |
4126 | else |
4127 | @@ -8387,7 +8392,6 @@ static void bnxt_report_link(struct bnxt *bp) |
4128 | flow_ctrl = "ON - receive"; |
4129 | else |
4130 | flow_ctrl = "none"; |
4131 | - speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); |
4132 | netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n", |
4133 | speed, duplex, flow_ctrl); |
4134 | if (bp->flags & BNXT_FLAG_EEE_CAP) |
4135 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c |
4136 | index 7277706847b18..8f0eec9fb17bd 100644 |
4137 | --- a/drivers/net/ethernet/mellanox/mlxsw/core.c |
4138 | +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c |
4139 | @@ -493,6 +493,9 @@ static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core, |
4140 | err = mlxsw_emad_transmit(trans->core, trans); |
4141 | if (err == 0) |
4142 | return; |
4143 | + |
4144 | + if (!atomic_dec_and_test(&trans->active)) |
4145 | + return; |
4146 | } else { |
4147 | err = -EIO; |
4148 | } |
4149 | diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c |
4150 | index d6cfd51613ed8..3a44dad87602d 100644 |
4151 | --- a/drivers/net/wan/hdlc_fr.c |
4152 | +++ b/drivers/net/wan/hdlc_fr.c |
4153 | @@ -273,63 +273,69 @@ static inline struct net_device **get_dev_p(struct pvc_device *pvc, |
4154 | |
4155 | static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) |
4156 | { |
4157 | - u16 head_len; |
4158 | struct sk_buff *skb = *skb_p; |
4159 | |
4160 | - switch (skb->protocol) { |
4161 | - case cpu_to_be16(NLPID_CCITT_ANSI_LMI): |
4162 | - head_len = 4; |
4163 | - skb_push(skb, head_len); |
4164 | - skb->data[3] = NLPID_CCITT_ANSI_LMI; |
4165 | - break; |
4166 | - |
4167 | - case cpu_to_be16(NLPID_CISCO_LMI): |
4168 | - head_len = 4; |
4169 | - skb_push(skb, head_len); |
4170 | - skb->data[3] = NLPID_CISCO_LMI; |
4171 | - break; |
4172 | - |
4173 | - case cpu_to_be16(ETH_P_IP): |
4174 | - head_len = 4; |
4175 | - skb_push(skb, head_len); |
4176 | - skb->data[3] = NLPID_IP; |
4177 | - break; |
4178 | - |
4179 | - case cpu_to_be16(ETH_P_IPV6): |
4180 | - head_len = 4; |
4181 | - skb_push(skb, head_len); |
4182 | - skb->data[3] = NLPID_IPV6; |
4183 | - break; |
4184 | - |
4185 | - case cpu_to_be16(ETH_P_802_3): |
4186 | - head_len = 10; |
4187 | - if (skb_headroom(skb) < head_len) { |
4188 | - struct sk_buff *skb2 = skb_realloc_headroom(skb, |
4189 | - head_len); |
4190 | + if (!skb->dev) { /* Control packets */ |
4191 | + switch (dlci) { |
4192 | + case LMI_CCITT_ANSI_DLCI: |
4193 | + skb_push(skb, 4); |
4194 | + skb->data[3] = NLPID_CCITT_ANSI_LMI; |
4195 | + break; |
4196 | + |
4197 | + case LMI_CISCO_DLCI: |
4198 | + skb_push(skb, 4); |
4199 | + skb->data[3] = NLPID_CISCO_LMI; |
4200 | + break; |
4201 | + |
4202 | + default: |
4203 | + return -EINVAL; |
4204 | + } |
4205 | + |
4206 | + } else if (skb->dev->type == ARPHRD_DLCI) { |
4207 | + switch (skb->protocol) { |
4208 | + case htons(ETH_P_IP): |
4209 | + skb_push(skb, 4); |
4210 | + skb->data[3] = NLPID_IP; |
4211 | + break; |
4212 | + |
4213 | + case htons(ETH_P_IPV6): |
4214 | + skb_push(skb, 4); |
4215 | + skb->data[3] = NLPID_IPV6; |
4216 | + break; |
4217 | + |
4218 | + default: |
4219 | + skb_push(skb, 10); |
4220 | + skb->data[3] = FR_PAD; |
4221 | + skb->data[4] = NLPID_SNAP; |
4222 | + /* OUI 00-00-00 indicates an Ethertype follows */ |
4223 | + skb->data[5] = 0x00; |
4224 | + skb->data[6] = 0x00; |
4225 | + skb->data[7] = 0x00; |
4226 | + /* This should be an Ethertype: */ |
4227 | + *(__be16 *)(skb->data + 8) = skb->protocol; |
4228 | + } |
4229 | + |
4230 | + } else if (skb->dev->type == ARPHRD_ETHER) { |
4231 | + if (skb_headroom(skb) < 10) { |
4232 | + struct sk_buff *skb2 = skb_realloc_headroom(skb, 10); |
4233 | if (!skb2) |
4234 | return -ENOBUFS; |
4235 | dev_kfree_skb(skb); |
4236 | skb = *skb_p = skb2; |
4237 | } |
4238 | - skb_push(skb, head_len); |
4239 | + skb_push(skb, 10); |
4240 | skb->data[3] = FR_PAD; |
4241 | skb->data[4] = NLPID_SNAP; |
4242 | - skb->data[5] = FR_PAD; |
4243 | + /* OUI 00-80-C2 stands for the 802.1 organization */ |
4244 | + skb->data[5] = 0x00; |
4245 | skb->data[6] = 0x80; |
4246 | skb->data[7] = 0xC2; |
4247 | + /* PID 00-07 stands for Ethernet frames without FCS */ |
4248 | skb->data[8] = 0x00; |
4249 | - skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */ |
4250 | - break; |
4251 | + skb->data[9] = 0x07; |
4252 | |
4253 | - default: |
4254 | - head_len = 10; |
4255 | - skb_push(skb, head_len); |
4256 | - skb->data[3] = FR_PAD; |
4257 | - skb->data[4] = NLPID_SNAP; |
4258 | - skb->data[5] = FR_PAD; |
4259 | - skb->data[6] = FR_PAD; |
4260 | - skb->data[7] = FR_PAD; |
4261 | - *(__be16*)(skb->data + 8) = skb->protocol; |
4262 | + } else { |
4263 | + return -EINVAL; |
4264 | } |
4265 | |
4266 | dlci_to_q922(skb->data, dlci); |
4267 | @@ -425,8 +431,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) |
4268 | skb_put(skb, pad); |
4269 | memset(skb->data + len, 0, pad); |
4270 | } |
4271 | - skb->protocol = cpu_to_be16(ETH_P_802_3); |
4272 | } |
4273 | + skb->dev = dev; |
4274 | if (!fr_hard_header(&skb, pvc->dlci)) { |
4275 | dev->stats.tx_bytes += skb->len; |
4276 | dev->stats.tx_packets++; |
4277 | @@ -494,10 +500,8 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) |
4278 | memset(skb->data, 0, len); |
4279 | skb_reserve(skb, 4); |
4280 | if (lmi == LMI_CISCO) { |
4281 | - skb->protocol = cpu_to_be16(NLPID_CISCO_LMI); |
4282 | fr_hard_header(&skb, LMI_CISCO_DLCI); |
4283 | } else { |
4284 | - skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI); |
4285 | fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); |
4286 | } |
4287 | data = skb_tail_pointer(skb); |
4288 | diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c |
4289 | index 8ca0a808a644d..04095f91d3014 100644 |
4290 | --- a/drivers/net/wireless/ath/ath10k/htt_rx.c |
4291 | +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c |
4292 | @@ -949,6 +949,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, |
4293 | u8 preamble = 0; |
4294 | u8 group_id; |
4295 | u32 info1, info2, info3; |
4296 | + u32 stbc, nsts_su; |
4297 | |
4298 | info1 = __le32_to_cpu(rxd->ppdu_start.info1); |
4299 | info2 = __le32_to_cpu(rxd->ppdu_start.info2); |
4300 | @@ -993,11 +994,16 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, |
4301 | */ |
4302 | bw = info2 & 3; |
4303 | sgi = info3 & 1; |
4304 | + stbc = (info2 >> 3) & 1; |
4305 | group_id = (info2 >> 4) & 0x3F; |
4306 | |
4307 | if (GROUP_ID_IS_SU_MIMO(group_id)) { |
4308 | mcs = (info3 >> 4) & 0x0F; |
4309 | - nss = ((info2 >> 10) & 0x07) + 1; |
4310 | + nsts_su = ((info2 >> 10) & 0x07); |
4311 | + if (stbc) |
4312 | + nss = (nsts_su >> 2) + 1; |
4313 | + else |
4314 | + nss = (nsts_su + 1); |
4315 | } else { |
4316 | /* Hardware doesn't decode VHT-SIG-B into Rx descriptor |
4317 | * so it's impossible to decode MCS. Also since |
4318 | diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c |
4319 | index 8fe626deadeb0..24b1927a07518 100644 |
4320 | --- a/drivers/net/wireless/ath/ath10k/sdio.c |
4321 | +++ b/drivers/net/wireless/ath/ath10k/sdio.c |
4322 | @@ -550,6 +550,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar, |
4323 | le16_to_cpu(htc_hdr->len), |
4324 | ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH); |
4325 | ret = -ENOMEM; |
4326 | + |
4327 | + queue_work(ar->workqueue, &ar->restart_work); |
4328 | + ath10k_warn(ar, "exceeds length, start recovery\n"); |
4329 | + |
4330 | goto err; |
4331 | } |
4332 | |
4333 | diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c |
4334 | index 79c8a858b6d6f..a30fcfbf2ee7c 100644 |
4335 | --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c |
4336 | +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c |
4337 | @@ -304,10 +304,12 @@ void brcmf_fweh_detach(struct brcmf_pub *drvr) |
4338 | { |
4339 | struct brcmf_fweh_info *fweh = &drvr->fweh; |
4340 | |
4341 | - /* cancel the worker */ |
4342 | - cancel_work_sync(&fweh->event_work); |
4343 | - WARN_ON(!list_empty(&fweh->event_q)); |
4344 | - memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler)); |
4345 | + /* cancel the worker if initialized */ |
4346 | + if (fweh->event_work.func) { |
4347 | + cancel_work_sync(&fweh->event_work); |
4348 | + WARN_ON(!list_empty(&fweh->event_q)); |
4349 | + memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler)); |
4350 | + } |
4351 | } |
4352 | |
4353 | /** |
4354 | diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h |
4355 | index 05847eb91a1b4..32fe131ba366d 100644 |
4356 | --- a/drivers/net/xen-netback/common.h |
4357 | +++ b/drivers/net/xen-netback/common.h |
4358 | @@ -140,6 +140,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */ |
4359 | char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ |
4360 | struct xenvif *vif; /* Parent VIF */ |
4361 | |
4362 | + /* |
4363 | + * TX/RX common EOI handling. |
4364 | + * When feature-split-event-channels = 0, interrupt handler sets |
4365 | + * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set |
4366 | + * by the RX and TX interrupt handlers. |
4367 | + * RX and TX handler threads will issue an EOI when either |
4368 | + * NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or |
4369 | + * NETBK_TX_EOI) are set and they will reset those bits. |
4370 | + */ |
4371 | + atomic_t eoi_pending; |
4372 | +#define NETBK_RX_EOI 0x01 |
4373 | +#define NETBK_TX_EOI 0x02 |
4374 | +#define NETBK_COMMON_EOI 0x04 |
4375 | + |
4376 | /* Use NAPI for guest TX */ |
4377 | struct napi_struct napi; |
4378 | /* When feature-split-event-channels = 0, tx_irq = rx_irq. */ |
4379 | @@ -375,6 +389,7 @@ int xenvif_dealloc_kthread(void *data); |
4380 | |
4381 | irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data); |
4382 | |
4383 | +bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread); |
4384 | void xenvif_rx_action(struct xenvif_queue *queue); |
4385 | void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); |
4386 | |
4387 | diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c |
4388 | index 103ed00775eb4..e889488b84a03 100644 |
4389 | --- a/drivers/net/xen-netback/interface.c |
4390 | +++ b/drivers/net/xen-netback/interface.c |
4391 | @@ -77,12 +77,28 @@ int xenvif_schedulable(struct xenvif *vif) |
4392 | !vif->disabled; |
4393 | } |
4394 | |
4395 | +static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue) |
4396 | +{ |
4397 | + bool rc; |
4398 | + |
4399 | + rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); |
4400 | + if (rc) |
4401 | + napi_schedule(&queue->napi); |
4402 | + return rc; |
4403 | +} |
4404 | + |
4405 | static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) |
4406 | { |
4407 | struct xenvif_queue *queue = dev_id; |
4408 | + int old; |
4409 | |
4410 | - if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)) |
4411 | - napi_schedule(&queue->napi); |
4412 | + old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending); |
4413 | + WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n"); |
4414 | + |
4415 | + if (!xenvif_handle_tx_interrupt(queue)) { |
4416 | + atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending); |
4417 | + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); |
4418 | + } |
4419 | |
4420 | return IRQ_HANDLED; |
4421 | } |
4422 | @@ -116,19 +132,46 @@ static int xenvif_poll(struct napi_struct *napi, int budget) |
4423 | return work_done; |
4424 | } |
4425 | |
4426 | +static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue) |
4427 | +{ |
4428 | + bool rc; |
4429 | + |
4430 | + rc = xenvif_have_rx_work(queue, false); |
4431 | + if (rc) |
4432 | + xenvif_kick_thread(queue); |
4433 | + return rc; |
4434 | +} |
4435 | + |
4436 | static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) |
4437 | { |
4438 | struct xenvif_queue *queue = dev_id; |
4439 | + int old; |
4440 | |
4441 | - xenvif_kick_thread(queue); |
4442 | + old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending); |
4443 | + WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n"); |
4444 | + |
4445 | + if (!xenvif_handle_rx_interrupt(queue)) { |
4446 | + atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending); |
4447 | + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); |
4448 | + } |
4449 | |
4450 | return IRQ_HANDLED; |
4451 | } |
4452 | |
4453 | irqreturn_t xenvif_interrupt(int irq, void *dev_id) |
4454 | { |
4455 | - xenvif_tx_interrupt(irq, dev_id); |
4456 | - xenvif_rx_interrupt(irq, dev_id); |
4457 | + struct xenvif_queue *queue = dev_id; |
4458 | + int old; |
4459 | + |
4460 | + old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending); |
4461 | + WARN(old, "Interrupt while EOI pending\n"); |
4462 | + |
4463 | + /* Use bitwise or as we need to call both functions. */ |
4464 | + if ((!xenvif_handle_tx_interrupt(queue) | |
4465 | + !xenvif_handle_rx_interrupt(queue))) { |
4466 | + atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending); |
4467 | + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); |
4468 | + } |
4469 | |
4470 | return IRQ_HANDLED; |
4471 | } |
4472 | @@ -595,7 +638,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref, |
4473 | shared = (struct xen_netif_ctrl_sring *)addr; |
4474 | BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE); |
4475 | |
4476 | - err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn); |
4477 | + err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn); |
4478 | if (err < 0) |
4479 | goto err_unmap; |
4480 | |
4481 | @@ -653,7 +696,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, |
4482 | |
4483 | if (tx_evtchn == rx_evtchn) { |
4484 | /* feature-split-event-channels == 0 */ |
4485 | - err = bind_interdomain_evtchn_to_irqhandler( |
4486 | + err = bind_interdomain_evtchn_to_irqhandler_lateeoi( |
4487 | queue->vif->domid, tx_evtchn, xenvif_interrupt, 0, |
4488 | queue->name, queue); |
4489 | if (err < 0) |
4490 | @@ -664,7 +707,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, |
4491 | /* feature-split-event-channels == 1 */ |
4492 | snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), |
4493 | "%s-tx", queue->name); |
4494 | - err = bind_interdomain_evtchn_to_irqhandler( |
4495 | + err = bind_interdomain_evtchn_to_irqhandler_lateeoi( |
4496 | queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, |
4497 | queue->tx_irq_name, queue); |
4498 | if (err < 0) |
4499 | @@ -674,7 +717,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, |
4500 | |
4501 | snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), |
4502 | "%s-rx", queue->name); |
4503 | - err = bind_interdomain_evtchn_to_irqhandler( |
4504 | + err = bind_interdomain_evtchn_to_irqhandler_lateeoi( |
4505 | queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, |
4506 | queue->rx_irq_name, queue); |
4507 | if (err < 0) |
4508 | diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c |
4509 | index 0020b2e8c279d..fa1ac0abc924b 100644 |
4510 | --- a/drivers/net/xen-netback/netback.c |
4511 | +++ b/drivers/net/xen-netback/netback.c |
4512 | @@ -162,6 +162,10 @@ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) |
4513 | |
4514 | if (more_to_do) |
4515 | napi_schedule(&queue->napi); |
4516 | + else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI, |
4517 | + &queue->eoi_pending) & |
4518 | + (NETBK_TX_EOI | NETBK_COMMON_EOI)) |
4519 | + xen_irq_lateeoi(queue->tx_irq, 0); |
4520 | } |
4521 | |
4522 | static void tx_add_credit(struct xenvif_queue *queue) |
4523 | @@ -1622,9 +1626,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif) |
4524 | irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data) |
4525 | { |
4526 | struct xenvif *vif = data; |
4527 | + unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS; |
4528 | |
4529 | - while (xenvif_ctrl_work_todo(vif)) |
4530 | + while (xenvif_ctrl_work_todo(vif)) { |
4531 | xenvif_ctrl_action(vif); |
4532 | + eoi_flag = 0; |
4533 | + } |
4534 | + |
4535 | + xen_irq_lateeoi(irq, eoi_flag); |
4536 | |
4537 | return IRQ_HANDLED; |
4538 | } |
4539 | diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c |
4540 | index ef5887037b225..9b62f65b630e4 100644 |
4541 | --- a/drivers/net/xen-netback/rx.c |
4542 | +++ b/drivers/net/xen-netback/rx.c |
4543 | @@ -490,13 +490,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) |
4544 | return queue->stalled && prod - cons >= 1; |
4545 | } |
4546 | |
4547 | -static bool xenvif_have_rx_work(struct xenvif_queue *queue) |
4548 | +bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread) |
4549 | { |
4550 | return xenvif_rx_ring_slots_available(queue) || |
4551 | (queue->vif->stall_timeout && |
4552 | (xenvif_rx_queue_stalled(queue) || |
4553 | xenvif_rx_queue_ready(queue))) || |
4554 | - kthread_should_stop() || |
4555 | + (test_kthread && kthread_should_stop()) || |
4556 | queue->vif->disabled; |
4557 | } |
4558 | |
4559 | @@ -527,15 +527,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) |
4560 | { |
4561 | DEFINE_WAIT(wait); |
4562 | |
4563 | - if (xenvif_have_rx_work(queue)) |
4564 | + if (xenvif_have_rx_work(queue, true)) |
4565 | return; |
4566 | |
4567 | for (;;) { |
4568 | long ret; |
4569 | |
4570 | prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); |
4571 | - if (xenvif_have_rx_work(queue)) |
4572 | + if (xenvif_have_rx_work(queue, true)) |
4573 | break; |
4574 | + if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI, |
4575 | + &queue->eoi_pending) & |
4576 | + (NETBK_RX_EOI | NETBK_COMMON_EOI)) |
4577 | + xen_irq_lateeoi(queue->rx_irq, 0); |
4578 | + |
4579 | ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); |
4580 | if (!ret) |
4581 | break; |
4582 | diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c |
4583 | index abe4fe496d05c..a41ee9feab8e7 100644 |
4584 | --- a/drivers/nvme/host/rdma.c |
4585 | +++ b/drivers/nvme/host/rdma.c |
4586 | @@ -1679,7 +1679,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, |
4587 | complete(&queue->cm_done); |
4588 | return 0; |
4589 | case RDMA_CM_EVENT_REJECTED: |
4590 | - nvme_rdma_destroy_queue_ib(queue); |
4591 | cm_error = nvme_rdma_conn_rejected(queue, ev); |
4592 | break; |
4593 | case RDMA_CM_EVENT_ROUTE_ERROR: |
4594 | diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c |
4595 | index 0c02d500158f0..3f40f951a6cdc 100644 |
4596 | --- a/drivers/pci/pci-acpi.c |
4597 | +++ b/drivers/pci/pci-acpi.c |
4598 | @@ -944,6 +944,16 @@ static bool acpi_pci_bridge_d3(struct pci_dev *dev) |
4599 | if (!dev->is_hotplug_bridge) |
4600 | return false; |
4601 | |
4602 | + /* Assume D3 support if the bridge is power-manageable by ACPI. */ |
4603 | + adev = ACPI_COMPANION(&dev->dev); |
4604 | + if (!adev && !pci_dev_is_added(dev)) { |
4605 | + adev = acpi_pci_find_companion(&dev->dev); |
4606 | + ACPI_COMPANION_SET(&dev->dev, adev); |
4607 | + } |
4608 | + |
4609 | + if (adev && acpi_device_power_manageable(adev)) |
4610 | + return true; |
4611 | + |
4612 | /* |
4613 | * Look for a special _DSD property for the root port and if it |
4614 | * is set we know the hierarchy behind it supports D3 just fine. |
4615 | diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c |
4616 | index 664e50103eaaf..aff0a0a5e7f8c 100644 |
4617 | --- a/drivers/power/supply/bq27xxx_battery.c |
4618 | +++ b/drivers/power/supply/bq27xxx_battery.c |
4619 | @@ -1678,8 +1678,6 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di, |
4620 | status = POWER_SUPPLY_STATUS_FULL; |
4621 | else if (di->cache.flags & BQ27000_FLAG_CHGS) |
4622 | status = POWER_SUPPLY_STATUS_CHARGING; |
4623 | - else if (power_supply_am_i_supplied(di->bat) > 0) |
4624 | - status = POWER_SUPPLY_STATUS_NOT_CHARGING; |
4625 | else |
4626 | status = POWER_SUPPLY_STATUS_DISCHARGING; |
4627 | } else { |
4628 | @@ -1691,6 +1689,10 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di, |
4629 | status = POWER_SUPPLY_STATUS_CHARGING; |
4630 | } |
4631 | |
4632 | + if ((status == POWER_SUPPLY_STATUS_DISCHARGING) && |
4633 | + (power_supply_am_i_supplied(di->bat) > 0)) |
4634 | + status = POWER_SUPPLY_STATUS_NOT_CHARGING; |
4635 | + |
4636 | val->intval = status; |
4637 | |
4638 | return 0; |
4639 | diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c |
4640 | index c3cad2b6dabae..1139ca7251952 100644 |
4641 | --- a/drivers/power/supply/test_power.c |
4642 | +++ b/drivers/power/supply/test_power.c |
4643 | @@ -341,6 +341,7 @@ static int param_set_ac_online(const char *key, const struct kernel_param *kp) |
4644 | static int param_get_ac_online(char *buffer, const struct kernel_param *kp) |
4645 | { |
4646 | strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown")); |
4647 | + strcat(buffer, "\n"); |
4648 | return strlen(buffer); |
4649 | } |
4650 | |
4651 | @@ -354,6 +355,7 @@ static int param_set_usb_online(const char *key, const struct kernel_param *kp) |
4652 | static int param_get_usb_online(char *buffer, const struct kernel_param *kp) |
4653 | { |
4654 | strcpy(buffer, map_get_key(map_ac_online, usb_online, "unknown")); |
4655 | + strcat(buffer, "\n"); |
4656 | return strlen(buffer); |
4657 | } |
4658 | |
4659 | @@ -368,6 +370,7 @@ static int param_set_battery_status(const char *key, |
4660 | static int param_get_battery_status(char *buffer, const struct kernel_param *kp) |
4661 | { |
4662 | strcpy(buffer, map_get_key(map_status, battery_status, "unknown")); |
4663 | + strcat(buffer, "\n"); |
4664 | return strlen(buffer); |
4665 | } |
4666 | |
4667 | @@ -382,6 +385,7 @@ static int param_set_battery_health(const char *key, |
4668 | static int param_get_battery_health(char *buffer, const struct kernel_param *kp) |
4669 | { |
4670 | strcpy(buffer, map_get_key(map_health, battery_health, "unknown")); |
4671 | + strcat(buffer, "\n"); |
4672 | return strlen(buffer); |
4673 | } |
4674 | |
4675 | @@ -397,6 +401,7 @@ static int param_get_battery_present(char *buffer, |
4676 | const struct kernel_param *kp) |
4677 | { |
4678 | strcpy(buffer, map_get_key(map_present, battery_present, "unknown")); |
4679 | + strcat(buffer, "\n"); |
4680 | return strlen(buffer); |
4681 | } |
4682 | |
4683 | @@ -414,6 +419,7 @@ static int param_get_battery_technology(char *buffer, |
4684 | { |
4685 | strcpy(buffer, |
4686 | map_get_key(map_technology, battery_technology, "unknown")); |
4687 | + strcat(buffer, "\n"); |
4688 | return strlen(buffer); |
4689 | } |
4690 | |
4691 | diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c |
4692 | index 1995f5b3ea677..d5114abcde197 100644 |
4693 | --- a/drivers/rpmsg/qcom_glink_native.c |
4694 | +++ b/drivers/rpmsg/qcom_glink_native.c |
4695 | @@ -970,7 +970,7 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid) |
4696 | return -EINVAL; |
4697 | } |
4698 | |
4699 | - complete(&channel->open_ack); |
4700 | + complete_all(&channel->open_ack); |
4701 | |
4702 | return 0; |
4703 | } |
4704 | @@ -1178,7 +1178,7 @@ static int qcom_glink_announce_create(struct rpmsg_device *rpdev) |
4705 | __be32 *val = defaults; |
4706 | int size; |
4707 | |
4708 | - if (glink->intentless) |
4709 | + if (glink->intentless || !completion_done(&channel->open_ack)) |
4710 | return 0; |
4711 | |
4712 | prop = of_find_property(np, "qcom,intents", NULL); |
4713 | @@ -1413,7 +1413,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid, |
4714 | channel->rcid = ret; |
4715 | spin_unlock_irqrestore(&glink->idr_lock, flags); |
4716 | |
4717 | - complete(&channel->open_req); |
4718 | + complete_all(&channel->open_req); |
4719 | |
4720 | if (create_device) { |
4721 | rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL); |
4722 | diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c |
4723 | index 8102469e27c05..0c436aea0da43 100644 |
4724 | --- a/drivers/rtc/rtc-rx8010.c |
4725 | +++ b/drivers/rtc/rtc-rx8010.c |
4726 | @@ -424,16 +424,26 @@ static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) |
4727 | } |
4728 | } |
4729 | |
4730 | -static struct rtc_class_ops rx8010_rtc_ops = { |
4731 | +static const struct rtc_class_ops rx8010_rtc_ops_default = { |
4732 | .read_time = rx8010_get_time, |
4733 | .set_time = rx8010_set_time, |
4734 | .ioctl = rx8010_ioctl, |
4735 | }; |
4736 | |
4737 | +static const struct rtc_class_ops rx8010_rtc_ops_alarm = { |
4738 | + .read_time = rx8010_get_time, |
4739 | + .set_time = rx8010_set_time, |
4740 | + .ioctl = rx8010_ioctl, |
4741 | + .read_alarm = rx8010_read_alarm, |
4742 | + .set_alarm = rx8010_set_alarm, |
4743 | + .alarm_irq_enable = rx8010_alarm_irq_enable, |
4744 | +}; |
4745 | + |
4746 | static int rx8010_probe(struct i2c_client *client, |
4747 | const struct i2c_device_id *id) |
4748 | { |
4749 | struct i2c_adapter *adapter = client->adapter; |
4750 | + const struct rtc_class_ops *rtc_ops; |
4751 | struct rx8010_data *rx8010; |
4752 | int err = 0; |
4753 | |
4754 | @@ -464,16 +474,16 @@ static int rx8010_probe(struct i2c_client *client, |
4755 | |
4756 | if (err) { |
4757 | dev_err(&client->dev, "unable to request IRQ\n"); |
4758 | - client->irq = 0; |
4759 | - } else { |
4760 | - rx8010_rtc_ops.read_alarm = rx8010_read_alarm; |
4761 | - rx8010_rtc_ops.set_alarm = rx8010_set_alarm; |
4762 | - rx8010_rtc_ops.alarm_irq_enable = rx8010_alarm_irq_enable; |
4763 | + return err; |
4764 | } |
4765 | + |
4766 | + rtc_ops = &rx8010_rtc_ops_alarm; |
4767 | + } else { |
4768 | + rtc_ops = &rx8010_rtc_ops_default; |
4769 | } |
4770 | |
4771 | rx8010->rtc = devm_rtc_device_register(&client->dev, client->name, |
4772 | - &rx8010_rtc_ops, THIS_MODULE); |
4773 | + rtc_ops, THIS_MODULE); |
4774 | |
4775 | if (IS_ERR(rx8010->rtc)) { |
4776 | dev_err(&client->dev, "unable to register the class device\n"); |
4777 | diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c |
4778 | index d84d95cac2a13..412009e2b9488 100644 |
4779 | --- a/drivers/scsi/qla2xxx/qla_target.c |
4780 | +++ b/drivers/scsi/qla2xxx/qla_target.c |
4781 | @@ -1230,14 +1230,15 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) |
4782 | case DSC_DELETE_PEND: |
4783 | return; |
4784 | case DSC_DELETED: |
4785 | - if (tgt && tgt->tgt_stop && (tgt->sess_count == 0)) |
4786 | - wake_up_all(&tgt->waitQ); |
4787 | - if (sess->vha->fcport_count == 0) |
4788 | - wake_up_all(&sess->vha->fcport_waitQ); |
4789 | - |
4790 | if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && |
4791 | - !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) |
4792 | + !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) { |
4793 | + if (tgt && tgt->tgt_stop && tgt->sess_count == 0) |
4794 | + wake_up_all(&tgt->waitQ); |
4795 | + |
4796 | + if (sess->vha->fcport_count == 0) |
4797 | + wake_up_all(&sess->vha->fcport_waitQ); |
4798 | return; |
4799 | + } |
4800 | break; |
4801 | case DSC_UPD_FCPORT: |
4802 | /* |
4803 | diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c |
4804 | index fa597e27be17c..e60aff9903951 100644 |
4805 | --- a/drivers/spi/spi-sprd.c |
4806 | +++ b/drivers/spi/spi-sprd.c |
4807 | @@ -563,11 +563,11 @@ static int sprd_spi_dma_request(struct sprd_spi *ss) |
4808 | |
4809 | ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn"); |
4810 | if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) { |
4811 | + dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]); |
4812 | if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]) == -EPROBE_DEFER) |
4813 | return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]); |
4814 | |
4815 | dev_err(ss->dev, "request TX DMA channel failed!\n"); |
4816 | - dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]); |
4817 | return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]); |
4818 | } |
4819 | |
4820 | diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c |
4821 | index 02ae00c953130..1893c70de0b93 100644 |
4822 | --- a/drivers/staging/comedi/drivers/cb_pcidas.c |
4823 | +++ b/drivers/staging/comedi/drivers/cb_pcidas.c |
4824 | @@ -1342,6 +1342,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev, |
4825 | if (dev->irq && board->has_ao_fifo) { |
4826 | dev->write_subdev = s; |
4827 | s->subdev_flags |= SDF_CMD_WRITE; |
4828 | + s->len_chanlist = s->n_chan; |
4829 | s->do_cmdtest = cb_pcidas_ao_cmdtest; |
4830 | s->do_cmd = cb_pcidas_ao_cmd; |
4831 | s->cancel = cb_pcidas_ao_cancel; |
4832 | diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c |
4833 | index 2ecffa42e561c..fbe693816f070 100644 |
4834 | --- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c |
4835 | +++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c |
4836 | @@ -297,7 +297,7 @@ static int controller_probe(struct platform_device *pdev) |
4837 | regulator = devm_regulator_register(dev, &can_power_desc, &config); |
4838 | if (IS_ERR(regulator)) { |
4839 | err = PTR_ERR(regulator); |
4840 | - goto out_reset; |
4841 | + goto out_ida; |
4842 | } |
4843 | /* make controller info visible to userspace */ |
4844 | cd->class_dev = kzalloc(sizeof(*cd->class_dev), GFP_KERNEL); |
4845 | diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c |
4846 | index ffac0c4b3f5ce..022c71e69a236 100644 |
4847 | --- a/drivers/staging/octeon/ethernet-mdio.c |
4848 | +++ b/drivers/staging/octeon/ethernet-mdio.c |
4849 | @@ -147,12 +147,6 @@ int cvm_oct_phy_setup_device(struct net_device *dev) |
4850 | |
4851 | phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0); |
4852 | if (!phy_node && of_phy_is_fixed_link(priv->of_node)) { |
4853 | - int rc; |
4854 | - |
4855 | - rc = of_phy_register_fixed_link(priv->of_node); |
4856 | - if (rc) |
4857 | - return rc; |
4858 | - |
4859 | phy_node = of_node_get(priv->of_node); |
4860 | } |
4861 | if (!phy_node) |
4862 | diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c |
4863 | index 0e65955c746b1..9a242d68a26b1 100644 |
4864 | --- a/drivers/staging/octeon/ethernet-rx.c |
4865 | +++ b/drivers/staging/octeon/ethernet-rx.c |
4866 | @@ -69,15 +69,17 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) |
4867 | else |
4868 | port = work->word1.cn38xx.ipprt; |
4869 | |
4870 | - if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) { |
4871 | + if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) |
4872 | /* |
4873 | * Ignore length errors on min size packets. Some |
4874 | * equipment incorrectly pads packets to 64+4FCS |
4875 | * instead of 60+4FCS. Note these packets still get |
4876 | * counted as frame errors. |
4877 | */ |
4878 | - } else if (work->word2.snoip.err_code == 5 || |
4879 | - work->word2.snoip.err_code == 7) { |
4880 | + return 0; |
4881 | + |
4882 | + if (work->word2.snoip.err_code == 5 || |
4883 | + work->word2.snoip.err_code == 7) { |
4884 | /* |
4885 | * We received a packet with either an alignment error |
4886 | * or a FCS error. This may be signalling that we are |
4887 | @@ -108,7 +110,10 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) |
4888 | /* Port received 0xd5 preamble */ |
4889 | work->packet_ptr.s.addr += i + 1; |
4890 | work->word1.len -= i + 5; |
4891 | - } else if ((*ptr & 0xf) == 0xd) { |
4892 | + return 0; |
4893 | + } |
4894 | + |
4895 | + if ((*ptr & 0xf) == 0xd) { |
4896 | /* Port received 0xd preamble */ |
4897 | work->packet_ptr.s.addr += i; |
4898 | work->word1.len -= i + 4; |
4899 | @@ -118,21 +123,20 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) |
4900 | ((*(ptr + 1) & 0xf) << 4); |
4901 | ptr++; |
4902 | } |
4903 | - } else { |
4904 | - printk_ratelimited("Port %d unknown preamble, packet dropped\n", |
4905 | - port); |
4906 | - cvm_oct_free_work(work); |
4907 | - return 1; |
4908 | + return 0; |
4909 | } |
4910 | + |
4911 | + printk_ratelimited("Port %d unknown preamble, packet dropped\n", |
4912 | + port); |
4913 | + cvm_oct_free_work(work); |
4914 | + return 1; |
4915 | } |
4916 | - } else { |
4917 | - printk_ratelimited("Port %d receive error code %d, packet dropped\n", |
4918 | - port, work->word2.snoip.err_code); |
4919 | - cvm_oct_free_work(work); |
4920 | - return 1; |
4921 | } |
4922 | |
4923 | - return 0; |
4924 | + printk_ratelimited("Port %d receive error code %d, packet dropped\n", |
4925 | + port, work->word2.snoip.err_code); |
4926 | + cvm_oct_free_work(work); |
4927 | + return 1; |
4928 | } |
4929 | |
4930 | static void copy_segments_to_skb(cvmx_wqe_t *work, struct sk_buff *skb) |
4931 | diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c |
4932 | index cf8e9a23ebf9f..77b7c1fd9d785 100644 |
4933 | --- a/drivers/staging/octeon/ethernet.c |
4934 | +++ b/drivers/staging/octeon/ethernet.c |
4935 | @@ -13,6 +13,7 @@ |
4936 | #include <linux/phy.h> |
4937 | #include <linux/slab.h> |
4938 | #include <linux/interrupt.h> |
4939 | +#include <linux/of_mdio.h> |
4940 | #include <linux/of_net.h> |
4941 | #include <linux/if_ether.h> |
4942 | #include <linux/if_vlan.h> |
4943 | @@ -894,6 +895,14 @@ static int cvm_oct_probe(struct platform_device *pdev) |
4944 | break; |
4945 | } |
4946 | |
4947 | + if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) { |
4948 | + if (of_phy_register_fixed_link(priv->of_node)) { |
4949 | + netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n", |
4950 | + interface, priv->port); |
4951 | + dev->netdev_ops = NULL; |
4952 | + } |
4953 | + } |
4954 | + |
4955 | if (!dev->netdev_ops) { |
4956 | free_netdev(dev); |
4957 | } else if (register_netdev(dev) < 0) { |
4958 | diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c |
4959 | index 568b2171f3359..b6e78fdbfdff9 100644 |
4960 | --- a/drivers/tty/vt/keyboard.c |
4961 | +++ b/drivers/tty/vt/keyboard.c |
4962 | @@ -742,8 +742,13 @@ static void k_fn(struct vc_data *vc, unsigned char value, char up_flag) |
4963 | return; |
4964 | |
4965 | if ((unsigned)value < ARRAY_SIZE(func_table)) { |
4966 | + unsigned long flags; |
4967 | + |
4968 | + spin_lock_irqsave(&func_buf_lock, flags); |
4969 | if (func_table[value]) |
4970 | puts_queue(vc, func_table[value]); |
4971 | + spin_unlock_irqrestore(&func_buf_lock, flags); |
4972 | + |
4973 | } else |
4974 | pr_err("k_fn called with value=%d\n", value); |
4975 | } |
4976 | @@ -1990,13 +1995,11 @@ out: |
4977 | #undef s |
4978 | #undef v |
4979 | |
4980 | -/* FIXME: This one needs untangling and locking */ |
4981 | +/* FIXME: This one needs untangling */ |
4982 | int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) |
4983 | { |
4984 | struct kbsentry *kbs; |
4985 | - char *p; |
4986 | u_char *q; |
4987 | - u_char __user *up; |
4988 | int sz, fnw_sz; |
4989 | int delta; |
4990 | char *first_free, *fj, *fnw; |
4991 | @@ -2022,23 +2025,19 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) |
4992 | i = kbs->kb_func; |
4993 | |
4994 | switch (cmd) { |
4995 | - case KDGKBSENT: |
4996 | - sz = sizeof(kbs->kb_string) - 1; /* sz should have been |
4997 | - a struct member */ |
4998 | - up = user_kdgkb->kb_string; |
4999 | - p = func_table[i]; |
5000 | - if(p) |
5001 | - for ( ; *p && sz; p++, sz--) |
5002 | - if (put_user(*p, up++)) { |
5003 | - ret = -EFAULT; |
5004 | - goto reterr; |
5005 | - } |
5006 | - if (put_user('\0', up)) { |
5007 | - ret = -EFAULT; |
5008 | - goto reterr; |
5009 | - } |
5010 | - kfree(kbs); |
5011 | - return ((p && *p) ? -EOVERFLOW : 0); |
5012 | + case KDGKBSENT: { |
5013 | + /* size should have been a struct member */ |
5014 | + ssize_t len = sizeof(user_kdgkb->kb_string); |
5015 | + |
5016 | + spin_lock_irqsave(&func_buf_lock, flags); |
5017 | + len = strlcpy(kbs->kb_string, func_table[i] ? : "", len); |
5018 | + spin_unlock_irqrestore(&func_buf_lock, flags); |
5019 | + |
5020 | + ret = copy_to_user(user_kdgkb->kb_string, kbs->kb_string, |
5021 | + len + 1) ? -EFAULT : 0; |
5022 | + |
5023 | + goto reterr; |
5024 | + } |
5025 | case KDSKBSENT: |
5026 | if (!perm) { |
5027 | ret = -EPERM; |
5028 | diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c |
5029 | index cbc85c995d92d..4f51be17427c0 100644 |
5030 | --- a/drivers/tty/vt/vt_ioctl.c |
5031 | +++ b/drivers/tty/vt/vt_ioctl.c |
5032 | @@ -244,7 +244,7 @@ int vt_waitactive(int n) |
5033 | |
5034 | |
5035 | static inline int |
5036 | -do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struct console_font_op *op) |
5037 | +do_fontx_ioctl(struct vc_data *vc, int cmd, struct consolefontdesc __user *user_cfd, int perm, struct console_font_op *op) |
5038 | { |
5039 | struct consolefontdesc cfdarg; |
5040 | int i; |
5041 | @@ -262,15 +262,16 @@ do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struc |
5042 | op->height = cfdarg.charheight; |
5043 | op->charcount = cfdarg.charcount; |
5044 | op->data = cfdarg.chardata; |
5045 | - return con_font_op(vc_cons[fg_console].d, op); |
5046 | - case GIO_FONTX: { |
5047 | + return con_font_op(vc, op); |
5048 | + |
5049 | + case GIO_FONTX: |
5050 | op->op = KD_FONT_OP_GET; |
5051 | op->flags = KD_FONT_FLAG_OLD; |
5052 | op->width = 8; |
5053 | op->height = cfdarg.charheight; |
5054 | op->charcount = cfdarg.charcount; |
5055 | op->data = cfdarg.chardata; |
5056 | - i = con_font_op(vc_cons[fg_console].d, op); |
5057 | + i = con_font_op(vc, op); |
5058 | if (i) |
5059 | return i; |
5060 | cfdarg.charheight = op->height; |
5061 | @@ -278,7 +279,6 @@ do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struc |
5062 | if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc))) |
5063 | return -EFAULT; |
5064 | return 0; |
5065 | - } |
5066 | } |
5067 | return -EINVAL; |
5068 | } |
5069 | @@ -924,7 +924,7 @@ int vt_ioctl(struct tty_struct *tty, |
5070 | op.height = 0; |
5071 | op.charcount = 256; |
5072 | op.data = up; |
5073 | - ret = con_font_op(vc_cons[fg_console].d, &op); |
5074 | + ret = con_font_op(vc, &op); |
5075 | break; |
5076 | } |
5077 | |
5078 | @@ -935,7 +935,7 @@ int vt_ioctl(struct tty_struct *tty, |
5079 | op.height = 32; |
5080 | op.charcount = 256; |
5081 | op.data = up; |
5082 | - ret = con_font_op(vc_cons[fg_console].d, &op); |
5083 | + ret = con_font_op(vc, &op); |
5084 | break; |
5085 | } |
5086 | |
5087 | @@ -952,7 +952,7 @@ int vt_ioctl(struct tty_struct *tty, |
5088 | |
5089 | case PIO_FONTX: |
5090 | case GIO_FONTX: |
5091 | - ret = do_fontx_ioctl(cmd, up, perm, &op); |
5092 | + ret = do_fontx_ioctl(vc, cmd, up, perm, &op); |
5093 | break; |
5094 | |
5095 | case PIO_FONTRESET: |
5096 | @@ -969,11 +969,11 @@ int vt_ioctl(struct tty_struct *tty, |
5097 | { |
5098 | op.op = KD_FONT_OP_SET_DEFAULT; |
5099 | op.data = NULL; |
5100 | - ret = con_font_op(vc_cons[fg_console].d, &op); |
5101 | + ret = con_font_op(vc, &op); |
5102 | if (ret) |
5103 | break; |
5104 | console_lock(); |
5105 | - con_set_default_unimap(vc_cons[fg_console].d); |
5106 | + con_set_default_unimap(vc); |
5107 | console_unlock(); |
5108 | break; |
5109 | } |
5110 | @@ -1100,8 +1100,9 @@ struct compat_consolefontdesc { |
5111 | }; |
5112 | |
5113 | static inline int |
5114 | -compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, |
5115 | - int perm, struct console_font_op *op) |
5116 | +compat_fontx_ioctl(struct vc_data *vc, int cmd, |
5117 | + struct compat_consolefontdesc __user *user_cfd, |
5118 | + int perm, struct console_font_op *op) |
5119 | { |
5120 | struct compat_consolefontdesc cfdarg; |
5121 | int i; |
5122 | @@ -1119,7 +1120,8 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, |
5123 | op->height = cfdarg.charheight; |
5124 | op->charcount = cfdarg.charcount; |
5125 | op->data = compat_ptr(cfdarg.chardata); |
5126 | - return con_font_op(vc_cons[fg_console].d, op); |
5127 | + return con_font_op(vc, op); |
5128 | + |
5129 | case GIO_FONTX: |
5130 | op->op = KD_FONT_OP_GET; |
5131 | op->flags = KD_FONT_FLAG_OLD; |
5132 | @@ -1127,7 +1129,7 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, |
5133 | op->height = cfdarg.charheight; |
5134 | op->charcount = cfdarg.charcount; |
5135 | op->data = compat_ptr(cfdarg.chardata); |
5136 | - i = con_font_op(vc_cons[fg_console].d, op); |
5137 | + i = con_font_op(vc, op); |
5138 | if (i) |
5139 | return i; |
5140 | cfdarg.charheight = op->height; |
5141 | @@ -1217,7 +1219,7 @@ long vt_compat_ioctl(struct tty_struct *tty, |
5142 | */ |
5143 | case PIO_FONTX: |
5144 | case GIO_FONTX: |
5145 | - return compat_fontx_ioctl(cmd, up, perm, &op); |
5146 | + return compat_fontx_ioctl(vc, cmd, up, perm, &op); |
5147 | |
5148 | case KDFONTOP: |
5149 | return compat_kdfontop_ioctl(up, perm, &op, vc); |
5150 | diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c |
5151 | index a57698985f9c4..8313f81968d51 100644 |
5152 | --- a/drivers/uio/uio.c |
5153 | +++ b/drivers/uio/uio.c |
5154 | @@ -1010,8 +1010,6 @@ void uio_unregister_device(struct uio_info *info) |
5155 | |
5156 | idev = info->uio_dev; |
5157 | |
5158 | - uio_free_minor(idev); |
5159 | - |
5160 | mutex_lock(&idev->info_lock); |
5161 | uio_dev_del_attributes(idev); |
5162 | |
5163 | @@ -1026,6 +1024,8 @@ void uio_unregister_device(struct uio_info *info) |
5164 | |
5165 | device_unregister(&idev->dev); |
5166 | |
5167 | + uio_free_minor(idev); |
5168 | + |
5169 | return; |
5170 | } |
5171 | EXPORT_SYMBOL_GPL(uio_unregister_device); |
5172 | diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c |
5173 | index 808722b8294a4..ed99d98172f40 100644 |
5174 | --- a/drivers/usb/class/cdc-acm.c |
5175 | +++ b/drivers/usb/class/cdc-acm.c |
5176 | @@ -507,6 +507,7 @@ static void acm_read_bulk_callback(struct urb *urb) |
5177 | "%s - cooling babbling device\n", __func__); |
5178 | usb_mark_last_busy(acm->dev); |
5179 | set_bit(rb->index, &acm->urbs_in_error_delay); |
5180 | + set_bit(ACM_ERROR_DELAY, &acm->flags); |
5181 | cooldown = true; |
5182 | break; |
5183 | default: |
5184 | @@ -532,7 +533,7 @@ static void acm_read_bulk_callback(struct urb *urb) |
5185 | |
5186 | if (stopped || stalled || cooldown) { |
5187 | if (stalled) |
5188 | - schedule_work(&acm->work); |
5189 | + schedule_delayed_work(&acm->dwork, 0); |
5190 | else if (cooldown) |
5191 | schedule_delayed_work(&acm->dwork, HZ / 2); |
5192 | return; |
5193 | @@ -562,13 +563,13 @@ static void acm_write_bulk(struct urb *urb) |
5194 | acm_write_done(acm, wb); |
5195 | spin_unlock_irqrestore(&acm->write_lock, flags); |
5196 | set_bit(EVENT_TTY_WAKEUP, &acm->flags); |
5197 | - schedule_work(&acm->work); |
5198 | + schedule_delayed_work(&acm->dwork, 0); |
5199 | } |
5200 | |
5201 | static void acm_softint(struct work_struct *work) |
5202 | { |
5203 | int i; |
5204 | - struct acm *acm = container_of(work, struct acm, work); |
5205 | + struct acm *acm = container_of(work, struct acm, dwork.work); |
5206 | |
5207 | if (test_bit(EVENT_RX_STALL, &acm->flags)) { |
5208 | smp_mb(); /* against acm_suspend() */ |
5209 | @@ -584,7 +585,7 @@ static void acm_softint(struct work_struct *work) |
5210 | if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) { |
5211 | for (i = 0; i < acm->rx_buflimit; i++) |
5212 | if (test_and_clear_bit(i, &acm->urbs_in_error_delay)) |
5213 | - acm_submit_read_urb(acm, i, GFP_NOIO); |
5214 | + acm_submit_read_urb(acm, i, GFP_KERNEL); |
5215 | } |
5216 | |
5217 | if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags)) |
5218 | @@ -1364,7 +1365,6 @@ made_compressed_probe: |
5219 | acm->ctrlsize = ctrlsize; |
5220 | acm->readsize = readsize; |
5221 | acm->rx_buflimit = num_rx_buf; |
5222 | - INIT_WORK(&acm->work, acm_softint); |
5223 | INIT_DELAYED_WORK(&acm->dwork, acm_softint); |
5224 | init_waitqueue_head(&acm->wioctl); |
5225 | spin_lock_init(&acm->write_lock); |
5226 | @@ -1574,7 +1574,6 @@ static void acm_disconnect(struct usb_interface *intf) |
5227 | } |
5228 | |
5229 | acm_kill_urbs(acm); |
5230 | - cancel_work_sync(&acm->work); |
5231 | cancel_delayed_work_sync(&acm->dwork); |
5232 | |
5233 | tty_unregister_device(acm_tty_driver, acm->minor); |
5234 | @@ -1617,7 +1616,6 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) |
5235 | return 0; |
5236 | |
5237 | acm_kill_urbs(acm); |
5238 | - cancel_work_sync(&acm->work); |
5239 | cancel_delayed_work_sync(&acm->dwork); |
5240 | acm->urbs_in_error_delay = 0; |
5241 | |
5242 | diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h |
5243 | index cd5e9d8ab2375..b95ff769072e7 100644 |
5244 | --- a/drivers/usb/class/cdc-acm.h |
5245 | +++ b/drivers/usb/class/cdc-acm.h |
5246 | @@ -112,8 +112,7 @@ struct acm { |
5247 | # define ACM_ERROR_DELAY 3 |
5248 | unsigned long urbs_in_error_delay; /* these need to be restarted after a delay */ |
5249 | struct usb_cdc_line_coding line; /* bits, stop, parity */ |
5250 | - struct work_struct work; /* work queue entry for various purposes*/ |
5251 | - struct delayed_work dwork; /* for cool downs needed in error recovery */ |
5252 | + struct delayed_work dwork; /* work queue entry for various purposes */ |
5253 | unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ |
5254 | unsigned int ctrlout; /* output control lines (DTR, RTS) */ |
5255 | struct async_icount iocount; /* counters for control line changes */ |
5256 | diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c |
5257 | index 4cbf295390062..440dbf55ddf70 100644 |
5258 | --- a/drivers/usb/dwc3/core.c |
5259 | +++ b/drivers/usb/dwc3/core.c |
5260 | @@ -1535,6 +1535,17 @@ static int dwc3_probe(struct platform_device *pdev) |
5261 | |
5262 | err5: |
5263 | dwc3_event_buffers_cleanup(dwc); |
5264 | + |
5265 | + usb_phy_shutdown(dwc->usb2_phy); |
5266 | + usb_phy_shutdown(dwc->usb3_phy); |
5267 | + phy_exit(dwc->usb2_generic_phy); |
5268 | + phy_exit(dwc->usb3_generic_phy); |
5269 | + |
5270 | + usb_phy_set_suspend(dwc->usb2_phy, 1); |
5271 | + usb_phy_set_suspend(dwc->usb3_phy, 1); |
5272 | + phy_power_off(dwc->usb2_generic_phy); |
5273 | + phy_power_off(dwc->usb3_generic_phy); |
5274 | + |
5275 | dwc3_ulpi_exit(dwc); |
5276 | |
5277 | err4: |
5278 | @@ -1570,9 +1581,9 @@ static int dwc3_remove(struct platform_device *pdev) |
5279 | dwc3_core_exit(dwc); |
5280 | dwc3_ulpi_exit(dwc); |
5281 | |
5282 | - pm_runtime_put_sync(&pdev->dev); |
5283 | - pm_runtime_allow(&pdev->dev); |
5284 | pm_runtime_disable(&pdev->dev); |
5285 | + pm_runtime_put_noidle(&pdev->dev); |
5286 | + pm_runtime_set_suspended(&pdev->dev); |
5287 | |
5288 | dwc3_free_event_buffers(dwc); |
5289 | dwc3_free_scratch_buffers(dwc); |
5290 | diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h |
5291 | index 4dfbffa944de1..c848f9164f929 100644 |
5292 | --- a/drivers/usb/dwc3/core.h |
5293 | +++ b/drivers/usb/dwc3/core.h |
5294 | @@ -700,6 +700,7 @@ struct dwc3_ep { |
5295 | #define DWC3_EP_END_TRANSFER_PENDING BIT(4) |
5296 | #define DWC3_EP_PENDING_REQUEST BIT(5) |
5297 | #define DWC3_EP_DELAY_START BIT(6) |
5298 | +#define DWC3_EP_PENDING_CLEAR_STALL BIT(11) |
5299 | |
5300 | /* This last one is specific to EP0 */ |
5301 | #define DWC3_EP0_DIR_IN BIT(31) |
5302 | diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c |
5303 | index 139474c3e77b1..ba88039449e03 100644 |
5304 | --- a/drivers/usb/dwc3/dwc3-pci.c |
5305 | +++ b/drivers/usb/dwc3/dwc3-pci.c |
5306 | @@ -147,7 +147,8 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc) |
5307 | |
5308 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { |
5309 | if (pdev->device == PCI_DEVICE_ID_INTEL_BXT || |
5310 | - pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) { |
5311 | + pdev->device == PCI_DEVICE_ID_INTEL_BXT_M || |
5312 | + pdev->device == PCI_DEVICE_ID_INTEL_EHLLP) { |
5313 | guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid); |
5314 | dwc->has_dsm_for_pm = true; |
5315 | } |
5316 | diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c |
5317 | index 6dee4dabc0a43..991cab9a7491d 100644 |
5318 | --- a/drivers/usb/dwc3/ep0.c |
5319 | +++ b/drivers/usb/dwc3/ep0.c |
5320 | @@ -524,6 +524,11 @@ static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc, |
5321 | ret = __dwc3_gadget_ep_set_halt(dep, set, true); |
5322 | if (ret) |
5323 | return -EINVAL; |
5324 | + |
5325 | + /* ClearFeature(Halt) may need delayed status */ |
5326 | + if (!set && (dep->flags & DWC3_EP_END_TRANSFER_PENDING)) |
5327 | + return USB_GADGET_DELAYED_STATUS; |
5328 | + |
5329 | break; |
5330 | default: |
5331 | return -EINVAL; |
5332 | @@ -942,12 +947,16 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc, |
5333 | static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, |
5334 | struct dwc3_ep *dep, struct dwc3_request *req) |
5335 | { |
5336 | + unsigned int trb_length = 0; |
5337 | int ret; |
5338 | |
5339 | req->direction = !!dep->number; |
5340 | |
5341 | if (req->request.length == 0) { |
5342 | - dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0, |
5343 | + if (!req->direction) |
5344 | + trb_length = dep->endpoint.maxpacket; |
5345 | + |
5346 | + dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, trb_length, |
5347 | DWC3_TRBCTL_CONTROL_DATA, false); |
5348 | ret = dwc3_ep0_start_trans(dep); |
5349 | } else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) |
5350 | @@ -994,9 +1003,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, |
5351 | |
5352 | req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1]; |
5353 | |
5354 | + if (!req->direction) |
5355 | + trb_length = dep->endpoint.maxpacket; |
5356 | + |
5357 | /* Now prepare one extra TRB to align transfer size */ |
5358 | dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, |
5359 | - 0, DWC3_TRBCTL_CONTROL_DATA, |
5360 | + trb_length, DWC3_TRBCTL_CONTROL_DATA, |
5361 | false); |
5362 | ret = dwc3_ep0_start_trans(dep); |
5363 | } else { |
5364 | @@ -1042,6 +1054,17 @@ static void dwc3_ep0_do_control_status(struct dwc3 *dwc, |
5365 | __dwc3_ep0_do_control_status(dwc, dep); |
5366 | } |
5367 | |
5368 | +void dwc3_ep0_send_delayed_status(struct dwc3 *dwc) |
5369 | +{ |
5370 | + unsigned int direction = !dwc->ep0_expect_in; |
5371 | + |
5372 | + if (dwc->ep0state != EP0_STATUS_PHASE) |
5373 | + return; |
5374 | + |
5375 | + dwc->delayed_status = false; |
5376 | + __dwc3_ep0_do_control_status(dwc, dwc->eps[direction]); |
5377 | +} |
5378 | + |
5379 | static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep) |
5380 | { |
5381 | struct dwc3_gadget_ep_cmd_params params; |
5382 | diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c |
5383 | index 809103254fc64..1d65de84464d5 100644 |
5384 | --- a/drivers/usb/dwc3/gadget.c |
5385 | +++ b/drivers/usb/dwc3/gadget.c |
5386 | @@ -1057,6 +1057,8 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, |
5387 | struct scatterlist *s; |
5388 | int i; |
5389 | unsigned int length = req->request.length; |
5390 | + unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); |
5391 | + unsigned int rem = length % maxp; |
5392 | unsigned int remaining = req->request.num_mapped_sgs |
5393 | - req->num_queued_sgs; |
5394 | |
5395 | @@ -1068,8 +1070,6 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, |
5396 | length -= sg_dma_len(s); |
5397 | |
5398 | for_each_sg(sg, s, remaining, i) { |
5399 | - unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); |
5400 | - unsigned int rem = length % maxp; |
5401 | unsigned int trb_length; |
5402 | unsigned chain = true; |
5403 | |
5404 | @@ -1521,8 +1521,13 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) |
5405 | list_add_tail(&req->list, &dep->pending_list); |
5406 | req->status = DWC3_REQUEST_STATUS_QUEUED; |
5407 | |
5408 | - /* Start the transfer only after the END_TRANSFER is completed */ |
5409 | - if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) { |
5410 | + /* |
5411 | + * Start the transfer only after the END_TRANSFER is completed |
5412 | + * and endpoint STALL is cleared. |
5413 | + */ |
5414 | + if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) || |
5415 | + (dep->flags & DWC3_EP_WEDGE) || |
5416 | + (dep->flags & DWC3_EP_STALL)) { |
5417 | dep->flags |= DWC3_EP_DELAY_START; |
5418 | return 0; |
5419 | } |
5420 | @@ -1714,6 +1719,18 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) |
5421 | return 0; |
5422 | } |
5423 | |
5424 | + dwc3_stop_active_transfer(dep, true, true); |
5425 | + |
5426 | + list_for_each_entry_safe(req, tmp, &dep->started_list, list) |
5427 | + dwc3_gadget_move_cancelled_request(req); |
5428 | + |
5429 | + if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) { |
5430 | + dep->flags |= DWC3_EP_PENDING_CLEAR_STALL; |
5431 | + return 0; |
5432 | + } |
5433 | + |
5434 | + dwc3_gadget_ep_cleanup_cancelled_requests(dep); |
5435 | + |
5436 | ret = dwc3_send_clear_stall_ep_cmd(dep); |
5437 | if (ret) { |
5438 | dev_err(dwc->dev, "failed to clear STALL on %s\n", |
5439 | @@ -1723,18 +1740,11 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) |
5440 | |
5441 | dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); |
5442 | |
5443 | - dwc3_stop_active_transfer(dep, true, true); |
5444 | + if ((dep->flags & DWC3_EP_DELAY_START) && |
5445 | + !usb_endpoint_xfer_isoc(dep->endpoint.desc)) |
5446 | + __dwc3_gadget_kick_transfer(dep); |
5447 | |
5448 | - list_for_each_entry_safe(req, tmp, &dep->started_list, list) |
5449 | - dwc3_gadget_move_cancelled_request(req); |
5450 | - |
5451 | - list_for_each_entry_safe(req, tmp, &dep->pending_list, list) |
5452 | - dwc3_gadget_move_cancelled_request(req); |
5453 | - |
5454 | - if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) { |
5455 | - dep->flags &= ~DWC3_EP_DELAY_START; |
5456 | - dwc3_gadget_ep_cleanup_cancelled_requests(dep); |
5457 | - } |
5458 | + dep->flags &= ~DWC3_EP_DELAY_START; |
5459 | } |
5460 | |
5461 | return ret; |
5462 | @@ -2761,6 +2771,26 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, |
5463 | dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; |
5464 | dep->flags &= ~DWC3_EP_TRANSFER_STARTED; |
5465 | dwc3_gadget_ep_cleanup_cancelled_requests(dep); |
5466 | + |
5467 | + if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) { |
5468 | + struct dwc3 *dwc = dep->dwc; |
5469 | + |
5470 | + dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL; |
5471 | + if (dwc3_send_clear_stall_ep_cmd(dep)) { |
5472 | + struct usb_ep *ep0 = &dwc->eps[0]->endpoint; |
5473 | + |
5474 | + dev_err(dwc->dev, "failed to clear STALL on %s\n", |
5475 | + dep->name); |
5476 | + if (dwc->delayed_status) |
5477 | + __dwc3_gadget_ep0_set_halt(ep0, 1); |
5478 | + return; |
5479 | + } |
5480 | + |
5481 | + dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); |
5482 | + if (dwc->delayed_status) |
5483 | + dwc3_ep0_send_delayed_status(dwc); |
5484 | + } |
5485 | + |
5486 | if ((dep->flags & DWC3_EP_DELAY_START) && |
5487 | !usb_endpoint_xfer_isoc(dep->endpoint.desc)) |
5488 | __dwc3_gadget_kick_transfer(dep); |
5489 | diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h |
5490 | index 5faf4d1249e02..f207e59c7d03c 100644 |
5491 | --- a/drivers/usb/dwc3/gadget.h |
5492 | +++ b/drivers/usb/dwc3/gadget.h |
5493 | @@ -111,6 +111,7 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value); |
5494 | int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, |
5495 | gfp_t gfp_flags); |
5496 | int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol); |
5497 | +void dwc3_ep0_send_delayed_status(struct dwc3 *dwc); |
5498 | |
5499 | /** |
5500 | * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW |
5501 | diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c |
5502 | index ae8f60f6e6a5e..44a7e58a26e3d 100644 |
5503 | --- a/drivers/usb/host/fsl-mph-dr-of.c |
5504 | +++ b/drivers/usb/host/fsl-mph-dr-of.c |
5505 | @@ -94,10 +94,13 @@ static struct platform_device *fsl_usb2_device_register( |
5506 | |
5507 | pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask; |
5508 | |
5509 | - if (!pdev->dev.dma_mask) |
5510 | + if (!pdev->dev.dma_mask) { |
5511 | pdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask; |
5512 | - else |
5513 | - dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
5514 | + } else { |
5515 | + retval = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
5516 | + if (retval) |
5517 | + goto error; |
5518 | + } |
5519 | |
5520 | retval = platform_device_add_data(pdev, pdata, sizeof(*pdata)); |
5521 | if (retval) |
5522 | diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c |
5523 | index bbd616324faaa..3c90c14390d60 100644 |
5524 | --- a/drivers/usb/host/xhci-pci.c |
5525 | +++ b/drivers/usb/host/xhci-pci.c |
5526 | @@ -21,6 +21,8 @@ |
5527 | #define SSIC_PORT_CFG2_OFFSET 0x30 |
5528 | #define PROG_DONE (1 << 30) |
5529 | #define SSIC_PORT_UNUSED (1 << 31) |
5530 | +#define SPARSE_DISABLE_BIT 17 |
5531 | +#define SPARSE_CNTL_ENABLE 0xC12C |
5532 | |
5533 | /* Device for a quirk */ |
5534 | #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 |
5535 | @@ -149,6 +151,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) |
5536 | (pdev->device == 0x15e0 || pdev->device == 0x15e1)) |
5537 | xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND; |
5538 | |
5539 | + if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) |
5540 | + xhci->quirks |= XHCI_DISABLE_SPARSE; |
5541 | + |
5542 | if (pdev->vendor == PCI_VENDOR_ID_AMD) |
5543 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; |
5544 | |
5545 | @@ -467,6 +472,15 @@ static void xhci_pme_quirk(struct usb_hcd *hcd) |
5546 | readl(reg); |
5547 | } |
5548 | |
5549 | +static void xhci_sparse_control_quirk(struct usb_hcd *hcd) |
5550 | +{ |
5551 | + u32 reg; |
5552 | + |
5553 | + reg = readl(hcd->regs + SPARSE_CNTL_ENABLE); |
5554 | + reg &= ~BIT(SPARSE_DISABLE_BIT); |
5555 | + writel(reg, hcd->regs + SPARSE_CNTL_ENABLE); |
5556 | +} |
5557 | + |
5558 | static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) |
5559 | { |
5560 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
5561 | @@ -486,6 +500,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) |
5562 | if (xhci->quirks & XHCI_SSIC_PORT_UNUSED) |
5563 | xhci_ssic_port_unused_quirk(hcd, true); |
5564 | |
5565 | + if (xhci->quirks & XHCI_DISABLE_SPARSE) |
5566 | + xhci_sparse_control_quirk(hcd); |
5567 | + |
5568 | ret = xhci_suspend(xhci, do_wakeup); |
5569 | if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED)) |
5570 | xhci_ssic_port_unused_quirk(hcd, false); |
5571 | diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c |
5572 | index 0d10ede581cbd..7123ab44671b2 100644 |
5573 | --- a/drivers/usb/host/xhci.c |
5574 | +++ b/drivers/usb/host/xhci.c |
5575 | @@ -982,12 +982,15 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) |
5576 | xhci->shared_hcd->state != HC_STATE_SUSPENDED) |
5577 | return -EINVAL; |
5578 | |
5579 | - xhci_dbc_suspend(xhci); |
5580 | - |
5581 | /* Clear root port wake on bits if wakeup not allowed. */ |
5582 | if (!do_wakeup) |
5583 | xhci_disable_port_wake_on_bits(xhci); |
5584 | |
5585 | + if (!HCD_HW_ACCESSIBLE(hcd)) |
5586 | + return 0; |
5587 | + |
5588 | + xhci_dbc_suspend(xhci); |
5589 | + |
5590 | /* Don't poll the roothubs on bus suspend. */ |
5591 | xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); |
5592 | clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); |
5593 | diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h |
5594 | index c656b41b57b5a..b483317bcb17b 100644 |
5595 | --- a/drivers/usb/host/xhci.h |
5596 | +++ b/drivers/usb/host/xhci.h |
5597 | @@ -1873,6 +1873,7 @@ struct xhci_hcd { |
5598 | #define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) |
5599 | #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) |
5600 | #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) |
5601 | +#define XHCI_DISABLE_SPARSE BIT_ULL(38) |
5602 | |
5603 | unsigned int num_active_eps; |
5604 | unsigned int limit_active_eps; |
5605 | diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c |
5606 | index d8d157c4c271d..96495fcd952aa 100644 |
5607 | --- a/drivers/usb/misc/adutux.c |
5608 | +++ b/drivers/usb/misc/adutux.c |
5609 | @@ -209,6 +209,7 @@ static void adu_interrupt_out_callback(struct urb *urb) |
5610 | |
5611 | if (status != 0) { |
5612 | if ((status != -ENOENT) && |
5613 | + (status != -ESHUTDOWN) && |
5614 | (status != -ECONNRESET)) { |
5615 | dev_dbg(&dev->udev->dev, |
5616 | "%s :nonzero status received: %d\n", __func__, |
5617 | diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c |
5618 | index 355a2c7fac0b4..5bb84cb4876a9 100644 |
5619 | --- a/drivers/usb/typec/tcpm/tcpm.c |
5620 | +++ b/drivers/usb/typec/tcpm/tcpm.c |
5621 | @@ -2723,12 +2723,12 @@ static void tcpm_reset_port(struct tcpm_port *port) |
5622 | |
5623 | static void tcpm_detach(struct tcpm_port *port) |
5624 | { |
5625 | - if (!port->attached) |
5626 | - return; |
5627 | - |
5628 | if (tcpm_port_is_disconnected(port)) |
5629 | port->hard_reset_count = 0; |
5630 | |
5631 | + if (!port->attached) |
5632 | + return; |
5633 | + |
5634 | tcpm_reset_port(port); |
5635 | } |
5636 | |
5637 | @@ -3482,7 +3482,7 @@ static void run_state_machine(struct tcpm_port *port) |
5638 | */ |
5639 | tcpm_set_pwr_role(port, TYPEC_SOURCE); |
5640 | tcpm_pd_send_control(port, PD_CTRL_PS_RDY); |
5641 | - tcpm_set_state(port, SRC_STARTUP, 0); |
5642 | + tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START); |
5643 | break; |
5644 | |
5645 | case VCONN_SWAP_ACCEPT: |
5646 | diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c |
5647 | index a0a2d74967ef5..026a37ee41777 100644 |
5648 | --- a/drivers/vhost/vringh.c |
5649 | +++ b/drivers/vhost/vringh.c |
5650 | @@ -274,13 +274,14 @@ __vringh_iov(struct vringh *vrh, u16 i, |
5651 | desc_max = vrh->vring.num; |
5652 | up_next = -1; |
5653 | |
5654 | + /* You must want something! */ |
5655 | + if (WARN_ON(!riov && !wiov)) |
5656 | + return -EINVAL; |
5657 | + |
5658 | if (riov) |
5659 | riov->i = riov->used = 0; |
5660 | - else if (wiov) |
5661 | + if (wiov) |
5662 | wiov->i = wiov->used = 0; |
5663 | - else |
5664 | - /* You must want something! */ |
5665 | - BUG(); |
5666 | |
5667 | for (;;) { |
5668 | void *addr; |
5669 | diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c |
5670 | index 0a3b2b7c78912..c916e91614436 100644 |
5671 | --- a/drivers/video/fbdev/pvr2fb.c |
5672 | +++ b/drivers/video/fbdev/pvr2fb.c |
5673 | @@ -1016,6 +1016,8 @@ static int __init pvr2fb_setup(char *options) |
5674 | if (!options || !*options) |
5675 | return 0; |
5676 | |
5677 | + cable_arg[0] = output_arg[0] = 0; |
5678 | + |
5679 | while ((this_opt = strsep(&options, ","))) { |
5680 | if (!*this_opt) |
5681 | continue; |
5682 | diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c |
5683 | index 1ca880e014769..090cbbf9e1e22 100644 |
5684 | --- a/drivers/w1/masters/mxc_w1.c |
5685 | +++ b/drivers/w1/masters/mxc_w1.c |
5686 | @@ -7,7 +7,7 @@ |
5687 | #include <linux/clk.h> |
5688 | #include <linux/delay.h> |
5689 | #include <linux/io.h> |
5690 | -#include <linux/jiffies.h> |
5691 | +#include <linux/ktime.h> |
5692 | #include <linux/module.h> |
5693 | #include <linux/mod_devicetable.h> |
5694 | #include <linux/platform_device.h> |
5695 | @@ -40,12 +40,12 @@ struct mxc_w1_device { |
5696 | static u8 mxc_w1_ds2_reset_bus(void *data) |
5697 | { |
5698 | struct mxc_w1_device *dev = data; |
5699 | - unsigned long timeout; |
5700 | + ktime_t timeout; |
5701 | |
5702 | writeb(MXC_W1_CONTROL_RPP, dev->regs + MXC_W1_CONTROL); |
5703 | |
5704 | /* Wait for reset sequence 511+512us, use 1500us for sure */ |
5705 | - timeout = jiffies + usecs_to_jiffies(1500); |
5706 | + timeout = ktime_add_us(ktime_get(), 1500); |
5707 | |
5708 | udelay(511 + 512); |
5709 | |
5710 | @@ -55,7 +55,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data) |
5711 | /* PST bit is valid after the RPP bit is self-cleared */ |
5712 | if (!(ctrl & MXC_W1_CONTROL_RPP)) |
5713 | return !(ctrl & MXC_W1_CONTROL_PST); |
5714 | - } while (time_is_after_jiffies(timeout)); |
5715 | + } while (ktime_before(ktime_get(), timeout)); |
5716 | |
5717 | return 1; |
5718 | } |
5719 | @@ -68,12 +68,12 @@ static u8 mxc_w1_ds2_reset_bus(void *data) |
5720 | static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit) |
5721 | { |
5722 | struct mxc_w1_device *dev = data; |
5723 | - unsigned long timeout; |
5724 | + ktime_t timeout; |
5725 | |
5726 | writeb(MXC_W1_CONTROL_WR(bit), dev->regs + MXC_W1_CONTROL); |
5727 | |
5728 | /* Wait for read/write bit (60us, Max 120us), use 200us for sure */ |
5729 | - timeout = jiffies + usecs_to_jiffies(200); |
5730 | + timeout = ktime_add_us(ktime_get(), 200); |
5731 | |
5732 | udelay(60); |
5733 | |
5734 | @@ -83,7 +83,7 @@ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit) |
5735 | /* RDST bit is valid after the WR1/RD bit is self-cleared */ |
5736 | if (!(ctrl & MXC_W1_CONTROL_WR(bit))) |
5737 | return !!(ctrl & MXC_W1_CONTROL_RDST); |
5738 | - } while (time_is_after_jiffies(timeout)); |
5739 | + } while (ktime_before(ktime_get(), timeout)); |
5740 | |
5741 | return 0; |
5742 | } |
5743 | diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c |
5744 | index 2e608ae6cbc78..e0efbc5831986 100644 |
5745 | --- a/drivers/watchdog/rdc321x_wdt.c |
5746 | +++ b/drivers/watchdog/rdc321x_wdt.c |
5747 | @@ -230,6 +230,8 @@ static int rdc321x_wdt_probe(struct platform_device *pdev) |
5748 | |
5749 | rdc321x_wdt_device.sb_pdev = pdata->sb_pdev; |
5750 | rdc321x_wdt_device.base_reg = r->start; |
5751 | + rdc321x_wdt_device.queue = 0; |
5752 | + rdc321x_wdt_device.default_ticks = ticks; |
5753 | |
5754 | err = misc_register(&rdc321x_wdt_misc); |
5755 | if (err < 0) { |
5756 | @@ -244,14 +246,11 @@ static int rdc321x_wdt_probe(struct platform_device *pdev) |
5757 | rdc321x_wdt_device.base_reg, RDC_WDT_RST); |
5758 | |
5759 | init_completion(&rdc321x_wdt_device.stop); |
5760 | - rdc321x_wdt_device.queue = 0; |
5761 | |
5762 | clear_bit(0, &rdc321x_wdt_device.inuse); |
5763 | |
5764 | timer_setup(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0); |
5765 | |
5766 | - rdc321x_wdt_device.default_ticks = ticks; |
5767 | - |
5768 | dev_info(&pdev->dev, "watchdog init success\n"); |
5769 | |
5770 | return 0; |
5771 | diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c |
5772 | index 8edef51c92e59..f026624898e7a 100644 |
5773 | --- a/drivers/xen/events/events_2l.c |
5774 | +++ b/drivers/xen/events/events_2l.c |
5775 | @@ -91,6 +91,8 @@ static void evtchn_2l_unmask(unsigned port) |
5776 | |
5777 | BUG_ON(!irqs_disabled()); |
5778 | |
5779 | + smp_wmb(); /* All writes before unmask must be visible. */ |
5780 | + |
5781 | if (unlikely((cpu != cpu_from_evtchn(port)))) |
5782 | do_hypercall = 1; |
5783 | else { |
5784 | @@ -159,7 +161,7 @@ static inline xen_ulong_t active_evtchns(unsigned int cpu, |
5785 | * a bitset of words which contain pending event bits. The second |
5786 | * level is a bitset of pending events themselves. |
5787 | */ |
5788 | -static void evtchn_2l_handle_events(unsigned cpu) |
5789 | +static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl) |
5790 | { |
5791 | int irq; |
5792 | xen_ulong_t pending_words; |
5793 | @@ -240,10 +242,7 @@ static void evtchn_2l_handle_events(unsigned cpu) |
5794 | |
5795 | /* Process port. */ |
5796 | port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx; |
5797 | - irq = get_evtchn_to_irq(port); |
5798 | - |
5799 | - if (irq != -1) |
5800 | - generic_handle_irq(irq); |
5801 | + handle_irq_for_port(port, ctrl); |
5802 | |
5803 | bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD; |
5804 | |
5805 | diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c |
5806 | index e402620b89202..26df84c45db4e 100644 |
5807 | --- a/drivers/xen/events/events_base.c |
5808 | +++ b/drivers/xen/events/events_base.c |
5809 | @@ -33,6 +33,10 @@ |
5810 | #include <linux/slab.h> |
5811 | #include <linux/irqnr.h> |
5812 | #include <linux/pci.h> |
5813 | +#include <linux/spinlock.h> |
5814 | +#include <linux/cpuhotplug.h> |
5815 | +#include <linux/atomic.h> |
5816 | +#include <linux/ktime.h> |
5817 | |
5818 | #ifdef CONFIG_X86 |
5819 | #include <asm/desc.h> |
5820 | @@ -62,6 +66,15 @@ |
5821 | |
5822 | #include "events_internal.h" |
5823 | |
5824 | +#undef MODULE_PARAM_PREFIX |
5825 | +#define MODULE_PARAM_PREFIX "xen." |
5826 | + |
5827 | +static uint __read_mostly event_loop_timeout = 2; |
5828 | +module_param(event_loop_timeout, uint, 0644); |
5829 | + |
5830 | +static uint __read_mostly event_eoi_delay = 10; |
5831 | +module_param(event_eoi_delay, uint, 0644); |
5832 | + |
5833 | const struct evtchn_ops *evtchn_ops; |
5834 | |
5835 | /* |
5836 | @@ -70,6 +83,24 @@ const struct evtchn_ops *evtchn_ops; |
5837 | */ |
5838 | static DEFINE_MUTEX(irq_mapping_update_lock); |
5839 | |
5840 | +/* |
5841 | + * Lock protecting event handling loop against removing event channels. |
5842 | + * Adding of event channels is no issue as the associated IRQ becomes active |
5843 | + * only after everything is setup (before request_[threaded_]irq() the handler |
5844 | + * can't be entered for an event, as the event channel will be unmasked only |
5845 | + * then). |
5846 | + */ |
5847 | +static DEFINE_RWLOCK(evtchn_rwlock); |
5848 | + |
5849 | +/* |
5850 | + * Lock hierarchy: |
5851 | + * |
5852 | + * irq_mapping_update_lock |
5853 | + * evtchn_rwlock |
5854 | + * IRQ-desc lock |
5855 | + * percpu eoi_list_lock |
5856 | + */ |
5857 | + |
5858 | static LIST_HEAD(xen_irq_list_head); |
5859 | |
5860 | /* IRQ <-> VIRQ mapping. */ |
5861 | @@ -94,17 +125,20 @@ static bool (*pirq_needs_eoi)(unsigned irq); |
5862 | static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY]; |
5863 | |
5864 | static struct irq_chip xen_dynamic_chip; |
5865 | +static struct irq_chip xen_lateeoi_chip; |
5866 | static struct irq_chip xen_percpu_chip; |
5867 | static struct irq_chip xen_pirq_chip; |
5868 | static void enable_dynirq(struct irq_data *data); |
5869 | static void disable_dynirq(struct irq_data *data); |
5870 | |
5871 | +static DEFINE_PER_CPU(unsigned int, irq_epoch); |
5872 | + |
5873 | static void clear_evtchn_to_irq_row(unsigned row) |
5874 | { |
5875 | unsigned col; |
5876 | |
5877 | for (col = 0; col < EVTCHN_PER_ROW; col++) |
5878 | - evtchn_to_irq[row][col] = -1; |
5879 | + WRITE_ONCE(evtchn_to_irq[row][col], -1); |
5880 | } |
5881 | |
5882 | static void clear_evtchn_to_irq_all(void) |
5883 | @@ -141,7 +175,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) |
5884 | clear_evtchn_to_irq_row(row); |
5885 | } |
5886 | |
5887 | - evtchn_to_irq[row][col] = irq; |
5888 | + WRITE_ONCE(evtchn_to_irq[row][col], irq); |
5889 | return 0; |
5890 | } |
5891 | |
5892 | @@ -151,7 +185,7 @@ int get_evtchn_to_irq(unsigned evtchn) |
5893 | return -1; |
5894 | if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) |
5895 | return -1; |
5896 | - return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]; |
5897 | + return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]); |
5898 | } |
5899 | |
5900 | /* Get info for IRQ */ |
5901 | @@ -260,10 +294,14 @@ static void xen_irq_info_cleanup(struct irq_info *info) |
5902 | */ |
5903 | unsigned int evtchn_from_irq(unsigned irq) |
5904 | { |
5905 | - if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)) |
5906 | + const struct irq_info *info = NULL; |
5907 | + |
5908 | + if (likely(irq < nr_irqs)) |
5909 | + info = info_for_irq(irq); |
5910 | + if (!info) |
5911 | return 0; |
5912 | |
5913 | - return info_for_irq(irq)->evtchn; |
5914 | + return info->evtchn; |
5915 | } |
5916 | |
5917 | unsigned irq_from_evtchn(unsigned int evtchn) |
5918 | @@ -374,9 +412,157 @@ void notify_remote_via_irq(int irq) |
5919 | } |
5920 | EXPORT_SYMBOL_GPL(notify_remote_via_irq); |
5921 | |
5922 | +struct lateeoi_work { |
5923 | + struct delayed_work delayed; |
5924 | + spinlock_t eoi_list_lock; |
5925 | + struct list_head eoi_list; |
5926 | +}; |
5927 | + |
5928 | +static DEFINE_PER_CPU(struct lateeoi_work, lateeoi); |
5929 | + |
5930 | +static void lateeoi_list_del(struct irq_info *info) |
5931 | +{ |
5932 | + struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu); |
5933 | + unsigned long flags; |
5934 | + |
5935 | + spin_lock_irqsave(&eoi->eoi_list_lock, flags); |
5936 | + list_del_init(&info->eoi_list); |
5937 | + spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); |
5938 | +} |
5939 | + |
5940 | +static void lateeoi_list_add(struct irq_info *info) |
5941 | +{ |
5942 | + struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu); |
5943 | + struct irq_info *elem; |
5944 | + u64 now = get_jiffies_64(); |
5945 | + unsigned long delay; |
5946 | + unsigned long flags; |
5947 | + |
5948 | + if (now < info->eoi_time) |
5949 | + delay = info->eoi_time - now; |
5950 | + else |
5951 | + delay = 1; |
5952 | + |
5953 | + spin_lock_irqsave(&eoi->eoi_list_lock, flags); |
5954 | + |
5955 | + if (list_empty(&eoi->eoi_list)) { |
5956 | + list_add(&info->eoi_list, &eoi->eoi_list); |
5957 | + mod_delayed_work_on(info->eoi_cpu, system_wq, |
5958 | + &eoi->delayed, delay); |
5959 | + } else { |
5960 | + list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) { |
5961 | + if (elem->eoi_time <= info->eoi_time) |
5962 | + break; |
5963 | + } |
5964 | + list_add(&info->eoi_list, &elem->eoi_list); |
5965 | + } |
5966 | + |
5967 | + spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); |
5968 | +} |
5969 | + |
5970 | +static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious) |
5971 | +{ |
5972 | + evtchn_port_t evtchn; |
5973 | + unsigned int cpu; |
5974 | + unsigned int delay = 0; |
5975 | + |
5976 | + evtchn = info->evtchn; |
5977 | + if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list)) |
5978 | + return; |
5979 | + |
5980 | + if (spurious) { |
5981 | + if ((1 << info->spurious_cnt) < (HZ << 2)) |
5982 | + info->spurious_cnt++; |
5983 | + if (info->spurious_cnt > 1) { |
5984 | + delay = 1 << (info->spurious_cnt - 2); |
5985 | + if (delay > HZ) |
5986 | + delay = HZ; |
5987 | + if (!info->eoi_time) |
5988 | + info->eoi_cpu = smp_processor_id(); |
5989 | + info->eoi_time = get_jiffies_64() + delay; |
5990 | + } |
5991 | + } else { |
5992 | + info->spurious_cnt = 0; |
5993 | + } |
5994 | + |
5995 | + cpu = info->eoi_cpu; |
5996 | + if (info->eoi_time && |
5997 | + (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) { |
5998 | + lateeoi_list_add(info); |
5999 | + return; |
6000 | + } |
6001 | + |
6002 | + info->eoi_time = 0; |
6003 | + unmask_evtchn(evtchn); |
6004 | +} |
6005 | + |
6006 | +static void xen_irq_lateeoi_worker(struct work_struct *work) |
6007 | +{ |
6008 | + struct lateeoi_work *eoi; |
6009 | + struct irq_info *info; |
6010 | + u64 now = get_jiffies_64(); |
6011 | + unsigned long flags; |
6012 | + |
6013 | + eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed); |
6014 | + |
6015 | + read_lock_irqsave(&evtchn_rwlock, flags); |
6016 | + |
6017 | + while (true) { |
6018 | + spin_lock(&eoi->eoi_list_lock); |
6019 | + |
6020 | + info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info, |
6021 | + eoi_list); |
6022 | + |
6023 | + if (info == NULL || now < info->eoi_time) { |
6024 | + spin_unlock(&eoi->eoi_list_lock); |
6025 | + break; |
6026 | + } |
6027 | + |
6028 | + list_del_init(&info->eoi_list); |
6029 | + |
6030 | + spin_unlock(&eoi->eoi_list_lock); |
6031 | + |
6032 | + info->eoi_time = 0; |
6033 | + |
6034 | + xen_irq_lateeoi_locked(info, false); |
6035 | + } |
6036 | + |
6037 | + if (info) |
6038 | + mod_delayed_work_on(info->eoi_cpu, system_wq, |
6039 | + &eoi->delayed, info->eoi_time - now); |
6040 | + |
6041 | + read_unlock_irqrestore(&evtchn_rwlock, flags); |
6042 | +} |
6043 | + |
6044 | +static void xen_cpu_init_eoi(unsigned int cpu) |
6045 | +{ |
6046 | + struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu); |
6047 | + |
6048 | + INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker); |
6049 | + spin_lock_init(&eoi->eoi_list_lock); |
6050 | + INIT_LIST_HEAD(&eoi->eoi_list); |
6051 | +} |
6052 | + |
6053 | +void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags) |
6054 | +{ |
6055 | + struct irq_info *info; |
6056 | + unsigned long flags; |
6057 | + |
6058 | + read_lock_irqsave(&evtchn_rwlock, flags); |
6059 | + |
6060 | + info = info_for_irq(irq); |
6061 | + |
6062 | + if (info) |
6063 | + xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS); |
6064 | + |
6065 | + read_unlock_irqrestore(&evtchn_rwlock, flags); |
6066 | +} |
6067 | +EXPORT_SYMBOL_GPL(xen_irq_lateeoi); |
6068 | + |
6069 | static void xen_irq_init(unsigned irq) |
6070 | { |
6071 | struct irq_info *info; |
6072 | + |
6073 | #ifdef CONFIG_SMP |
6074 | /* By default all event channels notify CPU#0. */ |
6075 | cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0)); |
6076 | @@ -391,6 +577,7 @@ static void xen_irq_init(unsigned irq) |
6077 | |
6078 | set_info_for_irq(irq, info); |
6079 | |
6080 | + INIT_LIST_HEAD(&info->eoi_list); |
6081 | list_add_tail(&info->list, &xen_irq_list_head); |
6082 | } |
6083 | |
6084 | @@ -439,16 +626,24 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) |
6085 | static void xen_free_irq(unsigned irq) |
6086 | { |
6087 | struct irq_info *info = info_for_irq(irq); |
6088 | + unsigned long flags; |
6089 | |
6090 | if (WARN_ON(!info)) |
6091 | return; |
6092 | |
6093 | + write_lock_irqsave(&evtchn_rwlock, flags); |
6094 | + |
6095 | + if (!list_empty(&info->eoi_list)) |
6096 | + lateeoi_list_del(info); |
6097 | + |
6098 | list_del(&info->list); |
6099 | |
6100 | set_info_for_irq(irq, NULL); |
6101 | |
6102 | WARN_ON(info->refcnt > 0); |
6103 | |
6104 | + write_unlock_irqrestore(&evtchn_rwlock, flags); |
6105 | + |
6106 | kfree(info); |
6107 | |
6108 | /* Legacy IRQ descriptors are managed by the arch. */ |
6109 | @@ -840,7 +1035,7 @@ int xen_pirq_from_irq(unsigned irq) |
6110 | } |
6111 | EXPORT_SYMBOL_GPL(xen_pirq_from_irq); |
6112 | |
6113 | -int bind_evtchn_to_irq(unsigned int evtchn) |
6114 | +static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip) |
6115 | { |
6116 | int irq; |
6117 | int ret; |
6118 | @@ -857,7 +1052,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) |
6119 | if (irq < 0) |
6120 | goto out; |
6121 | |
6122 | - irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, |
6123 | + irq_set_chip_and_handler_name(irq, chip, |
6124 | handle_edge_irq, "event"); |
6125 | |
6126 | ret = xen_irq_info_evtchn_setup(irq, evtchn); |
6127 | @@ -878,8 +1073,19 @@ out: |
6128 | |
6129 | return irq; |
6130 | } |
6131 | + |
6132 | +int bind_evtchn_to_irq(evtchn_port_t evtchn) |
6133 | +{ |
6134 | + return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip); |
6135 | +} |
6136 | EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); |
6137 | |
6138 | +int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn) |
6139 | +{ |
6140 | + return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip); |
6141 | +} |
6142 | +EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi); |
6143 | + |
6144 | static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) |
6145 | { |
6146 | struct evtchn_bind_ipi bind_ipi; |
6147 | @@ -921,8 +1127,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) |
6148 | return irq; |
6149 | } |
6150 | |
6151 | -int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, |
6152 | - unsigned int remote_port) |
6153 | +static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain, |
6154 | + evtchn_port_t remote_port, |
6155 | + struct irq_chip *chip) |
6156 | { |
6157 | struct evtchn_bind_interdomain bind_interdomain; |
6158 | int err; |
6159 | @@ -933,10 +1140,26 @@ int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, |
6160 | err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, |
6161 | &bind_interdomain); |
6162 | |
6163 | - return err ? : bind_evtchn_to_irq(bind_interdomain.local_port); |
6164 | + return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port, |
6165 | + chip); |
6166 | +} |
6167 | + |
6168 | +int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, |
6169 | + evtchn_port_t remote_port) |
6170 | +{ |
6171 | + return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, |
6172 | + &xen_dynamic_chip); |
6173 | } |
6174 | EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq); |
6175 | |
6176 | +int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain, |
6177 | + evtchn_port_t remote_port) |
6178 | +{ |
6179 | + return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, |
6180 | + &xen_lateeoi_chip); |
6181 | +} |
6182 | +EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi); |
6183 | + |
6184 | static int find_virq(unsigned int virq, unsigned int cpu) |
6185 | { |
6186 | struct evtchn_status status; |
6187 | @@ -1032,14 +1255,15 @@ static void unbind_from_irq(unsigned int irq) |
6188 | mutex_unlock(&irq_mapping_update_lock); |
6189 | } |
6190 | |
6191 | -int bind_evtchn_to_irqhandler(unsigned int evtchn, |
6192 | - irq_handler_t handler, |
6193 | - unsigned long irqflags, |
6194 | - const char *devname, void *dev_id) |
6195 | +static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn, |
6196 | + irq_handler_t handler, |
6197 | + unsigned long irqflags, |
6198 | + const char *devname, void *dev_id, |
6199 | + struct irq_chip *chip) |
6200 | { |
6201 | int irq, retval; |
6202 | |
6203 | - irq = bind_evtchn_to_irq(evtchn); |
6204 | + irq = bind_evtchn_to_irq_chip(evtchn, chip); |
6205 | if (irq < 0) |
6206 | return irq; |
6207 | retval = request_irq(irq, handler, irqflags, devname, dev_id); |
6208 | @@ -1050,18 +1274,38 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn, |
6209 | |
6210 | return irq; |
6211 | } |
6212 | + |
6213 | +int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, |
6214 | + irq_handler_t handler, |
6215 | + unsigned long irqflags, |
6216 | + const char *devname, void *dev_id) |
6217 | +{ |
6218 | + return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags, |
6219 | + devname, dev_id, |
6220 | + &xen_dynamic_chip); |
6221 | +} |
6222 | EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); |
6223 | |
6224 | -int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, |
6225 | - unsigned int remote_port, |
6226 | - irq_handler_t handler, |
6227 | - unsigned long irqflags, |
6228 | - const char *devname, |
6229 | - void *dev_id) |
6230 | +int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn, |
6231 | + irq_handler_t handler, |
6232 | + unsigned long irqflags, |
6233 | + const char *devname, void *dev_id) |
6234 | +{ |
6235 | + return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags, |
6236 | + devname, dev_id, |
6237 | + &xen_lateeoi_chip); |
6238 | +} |
6239 | +EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi); |
6240 | + |
6241 | +static int bind_interdomain_evtchn_to_irqhandler_chip( |
6242 | + unsigned int remote_domain, evtchn_port_t remote_port, |
6243 | + irq_handler_t handler, unsigned long irqflags, |
6244 | + const char *devname, void *dev_id, struct irq_chip *chip) |
6245 | { |
6246 | int irq, retval; |
6247 | |
6248 | - irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port); |
6249 | + irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, |
6250 | + chip); |
6251 | if (irq < 0) |
6252 | return irq; |
6253 | |
6254 | @@ -1073,8 +1317,33 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, |
6255 | |
6256 | return irq; |
6257 | } |
6258 | + |
6259 | +int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, |
6260 | + evtchn_port_t remote_port, |
6261 | + irq_handler_t handler, |
6262 | + unsigned long irqflags, |
6263 | + const char *devname, |
6264 | + void *dev_id) |
6265 | +{ |
6266 | + return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain, |
6267 | + remote_port, handler, irqflags, devname, |
6268 | + dev_id, &xen_dynamic_chip); |
6269 | +} |
6270 | EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler); |
6271 | |
6272 | +int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain, |
6273 | + evtchn_port_t remote_port, |
6274 | + irq_handler_t handler, |
6275 | + unsigned long irqflags, |
6276 | + const char *devname, |
6277 | + void *dev_id) |
6278 | +{ |
6279 | + return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain, |
6280 | + remote_port, handler, irqflags, devname, |
6281 | + dev_id, &xen_lateeoi_chip); |
6282 | +} |
6283 | +EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi); |
6284 | + |
6285 | int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, |
6286 | irq_handler_t handler, |
6287 | unsigned long irqflags, const char *devname, void *dev_id) |
6288 | @@ -1187,7 +1456,7 @@ int evtchn_get(unsigned int evtchn) |
6289 | goto done; |
6290 | |
6291 | err = -EINVAL; |
6292 | - if (info->refcnt <= 0) |
6293 | + if (info->refcnt <= 0 || info->refcnt == SHRT_MAX) |
6294 | goto done; |
6295 | |
6296 | info->refcnt++; |
6297 | @@ -1226,6 +1495,54 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) |
6298 | notify_remote_via_irq(irq); |
6299 | } |
6300 | |
6301 | +struct evtchn_loop_ctrl { |
6302 | + ktime_t timeout; |
6303 | + unsigned count; |
6304 | + bool defer_eoi; |
6305 | +}; |
6306 | + |
6307 | +void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl) |
6308 | +{ |
6309 | + int irq; |
6310 | + struct irq_info *info; |
6311 | + |
6312 | + irq = get_evtchn_to_irq(port); |
6313 | + if (irq == -1) |
6314 | + return; |
6315 | + |
6316 | + /* |
6317 | + * Check for timeout every 256 events. |
6318 | + * We are setting the timeout value only after the first 256 |
6319 | + * events in order to not hurt the common case of few loop |
6320 | + * iterations. The 256 is basically an arbitrary value. |
6321 | + * |
6322 | + * In case we are hitting the timeout we need to defer all further |
6323 | + * EOIs in order to ensure to leave the event handling loop rather |
6324 | + * sooner than later. |
6325 | + */ |
6326 | + if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) { |
6327 | + ktime_t kt = ktime_get(); |
6328 | + |
6329 | + if (!ctrl->timeout) { |
6330 | + kt = ktime_add_ms(kt, |
6331 | + jiffies_to_msecs(event_loop_timeout)); |
6332 | + ctrl->timeout = kt; |
6333 | + } else if (kt > ctrl->timeout) { |
6334 | + ctrl->defer_eoi = true; |
6335 | + } |
6336 | + } |
6337 | + |
6338 | + info = info_for_irq(irq); |
6339 | + |
6340 | + if (ctrl->defer_eoi) { |
6341 | + info->eoi_cpu = smp_processor_id(); |
6342 | + info->irq_epoch = __this_cpu_read(irq_epoch); |
6343 | + info->eoi_time = get_jiffies_64() + event_eoi_delay; |
6344 | + } |
6345 | + |
6346 | + generic_handle_irq(irq); |
6347 | +} |
6348 | + |
6349 | static DEFINE_PER_CPU(unsigned, xed_nesting_count); |
6350 | |
6351 | static void __xen_evtchn_do_upcall(void) |
6352 | @@ -1233,6 +1550,9 @@ static void __xen_evtchn_do_upcall(void) |
6353 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); |
6354 | int cpu = get_cpu(); |
6355 | unsigned count; |
6356 | + struct evtchn_loop_ctrl ctrl = { 0 }; |
6357 | + |
6358 | + read_lock(&evtchn_rwlock); |
6359 | |
6360 | do { |
6361 | vcpu_info->evtchn_upcall_pending = 0; |
6362 | @@ -1240,7 +1560,7 @@ static void __xen_evtchn_do_upcall(void) |
6363 | if (__this_cpu_inc_return(xed_nesting_count) - 1) |
6364 | goto out; |
6365 | |
6366 | - xen_evtchn_handle_events(cpu); |
6367 | + xen_evtchn_handle_events(cpu, &ctrl); |
6368 | |
6369 | BUG_ON(!irqs_disabled()); |
6370 | |
6371 | @@ -1249,6 +1569,14 @@ static void __xen_evtchn_do_upcall(void) |
6372 | } while (count != 1 || vcpu_info->evtchn_upcall_pending); |
6373 | |
6374 | out: |
6375 | + read_unlock(&evtchn_rwlock); |
6376 | + |
6377 | + /* |
6378 | + * Increment irq_epoch only now to defer EOIs only for |
6379 | + * xen_irq_lateeoi() invocations occurring from inside the loop |
6380 | + * above. |
6381 | + */ |
6382 | + __this_cpu_inc(irq_epoch); |
6383 | |
6384 | put_cpu(); |
6385 | } |
6386 | @@ -1615,6 +1943,21 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { |
6387 | .irq_retrigger = retrigger_dynirq, |
6388 | }; |
6389 | |
6390 | +static struct irq_chip xen_lateeoi_chip __read_mostly = { |
6391 | + /* The chip name needs to contain "xen-dyn" for irqbalance to work. */ |
6392 | + .name = "xen-dyn-lateeoi", |
6393 | + |
6394 | + .irq_disable = disable_dynirq, |
6395 | + .irq_mask = disable_dynirq, |
6396 | + .irq_unmask = enable_dynirq, |
6397 | + |
6398 | + .irq_ack = mask_ack_dynirq, |
6399 | + .irq_mask_ack = mask_ack_dynirq, |
6400 | + |
6401 | + .irq_set_affinity = set_affinity_irq, |
6402 | + .irq_retrigger = retrigger_dynirq, |
6403 | +}; |
6404 | + |
6405 | static struct irq_chip xen_pirq_chip __read_mostly = { |
6406 | .name = "xen-pirq", |
6407 | |
6408 | @@ -1681,12 +2024,31 @@ void xen_callback_vector(void) |
6409 | void xen_callback_vector(void) {} |
6410 | #endif |
6411 | |
6412 | -#undef MODULE_PARAM_PREFIX |
6413 | -#define MODULE_PARAM_PREFIX "xen." |
6414 | - |
6415 | static bool fifo_events = true; |
6416 | module_param(fifo_events, bool, 0); |
6417 | |
6418 | +static int xen_evtchn_cpu_prepare(unsigned int cpu) |
6419 | +{ |
6420 | + int ret = 0; |
6421 | + |
6422 | + xen_cpu_init_eoi(cpu); |
6423 | + |
6424 | + if (evtchn_ops->percpu_init) |
6425 | + ret = evtchn_ops->percpu_init(cpu); |
6426 | + |
6427 | + return ret; |
6428 | +} |
6429 | + |
6430 | +static int xen_evtchn_cpu_dead(unsigned int cpu) |
6431 | +{ |
6432 | + int ret = 0; |
6433 | + |
6434 | + if (evtchn_ops->percpu_deinit) |
6435 | + ret = evtchn_ops->percpu_deinit(cpu); |
6436 | + |
6437 | + return ret; |
6438 | +} |
6439 | + |
6440 | void __init xen_init_IRQ(void) |
6441 | { |
6442 | int ret = -EINVAL; |
6443 | @@ -1697,6 +2059,12 @@ void __init xen_init_IRQ(void) |
6444 | if (ret < 0) |
6445 | xen_evtchn_2l_init(); |
6446 | |
6447 | + xen_cpu_init_eoi(smp_processor_id()); |
6448 | + |
6449 | + cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE, |
6450 | + "xen/evtchn:prepare", |
6451 | + xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead); |
6452 | + |
6453 | evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()), |
6454 | sizeof(*evtchn_to_irq), GFP_KERNEL); |
6455 | BUG_ON(!evtchn_to_irq); |
6456 | diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c |
6457 | index 76b318e88382e..33462521bfd0f 100644 |
6458 | --- a/drivers/xen/events/events_fifo.c |
6459 | +++ b/drivers/xen/events/events_fifo.c |
6460 | @@ -227,19 +227,25 @@ static bool evtchn_fifo_is_masked(unsigned port) |
6461 | return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); |
6462 | } |
6463 | /* |
6464 | - * Clear MASKED, spinning if BUSY is set. |
6465 | + * Clear MASKED if not PENDING, spinning if BUSY is set. |
6466 | + * Return true if mask was cleared. |
6467 | */ |
6468 | -static void clear_masked(volatile event_word_t *word) |
6469 | +static bool clear_masked_cond(volatile event_word_t *word) |
6470 | { |
6471 | event_word_t new, old, w; |
6472 | |
6473 | w = *word; |
6474 | |
6475 | do { |
6476 | + if (w & (1 << EVTCHN_FIFO_PENDING)) |
6477 | + return false; |
6478 | + |
6479 | old = w & ~(1 << EVTCHN_FIFO_BUSY); |
6480 | new = old & ~(1 << EVTCHN_FIFO_MASKED); |
6481 | w = sync_cmpxchg(word, old, new); |
6482 | } while (w != old); |
6483 | + |
6484 | + return true; |
6485 | } |
6486 | |
6487 | static void evtchn_fifo_unmask(unsigned port) |
6488 | @@ -248,8 +254,7 @@ static void evtchn_fifo_unmask(unsigned port) |
6489 | |
6490 | BUG_ON(!irqs_disabled()); |
6491 | |
6492 | - clear_masked(word); |
6493 | - if (evtchn_fifo_is_pending(port)) { |
6494 | + if (!clear_masked_cond(word)) { |
6495 | struct evtchn_unmask unmask = { .port = port }; |
6496 | (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); |
6497 | } |
6498 | @@ -270,19 +275,9 @@ static uint32_t clear_linked(volatile event_word_t *word) |
6499 | return w & EVTCHN_FIFO_LINK_MASK; |
6500 | } |
6501 | |
6502 | -static void handle_irq_for_port(unsigned port) |
6503 | -{ |
6504 | - int irq; |
6505 | - |
6506 | - irq = get_evtchn_to_irq(port); |
6507 | - if (irq != -1) |
6508 | - generic_handle_irq(irq); |
6509 | -} |
6510 | - |
6511 | -static void consume_one_event(unsigned cpu, |
6512 | +static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl, |
6513 | struct evtchn_fifo_control_block *control_block, |
6514 | - unsigned priority, unsigned long *ready, |
6515 | - bool drop) |
6516 | + unsigned priority, unsigned long *ready) |
6517 | { |
6518 | struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); |
6519 | uint32_t head; |
6520 | @@ -315,16 +310,17 @@ static void consume_one_event(unsigned cpu, |
6521 | clear_bit(priority, ready); |
6522 | |
6523 | if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) { |
6524 | - if (unlikely(drop)) |
6525 | + if (unlikely(!ctrl)) |
6526 | pr_warn("Dropping pending event for port %u\n", port); |
6527 | else |
6528 | - handle_irq_for_port(port); |
6529 | + handle_irq_for_port(port, ctrl); |
6530 | } |
6531 | |
6532 | q->head[priority] = head; |
6533 | } |
6534 | |
6535 | -static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) |
6536 | +static void __evtchn_fifo_handle_events(unsigned cpu, |
6537 | + struct evtchn_loop_ctrl *ctrl) |
6538 | { |
6539 | struct evtchn_fifo_control_block *control_block; |
6540 | unsigned long ready; |
6541 | @@ -336,14 +332,15 @@ static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) |
6542 | |
6543 | while (ready) { |
6544 | q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); |
6545 | - consume_one_event(cpu, control_block, q, &ready, drop); |
6546 | + consume_one_event(cpu, ctrl, control_block, q, &ready); |
6547 | ready |= xchg(&control_block->ready, 0); |
6548 | } |
6549 | } |
6550 | |
6551 | -static void evtchn_fifo_handle_events(unsigned cpu) |
6552 | +static void evtchn_fifo_handle_events(unsigned cpu, |
6553 | + struct evtchn_loop_ctrl *ctrl) |
6554 | { |
6555 | - __evtchn_fifo_handle_events(cpu, false); |
6556 | + __evtchn_fifo_handle_events(cpu, ctrl); |
6557 | } |
6558 | |
6559 | static void evtchn_fifo_resume(void) |
6560 | @@ -380,21 +377,6 @@ static void evtchn_fifo_resume(void) |
6561 | event_array_pages = 0; |
6562 | } |
6563 | |
6564 | -static const struct evtchn_ops evtchn_ops_fifo = { |
6565 | - .max_channels = evtchn_fifo_max_channels, |
6566 | - .nr_channels = evtchn_fifo_nr_channels, |
6567 | - .setup = evtchn_fifo_setup, |
6568 | - .bind_to_cpu = evtchn_fifo_bind_to_cpu, |
6569 | - .clear_pending = evtchn_fifo_clear_pending, |
6570 | - .set_pending = evtchn_fifo_set_pending, |
6571 | - .is_pending = evtchn_fifo_is_pending, |
6572 | - .test_and_set_mask = evtchn_fifo_test_and_set_mask, |
6573 | - .mask = evtchn_fifo_mask, |
6574 | - .unmask = evtchn_fifo_unmask, |
6575 | - .handle_events = evtchn_fifo_handle_events, |
6576 | - .resume = evtchn_fifo_resume, |
6577 | -}; |
6578 | - |
6579 | static int evtchn_fifo_alloc_control_block(unsigned cpu) |
6580 | { |
6581 | void *control_block = NULL; |
6582 | @@ -417,19 +399,36 @@ static int evtchn_fifo_alloc_control_block(unsigned cpu) |
6583 | return ret; |
6584 | } |
6585 | |
6586 | -static int xen_evtchn_cpu_prepare(unsigned int cpu) |
6587 | +static int evtchn_fifo_percpu_init(unsigned int cpu) |
6588 | { |
6589 | if (!per_cpu(cpu_control_block, cpu)) |
6590 | return evtchn_fifo_alloc_control_block(cpu); |
6591 | return 0; |
6592 | } |
6593 | |
6594 | -static int xen_evtchn_cpu_dead(unsigned int cpu) |
6595 | +static int evtchn_fifo_percpu_deinit(unsigned int cpu) |
6596 | { |
6597 | - __evtchn_fifo_handle_events(cpu, true); |
6598 | + __evtchn_fifo_handle_events(cpu, NULL); |
6599 | return 0; |
6600 | } |
6601 | |
6602 | +static const struct evtchn_ops evtchn_ops_fifo = { |
6603 | + .max_channels = evtchn_fifo_max_channels, |
6604 | + .nr_channels = evtchn_fifo_nr_channels, |
6605 | + .setup = evtchn_fifo_setup, |
6606 | + .bind_to_cpu = evtchn_fifo_bind_to_cpu, |
6607 | + .clear_pending = evtchn_fifo_clear_pending, |
6608 | + .set_pending = evtchn_fifo_set_pending, |
6609 | + .is_pending = evtchn_fifo_is_pending, |
6610 | + .test_and_set_mask = evtchn_fifo_test_and_set_mask, |
6611 | + .mask = evtchn_fifo_mask, |
6612 | + .unmask = evtchn_fifo_unmask, |
6613 | + .handle_events = evtchn_fifo_handle_events, |
6614 | + .resume = evtchn_fifo_resume, |
6615 | + .percpu_init = evtchn_fifo_percpu_init, |
6616 | + .percpu_deinit = evtchn_fifo_percpu_deinit, |
6617 | +}; |
6618 | + |
6619 | int __init xen_evtchn_fifo_init(void) |
6620 | { |
6621 | int cpu = smp_processor_id(); |
6622 | @@ -443,9 +442,5 @@ int __init xen_evtchn_fifo_init(void) |
6623 | |
6624 | evtchn_ops = &evtchn_ops_fifo; |
6625 | |
6626 | - cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE, |
6627 | - "xen/evtchn:prepare", |
6628 | - xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead); |
6629 | - |
6630 | return ret; |
6631 | } |
6632 | diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h |
6633 | index 82938cff6c7a8..a35c8c7ac6066 100644 |
6634 | --- a/drivers/xen/events/events_internal.h |
6635 | +++ b/drivers/xen/events/events_internal.h |
6636 | @@ -30,11 +30,16 @@ enum xen_irq_type { |
6637 | */ |
6638 | struct irq_info { |
6639 | struct list_head list; |
6640 | - int refcnt; |
6641 | + struct list_head eoi_list; |
6642 | + short refcnt; |
6643 | + short spurious_cnt; |
6644 | enum xen_irq_type type; /* type */ |
6645 | unsigned irq; |
6646 | unsigned int evtchn; /* event channel */ |
6647 | unsigned short cpu; /* cpu bound */ |
6648 | + unsigned short eoi_cpu; /* EOI must happen on this cpu */ |
6649 | + unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */ |
6650 | + u64 eoi_time; /* Time in jiffies when to EOI. */ |
6651 | |
6652 | union { |
6653 | unsigned short virq; |
6654 | @@ -53,6 +58,8 @@ struct irq_info { |
6655 | #define PIRQ_SHAREABLE (1 << 1) |
6656 | #define PIRQ_MSI_GROUP (1 << 2) |
6657 | |
6658 | +struct evtchn_loop_ctrl; |
6659 | + |
6660 | struct evtchn_ops { |
6661 | unsigned (*max_channels)(void); |
6662 | unsigned (*nr_channels)(void); |
6663 | @@ -67,14 +74,18 @@ struct evtchn_ops { |
6664 | void (*mask)(unsigned port); |
6665 | void (*unmask)(unsigned port); |
6666 | |
6667 | - void (*handle_events)(unsigned cpu); |
6668 | + void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl); |
6669 | void (*resume)(void); |
6670 | + |
6671 | + int (*percpu_init)(unsigned int cpu); |
6672 | + int (*percpu_deinit)(unsigned int cpu); |
6673 | }; |
6674 | |
6675 | extern const struct evtchn_ops *evtchn_ops; |
6676 | |
6677 | extern int **evtchn_to_irq; |
6678 | int get_evtchn_to_irq(unsigned int evtchn); |
6679 | +void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl); |
6680 | |
6681 | struct irq_info *info_for_irq(unsigned irq); |
6682 | unsigned cpu_from_irq(unsigned irq); |
6683 | @@ -132,9 +143,10 @@ static inline void unmask_evtchn(unsigned port) |
6684 | return evtchn_ops->unmask(port); |
6685 | } |
6686 | |
6687 | -static inline void xen_evtchn_handle_events(unsigned cpu) |
6688 | +static inline void xen_evtchn_handle_events(unsigned cpu, |
6689 | + struct evtchn_loop_ctrl *ctrl) |
6690 | { |
6691 | - return evtchn_ops->handle_events(cpu); |
6692 | + return evtchn_ops->handle_events(cpu, ctrl); |
6693 | } |
6694 | |
6695 | static inline void xen_evtchn_resume(void) |
6696 | diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c |
6697 | index 052b55a14ebc6..a43930191e202 100644 |
6698 | --- a/drivers/xen/evtchn.c |
6699 | +++ b/drivers/xen/evtchn.c |
6700 | @@ -166,7 +166,6 @@ static irqreturn_t evtchn_interrupt(int irq, void *data) |
6701 | "Interrupt for port %d, but apparently not enabled; per-user %p\n", |
6702 | evtchn->port, u); |
6703 | |
6704 | - disable_irq_nosync(irq); |
6705 | evtchn->enabled = false; |
6706 | |
6707 | spin_lock(&u->ring_prod_lock); |
6708 | @@ -292,7 +291,7 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf, |
6709 | evtchn = find_evtchn(u, port); |
6710 | if (evtchn && !evtchn->enabled) { |
6711 | evtchn->enabled = true; |
6712 | - enable_irq(irq_from_evtchn(port)); |
6713 | + xen_irq_lateeoi(irq_from_evtchn(port), 0); |
6714 | } |
6715 | } |
6716 | |
6717 | @@ -392,8 +391,8 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) |
6718 | if (rc < 0) |
6719 | goto err; |
6720 | |
6721 | - rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0, |
6722 | - u->name, evtchn); |
6723 | + rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0, |
6724 | + u->name, evtchn); |
6725 | if (rc < 0) |
6726 | goto err; |
6727 | |
6728 | diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c |
6729 | index ffe9bd843922b..9439de2ca0e45 100644 |
6730 | --- a/drivers/xen/pvcalls-back.c |
6731 | +++ b/drivers/xen/pvcalls-back.c |
6732 | @@ -66,6 +66,7 @@ struct sock_mapping { |
6733 | atomic_t write; |
6734 | atomic_t io; |
6735 | atomic_t release; |
6736 | + atomic_t eoi; |
6737 | void (*saved_data_ready)(struct sock *sk); |
6738 | struct pvcalls_ioworker ioworker; |
6739 | }; |
6740 | @@ -87,7 +88,7 @@ static int pvcalls_back_release_active(struct xenbus_device *dev, |
6741 | struct pvcalls_fedata *fedata, |
6742 | struct sock_mapping *map); |
6743 | |
6744 | -static void pvcalls_conn_back_read(void *opaque) |
6745 | +static bool pvcalls_conn_back_read(void *opaque) |
6746 | { |
6747 | struct sock_mapping *map = (struct sock_mapping *)opaque; |
6748 | struct msghdr msg; |
6749 | @@ -107,17 +108,17 @@ static void pvcalls_conn_back_read(void *opaque) |
6750 | virt_mb(); |
6751 | |
6752 | if (error) |
6753 | - return; |
6754 | + return false; |
6755 | |
6756 | size = pvcalls_queued(prod, cons, array_size); |
6757 | if (size >= array_size) |
6758 | - return; |
6759 | + return false; |
6760 | spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags); |
6761 | if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) { |
6762 | atomic_set(&map->read, 0); |
6763 | spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, |
6764 | flags); |
6765 | - return; |
6766 | + return true; |
6767 | } |
6768 | spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags); |
6769 | wanted = array_size - size; |
6770 | @@ -141,7 +142,7 @@ static void pvcalls_conn_back_read(void *opaque) |
6771 | ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT); |
6772 | WARN_ON(ret > wanted); |
6773 | if (ret == -EAGAIN) /* shouldn't happen */ |
6774 | - return; |
6775 | + return true; |
6776 | if (!ret) |
6777 | ret = -ENOTCONN; |
6778 | spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags); |
6779 | @@ -160,10 +161,10 @@ static void pvcalls_conn_back_read(void *opaque) |
6780 | virt_wmb(); |
6781 | notify_remote_via_irq(map->irq); |
6782 | |
6783 | - return; |
6784 | + return true; |
6785 | } |
6786 | |
6787 | -static void pvcalls_conn_back_write(struct sock_mapping *map) |
6788 | +static bool pvcalls_conn_back_write(struct sock_mapping *map) |
6789 | { |
6790 | struct pvcalls_data_intf *intf = map->ring; |
6791 | struct pvcalls_data *data = &map->data; |
6792 | @@ -180,7 +181,7 @@ static void pvcalls_conn_back_write(struct sock_mapping *map) |
6793 | array_size = XEN_FLEX_RING_SIZE(map->ring_order); |
6794 | size = pvcalls_queued(prod, cons, array_size); |
6795 | if (size == 0) |
6796 | - return; |
6797 | + return false; |
6798 | |
6799 | memset(&msg, 0, sizeof(msg)); |
6800 | msg.msg_flags |= MSG_DONTWAIT; |
6801 | @@ -198,12 +199,11 @@ static void pvcalls_conn_back_write(struct sock_mapping *map) |
6802 | |
6803 | atomic_set(&map->write, 0); |
6804 | ret = inet_sendmsg(map->sock, &msg, size); |
6805 | - if (ret == -EAGAIN || (ret >= 0 && ret < size)) { |
6806 | + if (ret == -EAGAIN) { |
6807 | atomic_inc(&map->write); |
6808 | atomic_inc(&map->io); |
6809 | + return true; |
6810 | } |
6811 | - if (ret == -EAGAIN) |
6812 | - return; |
6813 | |
6814 | /* write the data, then update the indexes */ |
6815 | virt_wmb(); |
6816 | @@ -216,9 +216,13 @@ static void pvcalls_conn_back_write(struct sock_mapping *map) |
6817 | } |
6818 | /* update the indexes, then notify the other end */ |
6819 | virt_wmb(); |
6820 | - if (prod != cons + ret) |
6821 | + if (prod != cons + ret) { |
6822 | atomic_inc(&map->write); |
6823 | + atomic_inc(&map->io); |
6824 | + } |
6825 | notify_remote_via_irq(map->irq); |
6826 | + |
6827 | + return true; |
6828 | } |
6829 | |
6830 | static void pvcalls_back_ioworker(struct work_struct *work) |
6831 | @@ -227,6 +231,7 @@ static void pvcalls_back_ioworker(struct work_struct *work) |
6832 | struct pvcalls_ioworker, register_work); |
6833 | struct sock_mapping *map = container_of(ioworker, struct sock_mapping, |
6834 | ioworker); |
6835 | + unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS; |
6836 | |
6837 | while (atomic_read(&map->io) > 0) { |
6838 | if (atomic_read(&map->release) > 0) { |
6839 | @@ -234,10 +239,18 @@ static void pvcalls_back_ioworker(struct work_struct *work) |
6840 | return; |
6841 | } |
6842 | |
6843 | - if (atomic_read(&map->read) > 0) |
6844 | - pvcalls_conn_back_read(map); |
6845 | - if (atomic_read(&map->write) > 0) |
6846 | - pvcalls_conn_back_write(map); |
6847 | + if (atomic_read(&map->read) > 0 && |
6848 | + pvcalls_conn_back_read(map)) |
6849 | + eoi_flags = 0; |
6850 | + if (atomic_read(&map->write) > 0 && |
6851 | + pvcalls_conn_back_write(map)) |
6852 | + eoi_flags = 0; |
6853 | + |
6854 | + if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) { |
6855 | + atomic_set(&map->eoi, 0); |
6856 | + xen_irq_lateeoi(map->irq, eoi_flags); |
6857 | + eoi_flags = XEN_EOI_FLAG_SPURIOUS; |
6858 | + } |
6859 | |
6860 | atomic_dec(&map->io); |
6861 | } |
6862 | @@ -334,12 +347,9 @@ static struct sock_mapping *pvcalls_new_active_socket( |
6863 | goto out; |
6864 | map->bytes = page; |
6865 | |
6866 | - ret = bind_interdomain_evtchn_to_irqhandler(fedata->dev->otherend_id, |
6867 | - evtchn, |
6868 | - pvcalls_back_conn_event, |
6869 | - 0, |
6870 | - "pvcalls-backend", |
6871 | - map); |
6872 | + ret = bind_interdomain_evtchn_to_irqhandler_lateeoi( |
6873 | + fedata->dev->otherend_id, evtchn, |
6874 | + pvcalls_back_conn_event, 0, "pvcalls-backend", map); |
6875 | if (ret < 0) |
6876 | goto out; |
6877 | map->irq = ret; |
6878 | @@ -873,15 +883,18 @@ static irqreturn_t pvcalls_back_event(int irq, void *dev_id) |
6879 | { |
6880 | struct xenbus_device *dev = dev_id; |
6881 | struct pvcalls_fedata *fedata = NULL; |
6882 | + unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS; |
6883 | |
6884 | - if (dev == NULL) |
6885 | - return IRQ_HANDLED; |
6886 | + if (dev) { |
6887 | + fedata = dev_get_drvdata(&dev->dev); |
6888 | + if (fedata) { |
6889 | + pvcalls_back_work(fedata); |
6890 | + eoi_flags = 0; |
6891 | + } |
6892 | + } |
6893 | |
6894 | - fedata = dev_get_drvdata(&dev->dev); |
6895 | - if (fedata == NULL) |
6896 | - return IRQ_HANDLED; |
6897 | + xen_irq_lateeoi(irq, eoi_flags); |
6898 | |
6899 | - pvcalls_back_work(fedata); |
6900 | return IRQ_HANDLED; |
6901 | } |
6902 | |
6903 | @@ -891,12 +904,15 @@ static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map) |
6904 | struct pvcalls_ioworker *iow; |
6905 | |
6906 | if (map == NULL || map->sock == NULL || map->sock->sk == NULL || |
6907 | - map->sock->sk->sk_user_data != map) |
6908 | + map->sock->sk->sk_user_data != map) { |
6909 | + xen_irq_lateeoi(irq, 0); |
6910 | return IRQ_HANDLED; |
6911 | + } |
6912 | |
6913 | iow = &map->ioworker; |
6914 | |
6915 | atomic_inc(&map->write); |
6916 | + atomic_inc(&map->eoi); |
6917 | atomic_inc(&map->io); |
6918 | queue_work(iow->wq, &iow->register_work); |
6919 | |
6920 | @@ -931,7 +947,7 @@ static int backend_connect(struct xenbus_device *dev) |
6921 | goto error; |
6922 | } |
6923 | |
6924 | - err = bind_interdomain_evtchn_to_irq(dev->otherend_id, evtchn); |
6925 | + err = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn); |
6926 | if (err < 0) |
6927 | goto error; |
6928 | fedata->irq = err; |
6929 | diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c |
6930 | index 097410a7cdb74..adf3aae2939f5 100644 |
6931 | --- a/drivers/xen/xen-pciback/pci_stub.c |
6932 | +++ b/drivers/xen/xen-pciback/pci_stub.c |
6933 | @@ -733,10 +733,17 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev, |
6934 | wmb(); |
6935 | notify_remote_via_irq(pdev->evtchn_irq); |
6936 | |
6937 | + /* Enable IRQ to signal "request done". */ |
6938 | + xen_pcibk_lateeoi(pdev, 0); |
6939 | + |
6940 | ret = wait_event_timeout(xen_pcibk_aer_wait_queue, |
6941 | !(test_bit(_XEN_PCIB_active, (unsigned long *) |
6942 | &sh_info->flags)), 300*HZ); |
6943 | |
6944 | + /* Enable IRQ for pcifront request if not already active. */ |
6945 | + if (!test_bit(_PDEVF_op_active, &pdev->flags)) |
6946 | + xen_pcibk_lateeoi(pdev, 0); |
6947 | + |
6948 | if (!ret) { |
6949 | if (test_bit(_XEN_PCIB_active, |
6950 | (unsigned long *)&sh_info->flags)) { |
6951 | @@ -750,13 +757,6 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev, |
6952 | } |
6953 | clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags); |
6954 | |
6955 | - if (test_bit(_XEN_PCIF_active, |
6956 | - (unsigned long *)&sh_info->flags)) { |
6957 | - dev_dbg(&psdev->dev->dev, |
6958 | - "schedule pci_conf service in " DRV_NAME "\n"); |
6959 | - xen_pcibk_test_and_schedule_op(psdev->pdev); |
6960 | - } |
6961 | - |
6962 | res = (pci_ers_result_t)aer_op->err; |
6963 | return res; |
6964 | } |
6965 | diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h |
6966 | index 263c059bff900..235cdfe13494f 100644 |
6967 | --- a/drivers/xen/xen-pciback/pciback.h |
6968 | +++ b/drivers/xen/xen-pciback/pciback.h |
6969 | @@ -14,6 +14,7 @@ |
6970 | #include <linux/spinlock.h> |
6971 | #include <linux/workqueue.h> |
6972 | #include <linux/atomic.h> |
6973 | +#include <xen/events.h> |
6974 | #include <xen/interface/io/pciif.h> |
6975 | |
6976 | #define DRV_NAME "xen-pciback" |
6977 | @@ -27,6 +28,8 @@ struct pci_dev_entry { |
6978 | #define PDEVF_op_active (1<<(_PDEVF_op_active)) |
6979 | #define _PCIB_op_pending (1) |
6980 | #define PCIB_op_pending (1<<(_PCIB_op_pending)) |
6981 | +#define _EOI_pending (2) |
6982 | +#define EOI_pending (1<<(_EOI_pending)) |
6983 | |
6984 | struct xen_pcibk_device { |
6985 | void *pci_dev_data; |
6986 | @@ -182,12 +185,17 @@ static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev) |
6987 | irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id); |
6988 | void xen_pcibk_do_op(struct work_struct *data); |
6989 | |
6990 | +static inline void xen_pcibk_lateeoi(struct xen_pcibk_device *pdev, |
6991 | + unsigned int eoi_flag) |
6992 | +{ |
6993 | + if (test_and_clear_bit(_EOI_pending, &pdev->flags)) |
6994 | + xen_irq_lateeoi(pdev->evtchn_irq, eoi_flag); |
6995 | +} |
6996 | + |
6997 | int xen_pcibk_xenbus_register(void); |
6998 | void xen_pcibk_xenbus_unregister(void); |
6999 | |
7000 | extern int verbose_request; |
7001 | - |
7002 | -void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev); |
7003 | #endif |
7004 | |
7005 | /* Handles shared IRQs that can to device domain and control domain. */ |
7006 | diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c |
7007 | index 787966f445895..c4ed2c634ca7e 100644 |
7008 | --- a/drivers/xen/xen-pciback/pciback_ops.c |
7009 | +++ b/drivers/xen/xen-pciback/pciback_ops.c |
7010 | @@ -297,26 +297,41 @@ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, |
7011 | return 0; |
7012 | } |
7013 | #endif |
7014 | + |
7015 | +static inline bool xen_pcibk_test_op_pending(struct xen_pcibk_device *pdev) |
7016 | +{ |
7017 | + return test_bit(_XEN_PCIF_active, |
7018 | + (unsigned long *)&pdev->sh_info->flags) && |
7019 | + !test_and_set_bit(_PDEVF_op_active, &pdev->flags); |
7020 | +} |
7021 | + |
7022 | /* |
7023 | * Now the same evtchn is used for both pcifront conf_read_write request |
7024 | * as well as pcie aer front end ack. We use a new work_queue to schedule |
7025 | * xen_pcibk conf_read_write service for avoiding confict with aer_core |
7026 | * do_recovery job which also use the system default work_queue |
7027 | */ |
7028 | -void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev) |
7029 | +static void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev) |
7030 | { |
7031 | + bool eoi = true; |
7032 | + |
7033 | /* Check that frontend is requesting an operation and that we are not |
7034 | * already processing a request */ |
7035 | - if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags) |
7036 | - && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) { |
7037 | + if (xen_pcibk_test_op_pending(pdev)) { |
7038 | schedule_work(&pdev->op_work); |
7039 | + eoi = false; |
7040 | } |
7041 | /*_XEN_PCIB_active should have been cleared by pcifront. And also make |
7042 | sure xen_pcibk is waiting for ack by checking _PCIB_op_pending*/ |
7043 | if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags) |
7044 | && test_bit(_PCIB_op_pending, &pdev->flags)) { |
7045 | wake_up(&xen_pcibk_aer_wait_queue); |
7046 | + eoi = false; |
7047 | } |
7048 | + |
7049 | + /* EOI if there was nothing to do. */ |
7050 | + if (eoi) |
7051 | + xen_pcibk_lateeoi(pdev, XEN_EOI_FLAG_SPURIOUS); |
7052 | } |
7053 | |
7054 | /* Performing the configuration space reads/writes must not be done in atomic |
7055 | @@ -324,10 +339,8 @@ void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev) |
7056 | * use of semaphores). This function is intended to be called from a work |
7057 | * queue in process context taking a struct xen_pcibk_device as a parameter */ |
7058 | |
7059 | -void xen_pcibk_do_op(struct work_struct *data) |
7060 | +static void xen_pcibk_do_one_op(struct xen_pcibk_device *pdev) |
7061 | { |
7062 | - struct xen_pcibk_device *pdev = |
7063 | - container_of(data, struct xen_pcibk_device, op_work); |
7064 | struct pci_dev *dev; |
7065 | struct xen_pcibk_dev_data *dev_data = NULL; |
7066 | struct xen_pci_op *op = &pdev->op; |
7067 | @@ -400,16 +413,31 @@ void xen_pcibk_do_op(struct work_struct *data) |
7068 | smp_mb__before_atomic(); /* /after/ clearing PCIF_active */ |
7069 | clear_bit(_PDEVF_op_active, &pdev->flags); |
7070 | smp_mb__after_atomic(); /* /before/ final check for work */ |
7071 | +} |
7072 | |
7073 | - /* Check to see if the driver domain tried to start another request in |
7074 | - * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. |
7075 | - */ |
7076 | - xen_pcibk_test_and_schedule_op(pdev); |
7077 | +void xen_pcibk_do_op(struct work_struct *data) |
7078 | +{ |
7079 | + struct xen_pcibk_device *pdev = |
7080 | + container_of(data, struct xen_pcibk_device, op_work); |
7081 | + |
7082 | + do { |
7083 | + xen_pcibk_do_one_op(pdev); |
7084 | + } while (xen_pcibk_test_op_pending(pdev)); |
7085 | + |
7086 | + xen_pcibk_lateeoi(pdev, 0); |
7087 | } |
7088 | |
7089 | irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id) |
7090 | { |
7091 | struct xen_pcibk_device *pdev = dev_id; |
7092 | + bool eoi; |
7093 | + |
7094 | + /* IRQs might come in before pdev->evtchn_irq is written. */ |
7095 | + if (unlikely(pdev->evtchn_irq != irq)) |
7096 | + pdev->evtchn_irq = irq; |
7097 | + |
7098 | + eoi = test_and_set_bit(_EOI_pending, &pdev->flags); |
7099 | + WARN(eoi, "IRQ while EOI pending\n"); |
7100 | |
7101 | xen_pcibk_test_and_schedule_op(pdev); |
7102 | |
7103 | diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c |
7104 | index 833b2d2c4318f..e7a6702359655 100644 |
7105 | --- a/drivers/xen/xen-pciback/xenbus.c |
7106 | +++ b/drivers/xen/xen-pciback/xenbus.c |
7107 | @@ -123,7 +123,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref, |
7108 | |
7109 | pdev->sh_info = vaddr; |
7110 | |
7111 | - err = bind_interdomain_evtchn_to_irqhandler( |
7112 | + err = bind_interdomain_evtchn_to_irqhandler_lateeoi( |
7113 | pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event, |
7114 | 0, DRV_NAME, pdev); |
7115 | if (err < 0) { |
7116 | diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c |
7117 | index ba0942e481bc8..33d6499d84724 100644 |
7118 | --- a/drivers/xen/xen-scsiback.c |
7119 | +++ b/drivers/xen/xen-scsiback.c |
7120 | @@ -91,7 +91,6 @@ struct vscsibk_info { |
7121 | unsigned int irq; |
7122 | |
7123 | struct vscsiif_back_ring ring; |
7124 | - int ring_error; |
7125 | |
7126 | spinlock_t ring_lock; |
7127 | atomic_t nr_unreplied_reqs; |
7128 | @@ -722,7 +721,8 @@ static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info, |
7129 | return pending_req; |
7130 | } |
7131 | |
7132 | -static int scsiback_do_cmd_fn(struct vscsibk_info *info) |
7133 | +static int scsiback_do_cmd_fn(struct vscsibk_info *info, |
7134 | + unsigned int *eoi_flags) |
7135 | { |
7136 | struct vscsiif_back_ring *ring = &info->ring; |
7137 | struct vscsiif_request ring_req; |
7138 | @@ -739,11 +739,12 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) |
7139 | rc = ring->rsp_prod_pvt; |
7140 | pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n", |
7141 | info->domid, rp, rc, rp - rc); |
7142 | - info->ring_error = 1; |
7143 | - return 0; |
7144 | + return -EINVAL; |
7145 | } |
7146 | |
7147 | while ((rc != rp)) { |
7148 | + *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS; |
7149 | + |
7150 | if (RING_REQUEST_CONS_OVERFLOW(ring, rc)) |
7151 | break; |
7152 | |
7153 | @@ -802,13 +803,16 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) |
7154 | static irqreturn_t scsiback_irq_fn(int irq, void *dev_id) |
7155 | { |
7156 | struct vscsibk_info *info = dev_id; |
7157 | + int rc; |
7158 | + unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS; |
7159 | |
7160 | - if (info->ring_error) |
7161 | - return IRQ_HANDLED; |
7162 | - |
7163 | - while (scsiback_do_cmd_fn(info)) |
7164 | + while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0) |
7165 | cond_resched(); |
7166 | |
7167 | + /* In case of a ring error we keep the event channel masked. */ |
7168 | + if (!rc) |
7169 | + xen_irq_lateeoi(irq, eoi_flags); |
7170 | + |
7171 | return IRQ_HANDLED; |
7172 | } |
7173 | |
7174 | @@ -829,7 +833,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref, |
7175 | sring = (struct vscsiif_sring *)area; |
7176 | BACK_RING_INIT(&info->ring, sring, PAGE_SIZE); |
7177 | |
7178 | - err = bind_interdomain_evtchn_to_irq(info->domid, evtchn); |
7179 | + err = bind_interdomain_evtchn_to_irq_lateeoi(info->domid, evtchn); |
7180 | if (err < 0) |
7181 | goto unmap_page; |
7182 | |
7183 | @@ -1252,7 +1256,6 @@ static int scsiback_probe(struct xenbus_device *dev, |
7184 | |
7185 | info->domid = dev->otherend_id; |
7186 | spin_lock_init(&info->ring_lock); |
7187 | - info->ring_error = 0; |
7188 | atomic_set(&info->nr_unreplied_reqs, 0); |
7189 | init_waitqueue_head(&info->waiting_to_free); |
7190 | info->dev = dev; |
7191 | diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c |
7192 | index fe7f0bd2048e4..ee9cabac12041 100644 |
7193 | --- a/fs/9p/vfs_file.c |
7194 | +++ b/fs/9p/vfs_file.c |
7195 | @@ -609,9 +609,9 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma) |
7196 | struct writeback_control wbc = { |
7197 | .nr_to_write = LONG_MAX, |
7198 | .sync_mode = WB_SYNC_ALL, |
7199 | - .range_start = vma->vm_pgoff * PAGE_SIZE, |
7200 | + .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE, |
7201 | /* absolute end, byte at end included */ |
7202 | - .range_end = vma->vm_pgoff * PAGE_SIZE + |
7203 | + .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE + |
7204 | (vma->vm_end - vma->vm_start - 1), |
7205 | }; |
7206 | |
7207 | diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c |
7208 | index c05127f506373..e25133a9e9dfe 100644 |
7209 | --- a/fs/btrfs/ctree.c |
7210 | +++ b/fs/btrfs/ctree.c |
7211 | @@ -1103,6 +1103,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, |
7212 | |
7213 | ret = update_ref_for_cow(trans, root, buf, cow, &last_ref); |
7214 | if (ret) { |
7215 | + btrfs_tree_unlock(cow); |
7216 | + free_extent_buffer(cow); |
7217 | btrfs_abort_transaction(trans, ret); |
7218 | return ret; |
7219 | } |
7220 | @@ -1110,6 +1112,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, |
7221 | if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { |
7222 | ret = btrfs_reloc_cow_block(trans, root, buf, cow); |
7223 | if (ret) { |
7224 | + btrfs_tree_unlock(cow); |
7225 | + free_extent_buffer(cow); |
7226 | btrfs_abort_transaction(trans, ret); |
7227 | return ret; |
7228 | } |
7229 | @@ -1142,6 +1146,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, |
7230 | if (last_ref) { |
7231 | ret = tree_mod_log_free_eb(buf); |
7232 | if (ret) { |
7233 | + btrfs_tree_unlock(cow); |
7234 | + free_extent_buffer(cow); |
7235 | btrfs_abort_transaction(trans, ret); |
7236 | return ret; |
7237 | } |
7238 | diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h |
7239 | index 23b4f38e23928..27128164fac97 100644 |
7240 | --- a/fs/btrfs/ctree.h |
7241 | +++ b/fs/btrfs/ctree.h |
7242 | @@ -3404,6 +3404,8 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root, |
7243 | int btrfs_reada_wait(void *handle); |
7244 | void btrfs_reada_detach(void *handle); |
7245 | int btree_readahead_hook(struct extent_buffer *eb, int err); |
7246 | +void btrfs_reada_remove_dev(struct btrfs_device *dev); |
7247 | +void btrfs_reada_undo_remove_dev(struct btrfs_device *dev); |
7248 | |
7249 | static inline int is_fstree(u64 rootid) |
7250 | { |
7251 | diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c |
7252 | index a34ee9c2f3151..bef62b01824de 100644 |
7253 | --- a/fs/btrfs/delayed-inode.c |
7254 | +++ b/fs/btrfs/delayed-inode.c |
7255 | @@ -627,8 +627,7 @@ static int btrfs_delayed_inode_reserve_metadata( |
7256 | */ |
7257 | if (!src_rsv || (!trans->bytes_reserved && |
7258 | src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) { |
7259 | - ret = btrfs_qgroup_reserve_meta_prealloc(root, |
7260 | - fs_info->nodesize, true); |
7261 | + ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); |
7262 | if (ret < 0) |
7263 | return ret; |
7264 | ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes, |
7265 | diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c |
7266 | index 196bd241e701a..96843934dcbba 100644 |
7267 | --- a/fs/btrfs/dev-replace.c |
7268 | +++ b/fs/btrfs/dev-replace.c |
7269 | @@ -190,7 +190,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, |
7270 | int ret = 0; |
7271 | |
7272 | *device_out = NULL; |
7273 | - if (fs_info->fs_devices->seeding) { |
7274 | + if (srcdev->fs_devices->seeding) { |
7275 | btrfs_err(fs_info, "the filesystem is a seed filesystem!"); |
7276 | return -EINVAL; |
7277 | } |
7278 | @@ -631,6 +631,9 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, |
7279 | } |
7280 | btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); |
7281 | |
7282 | + if (!scrub_ret) |
7283 | + btrfs_reada_remove_dev(src_device); |
7284 | + |
7285 | /* |
7286 | * We have to use this loop approach because at this point src_device |
7287 | * has to be available for transaction commit to complete, yet new |
7288 | @@ -639,6 +642,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, |
7289 | while (1) { |
7290 | trans = btrfs_start_transaction(root, 0); |
7291 | if (IS_ERR(trans)) { |
7292 | + btrfs_reada_undo_remove_dev(src_device); |
7293 | mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); |
7294 | return PTR_ERR(trans); |
7295 | } |
7296 | @@ -689,6 +693,7 @@ error: |
7297 | up_write(&dev_replace->rwsem); |
7298 | mutex_unlock(&fs_info->chunk_mutex); |
7299 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); |
7300 | + btrfs_reada_undo_remove_dev(src_device); |
7301 | btrfs_rm_dev_replace_blocked(fs_info); |
7302 | if (tgt_device) |
7303 | btrfs_destroy_dev_replace_tgtdev(tgt_device); |
7304 | diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c |
7305 | index 1feaeadc8cf59..2656dc8de99c2 100644 |
7306 | --- a/fs/btrfs/reada.c |
7307 | +++ b/fs/btrfs/reada.c |
7308 | @@ -421,6 +421,9 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info, |
7309 | if (!dev->bdev) |
7310 | continue; |
7311 | |
7312 | + if (test_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state)) |
7313 | + continue; |
7314 | + |
7315 | if (dev_replace_is_ongoing && |
7316 | dev == fs_info->dev_replace.tgtdev) { |
7317 | /* |
7318 | @@ -445,6 +448,8 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info, |
7319 | } |
7320 | have_zone = 1; |
7321 | } |
7322 | + if (!have_zone) |
7323 | + radix_tree_delete(&fs_info->reada_tree, index); |
7324 | spin_unlock(&fs_info->reada_lock); |
7325 | up_read(&fs_info->dev_replace.rwsem); |
7326 | |
7327 | @@ -1012,3 +1017,45 @@ void btrfs_reada_detach(void *handle) |
7328 | |
7329 | kref_put(&rc->refcnt, reada_control_release); |
7330 | } |
7331 | + |
7332 | +/* |
7333 | + * Before removing a device (device replace or device remove ioctls), call this |
7334 | + * function to wait for all existing readahead requests on the device and to |
7335 | + * make sure no one queues more readahead requests for the device. |
7336 | + * |
7337 | + * Must be called without holding neither the device list mutex nor the device |
7338 | + * replace semaphore, otherwise it will deadlock. |
7339 | + */ |
7340 | +void btrfs_reada_remove_dev(struct btrfs_device *dev) |
7341 | +{ |
7342 | + struct btrfs_fs_info *fs_info = dev->fs_info; |
7343 | + |
7344 | + /* Serialize with readahead extent creation at reada_find_extent(). */ |
7345 | + spin_lock(&fs_info->reada_lock); |
7346 | + set_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state); |
7347 | + spin_unlock(&fs_info->reada_lock); |
7348 | + |
7349 | + /* |
7350 | + * There might be readahead requests added to the radix trees which |
7351 | + * were not yet added to the readahead work queue. We need to start |
7352 | + * them and wait for their completion, otherwise we can end up with |
7353 | + * use-after-free problems when dropping the last reference on the |
7354 | + * readahead extents and their zones, as they need to access the |
7355 | + * device structure. |
7356 | + */ |
7357 | + reada_start_machine(fs_info); |
7358 | + btrfs_flush_workqueue(fs_info->readahead_workers); |
7359 | +} |
7360 | + |
7361 | +/* |
7362 | + * If when removing a device (device replace or device remove ioctls) an error |
7363 | + * happens after calling btrfs_reada_remove_dev(), call this to undo what that |
7364 | + * function did. This is safe to call even if btrfs_reada_remove_dev() was not |
7365 | + * called before. |
7366 | + */ |
7367 | +void btrfs_reada_undo_remove_dev(struct btrfs_device *dev) |
7368 | +{ |
7369 | + spin_lock(&dev->fs_info->reada_lock); |
7370 | + clear_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state); |
7371 | + spin_unlock(&dev->fs_info->reada_lock); |
7372 | +} |
7373 | diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c |
7374 | index b0e5dfb9be7ab..88940f494428a 100644 |
7375 | --- a/fs/btrfs/send.c |
7376 | +++ b/fs/btrfs/send.c |
7377 | @@ -3812,6 +3812,72 @@ static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) |
7378 | return 0; |
7379 | } |
7380 | |
7381 | +/* |
7382 | + * When processing the new references for an inode we may orphanize an existing |
7383 | + * directory inode because its old name conflicts with one of the new references |
7384 | + * of the current inode. Later, when processing another new reference of our |
7385 | + * inode, we might need to orphanize another inode, but the path we have in the |
7386 | + * reference reflects the pre-orphanization name of the directory we previously |
7387 | + * orphanized. For example: |
7388 | + * |
7389 | + * parent snapshot looks like: |
7390 | + * |
7391 | + * . (ino 256) |
7392 | + * |----- f1 (ino 257) |
7393 | + * |----- f2 (ino 258) |
7394 | + * |----- d1/ (ino 259) |
7395 | + * |----- d2/ (ino 260) |
7396 | + * |
7397 | + * send snapshot looks like: |
7398 | + * |
7399 | + * . (ino 256) |
7400 | + * |----- d1 (ino 258) |
7401 | + * |----- f2/ (ino 259) |
7402 | + * |----- f2_link/ (ino 260) |
7403 | + * | |----- f1 (ino 257) |
7404 | + * | |
7405 | + * |----- d2 (ino 258) |
7406 | + * |
7407 | + * When processing inode 257 we compute the name for inode 259 as "d1", and we |
7408 | + * cache it in the name cache. Later when we start processing inode 258, when |
7409 | + * collecting all its new references we set a full path of "d1/d2" for its new |
7410 | + * reference with name "d2". When we start processing the new references we |
7411 | + * start by processing the new reference with name "d1", and this results in |
7412 | + * orphanizing inode 259, since its old reference causes a conflict. Then we |
7413 | + * move on the next new reference, with name "d2", and we find out we must |
7414 | + * orphanize inode 260, as its old reference conflicts with ours - but for the |
7415 | + * orphanization we use a source path corresponding to the path we stored in the |
7416 | + * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the |
7417 | + * receiver fail since the path component "d1/" no longer exists, it was renamed |
7418 | + * to "o259-6-0/" when processing the previous new reference. So in this case we |
7419 | + * must recompute the path in the new reference and use it for the new |
7420 | + * orphanization operation. |
7421 | + */ |
7422 | +static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) |
7423 | +{ |
7424 | + char *name; |
7425 | + int ret; |
7426 | + |
7427 | + name = kmemdup(ref->name, ref->name_len, GFP_KERNEL); |
7428 | + if (!name) |
7429 | + return -ENOMEM; |
7430 | + |
7431 | + fs_path_reset(ref->full_path); |
7432 | + ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path); |
7433 | + if (ret < 0) |
7434 | + goto out; |
7435 | + |
7436 | + ret = fs_path_add(ref->full_path, name, ref->name_len); |
7437 | + if (ret < 0) |
7438 | + goto out; |
7439 | + |
7440 | + /* Update the reference's base name pointer. */ |
7441 | + set_ref_path(ref, ref->full_path); |
7442 | +out: |
7443 | + kfree(name); |
7444 | + return ret; |
7445 | +} |
7446 | + |
7447 | /* |
7448 | * This does all the move/link/unlink/rmdir magic. |
7449 | */ |
7450 | @@ -3880,52 +3946,56 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) |
7451 | goto out; |
7452 | } |
7453 | |
7454 | + /* |
7455 | + * Before doing any rename and link operations, do a first pass on the |
7456 | + * new references to orphanize any unprocessed inodes that may have a |
7457 | + * reference that conflicts with one of the new references of the current |
7458 | + * inode. This needs to happen first because a new reference may conflict |
7459 | + * with the old reference of a parent directory, so we must make sure |
7460 | + * that the path used for link and rename commands don't use an |
7461 | + * orphanized name when an ancestor was not yet orphanized. |
7462 | + * |
7463 | + * Example: |
7464 | + * |
7465 | + * Parent snapshot: |
7466 | + * |
7467 | + * . (ino 256) |
7468 | + * |----- testdir/ (ino 259) |
7469 | + * | |----- a (ino 257) |
7470 | + * | |
7471 | + * |----- b (ino 258) |
7472 | + * |
7473 | + * Send snapshot: |
7474 | + * |
7475 | + * . (ino 256) |
7476 | + * |----- testdir_2/ (ino 259) |
7477 | + * | |----- a (ino 260) |
7478 | + * | |
7479 | + * |----- testdir (ino 257) |
7480 | + * |----- b (ino 257) |
7481 | + * |----- b2 (ino 258) |
7482 | + * |
7483 | + * Processing the new reference for inode 257 with name "b" may happen |
7484 | + * before processing the new reference with name "testdir". If so, we |
7485 | + * must make sure that by the time we send a link command to create the |
7486 | + * hard link "b", inode 259 was already orphanized, since the generated |
7487 | + * path in "valid_path" already contains the orphanized name for 259. |
7488 | + * We are processing inode 257, so only later when processing 259 we do |
7489 | + * the rename operation to change its temporary (orphanized) name to |
7490 | + * "testdir_2". |
7491 | + */ |
7492 | list_for_each_entry(cur, &sctx->new_refs, list) { |
7493 | - /* |
7494 | - * We may have refs where the parent directory does not exist |
7495 | - * yet. This happens if the parent directories inum is higher |
7496 | - * than the current inum. To handle this case, we create the |
7497 | - * parent directory out of order. But we need to check if this |
7498 | - * did already happen before due to other refs in the same dir. |
7499 | - */ |
7500 | ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); |
7501 | if (ret < 0) |
7502 | goto out; |
7503 | - if (ret == inode_state_will_create) { |
7504 | - ret = 0; |
7505 | - /* |
7506 | - * First check if any of the current inodes refs did |
7507 | - * already create the dir. |
7508 | - */ |
7509 | - list_for_each_entry(cur2, &sctx->new_refs, list) { |
7510 | - if (cur == cur2) |
7511 | - break; |
7512 | - if (cur2->dir == cur->dir) { |
7513 | - ret = 1; |
7514 | - break; |
7515 | - } |
7516 | - } |
7517 | - |
7518 | - /* |
7519 | - * If that did not happen, check if a previous inode |
7520 | - * did already create the dir. |
7521 | - */ |
7522 | - if (!ret) |
7523 | - ret = did_create_dir(sctx, cur->dir); |
7524 | - if (ret < 0) |
7525 | - goto out; |
7526 | - if (!ret) { |
7527 | - ret = send_create_inode(sctx, cur->dir); |
7528 | - if (ret < 0) |
7529 | - goto out; |
7530 | - } |
7531 | - } |
7532 | + if (ret == inode_state_will_create) |
7533 | + continue; |
7534 | |
7535 | /* |
7536 | - * Check if this new ref would overwrite the first ref of |
7537 | - * another unprocessed inode. If yes, orphanize the |
7538 | - * overwritten inode. If we find an overwritten ref that is |
7539 | - * not the first ref, simply unlink it. |
7540 | + * Check if this new ref would overwrite the first ref of another |
7541 | + * unprocessed inode. If yes, orphanize the overwritten inode. |
7542 | + * If we find an overwritten ref that is not the first ref, |
7543 | + * simply unlink it. |
7544 | */ |
7545 | ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, |
7546 | cur->name, cur->name_len, |
7547 | @@ -3942,6 +4012,12 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) |
7548 | struct name_cache_entry *nce; |
7549 | struct waiting_dir_move *wdm; |
7550 | |
7551 | + if (orphanized_dir) { |
7552 | + ret = refresh_ref_path(sctx, cur); |
7553 | + if (ret < 0) |
7554 | + goto out; |
7555 | + } |
7556 | + |
7557 | ret = orphanize_inode(sctx, ow_inode, ow_gen, |
7558 | cur->full_path); |
7559 | if (ret < 0) |
7560 | @@ -4004,6 +4080,49 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) |
7561 | } |
7562 | } |
7563 | |
7564 | + } |
7565 | + |
7566 | + list_for_each_entry(cur, &sctx->new_refs, list) { |
7567 | + /* |
7568 | + * We may have refs where the parent directory does not exist |
7569 | + * yet. This happens if the parent directories inum is higher |
7570 | + * than the current inum. To handle this case, we create the |
7571 | + * parent directory out of order. But we need to check if this |
7572 | + * did already happen before due to other refs in the same dir. |
7573 | + */ |
7574 | + ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); |
7575 | + if (ret < 0) |
7576 | + goto out; |
7577 | + if (ret == inode_state_will_create) { |
7578 | + ret = 0; |
7579 | + /* |
7580 | + * First check if any of the current inodes refs did |
7581 | + * already create the dir. |
7582 | + */ |
7583 | + list_for_each_entry(cur2, &sctx->new_refs, list) { |
7584 | + if (cur == cur2) |
7585 | + break; |
7586 | + if (cur2->dir == cur->dir) { |
7587 | + ret = 1; |
7588 | + break; |
7589 | + } |
7590 | + } |
7591 | + |
7592 | + /* |
7593 | + * If that did not happen, check if a previous inode |
7594 | + * did already create the dir. |
7595 | + */ |
7596 | + if (!ret) |
7597 | + ret = did_create_dir(sctx, cur->dir); |
7598 | + if (ret < 0) |
7599 | + goto out; |
7600 | + if (!ret) { |
7601 | + ret = send_create_inode(sctx, cur->dir); |
7602 | + if (ret < 0) |
7603 | + goto out; |
7604 | + } |
7605 | + } |
7606 | + |
7607 | if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) { |
7608 | ret = wait_for_dest_dir_move(sctx, cur, is_orphan); |
7609 | if (ret < 0) |
7610 | @@ -7233,7 +7352,7 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) |
7611 | |
7612 | alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1); |
7613 | |
7614 | - sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL); |
7615 | + sctx->clone_roots = kvzalloc(alloc_size, GFP_KERNEL); |
7616 | if (!sctx->clone_roots) { |
7617 | ret = -ENOMEM; |
7618 | goto out; |
7619 | diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c |
7620 | index 84b8d6ebf98f3..48e46323d519c 100644 |
7621 | --- a/fs/btrfs/tree-checker.c |
7622 | +++ b/fs/btrfs/tree-checker.c |
7623 | @@ -577,18 +577,36 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf, |
7624 | u64 type; |
7625 | u64 features; |
7626 | bool mixed = false; |
7627 | + int raid_index; |
7628 | + int nparity; |
7629 | + int ncopies; |
7630 | |
7631 | length = btrfs_chunk_length(leaf, chunk); |
7632 | stripe_len = btrfs_chunk_stripe_len(leaf, chunk); |
7633 | num_stripes = btrfs_chunk_num_stripes(leaf, chunk); |
7634 | sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); |
7635 | type = btrfs_chunk_type(leaf, chunk); |
7636 | + raid_index = btrfs_bg_flags_to_raid_index(type); |
7637 | + ncopies = btrfs_raid_array[raid_index].ncopies; |
7638 | + nparity = btrfs_raid_array[raid_index].nparity; |
7639 | |
7640 | if (!num_stripes) { |
7641 | chunk_err(leaf, chunk, logical, |
7642 | "invalid chunk num_stripes, have %u", num_stripes); |
7643 | return -EUCLEAN; |
7644 | } |
7645 | + if (num_stripes < ncopies) { |
7646 | + chunk_err(leaf, chunk, logical, |
7647 | + "invalid chunk num_stripes < ncopies, have %u < %d", |
7648 | + num_stripes, ncopies); |
7649 | + return -EUCLEAN; |
7650 | + } |
7651 | + if (nparity && num_stripes == nparity) { |
7652 | + chunk_err(leaf, chunk, logical, |
7653 | + "invalid chunk num_stripes == nparity, have %u == %d", |
7654 | + num_stripes, nparity); |
7655 | + return -EUCLEAN; |
7656 | + } |
7657 | if (!IS_ALIGNED(logical, fs_info->sectorsize)) { |
7658 | chunk_err(leaf, chunk, logical, |
7659 | "invalid chunk logical, have %llu should aligned to %u", |
7660 | @@ -869,7 +887,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key, |
7661 | int slot) |
7662 | { |
7663 | struct btrfs_fs_info *fs_info = leaf->fs_info; |
7664 | - struct btrfs_root_item ri; |
7665 | + struct btrfs_root_item ri = { 0 }; |
7666 | const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY | |
7667 | BTRFS_ROOT_SUBVOL_DEAD; |
7668 | |
7669 | @@ -889,14 +907,21 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key, |
7670 | return -EUCLEAN; |
7671 | } |
7672 | |
7673 | - if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) { |
7674 | + if (btrfs_item_size_nr(leaf, slot) != sizeof(ri) && |
7675 | + btrfs_item_size_nr(leaf, slot) != btrfs_legacy_root_item_size()) { |
7676 | generic_err(leaf, slot, |
7677 | - "invalid root item size, have %u expect %zu", |
7678 | - btrfs_item_size_nr(leaf, slot), sizeof(ri)); |
7679 | + "invalid root item size, have %u expect %zu or %u", |
7680 | + btrfs_item_size_nr(leaf, slot), sizeof(ri), |
7681 | + btrfs_legacy_root_item_size()); |
7682 | } |
7683 | |
7684 | + /* |
7685 | + * For legacy root item, the members starting at generation_v2 will be |
7686 | + * all filled with 0. |
7687 | + * And since we allow geneartion_v2 as 0, it will still pass the check. |
7688 | + */ |
7689 | read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot), |
7690 | - sizeof(ri)); |
7691 | + btrfs_item_size_nr(leaf, slot)); |
7692 | |
7693 | /* Generation related */ |
7694 | if (btrfs_root_generation(&ri) > |
7695 | diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c |
7696 | index 7042b84edc89d..de53e51669976 100644 |
7697 | --- a/fs/btrfs/tree-log.c |
7698 | +++ b/fs/btrfs/tree-log.c |
7699 | @@ -3639,6 +3639,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, |
7700 | * search and this search we'll not find the key again and can just |
7701 | * bail. |
7702 | */ |
7703 | +search: |
7704 | ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); |
7705 | if (ret != 0) |
7706 | goto done; |
7707 | @@ -3658,6 +3659,13 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, |
7708 | |
7709 | if (min_key.objectid != ino || min_key.type != key_type) |
7710 | goto done; |
7711 | + |
7712 | + if (need_resched()) { |
7713 | + btrfs_release_path(path); |
7714 | + cond_resched(); |
7715 | + goto search; |
7716 | + } |
7717 | + |
7718 | ret = overwrite_item(trans, log, dst_path, src, i, |
7719 | &min_key); |
7720 | if (ret) { |
7721 | diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c |
7722 | index e798caee978e7..58910a0a3e4a4 100644 |
7723 | --- a/fs/btrfs/volumes.c |
7724 | +++ b/fs/btrfs/volumes.c |
7725 | @@ -1123,16 +1123,18 @@ static noinline struct btrfs_device *device_list_add(const char *path, |
7726 | bdput(path_bdev); |
7727 | mutex_unlock(&fs_devices->device_list_mutex); |
7728 | btrfs_warn_in_rcu(device->fs_info, |
7729 | - "duplicate device fsid:devid for %pU:%llu old:%s new:%s", |
7730 | - disk_super->fsid, devid, |
7731 | - rcu_str_deref(device->name), path); |
7732 | + "duplicate device %s devid %llu generation %llu scanned by %s (%d)", |
7733 | + path, devid, found_transid, |
7734 | + current->comm, |
7735 | + task_pid_nr(current)); |
7736 | return ERR_PTR(-EEXIST); |
7737 | } |
7738 | bdput(path_bdev); |
7739 | btrfs_info_in_rcu(device->fs_info, |
7740 | - "device fsid %pU devid %llu moved old:%s new:%s", |
7741 | - disk_super->fsid, devid, |
7742 | - rcu_str_deref(device->name), path); |
7743 | + "devid %llu device path %s changed to %s scanned by %s (%d)", |
7744 | + devid, rcu_str_deref(device->name), |
7745 | + path, current->comm, |
7746 | + task_pid_nr(current)); |
7747 | } |
7748 | |
7749 | name = rcu_string_strdup(path, GFP_NOFS); |
7750 | @@ -2206,6 +2208,8 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path, |
7751 | |
7752 | mutex_unlock(&uuid_mutex); |
7753 | ret = btrfs_shrink_device(device, 0); |
7754 | + if (!ret) |
7755 | + btrfs_reada_remove_dev(device); |
7756 | mutex_lock(&uuid_mutex); |
7757 | if (ret) |
7758 | goto error_undo; |
7759 | @@ -2292,6 +2296,7 @@ out: |
7760 | return ret; |
7761 | |
7762 | error_undo: |
7763 | + btrfs_reada_undo_remove_dev(device); |
7764 | if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { |
7765 | mutex_lock(&fs_info->chunk_mutex); |
7766 | list_add(&device->dev_alloc_list, |
7767 | diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h |
7768 | index 5acf5c507ec2e..aa6a6d7b2978e 100644 |
7769 | --- a/fs/btrfs/volumes.h |
7770 | +++ b/fs/btrfs/volumes.h |
7771 | @@ -56,6 +56,7 @@ struct btrfs_io_geometry { |
7772 | #define BTRFS_DEV_STATE_MISSING (2) |
7773 | #define BTRFS_DEV_STATE_REPLACE_TGT (3) |
7774 | #define BTRFS_DEV_STATE_FLUSH_SENT (4) |
7775 | +#define BTRFS_DEV_STATE_NO_READA (5) |
7776 | |
7777 | struct btrfs_device { |
7778 | struct list_head dev_list; /* device_list_mutex */ |
7779 | diff --git a/fs/buffer.c b/fs/buffer.c |
7780 | index 22d8ac4a8c40a..0d7bd7712076d 100644 |
7781 | --- a/fs/buffer.c |
7782 | +++ b/fs/buffer.c |
7783 | @@ -2739,16 +2739,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block, |
7784 | /* Is the page fully outside i_size? (truncate in progress) */ |
7785 | offset = i_size & (PAGE_SIZE-1); |
7786 | if (page->index >= end_index+1 || !offset) { |
7787 | - /* |
7788 | - * The page may have dirty, unmapped buffers. For example, |
7789 | - * they may have been added in ext3_writepage(). Make them |
7790 | - * freeable here, so the page does not leak. |
7791 | - */ |
7792 | -#if 0 |
7793 | - /* Not really sure about this - do we need this ? */ |
7794 | - if (page->mapping->a_ops->invalidatepage) |
7795 | - page->mapping->a_ops->invalidatepage(page, offset); |
7796 | -#endif |
7797 | unlock_page(page); |
7798 | return 0; /* don't care */ |
7799 | } |
7800 | @@ -2943,12 +2933,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block, |
7801 | /* Is the page fully outside i_size? (truncate in progress) */ |
7802 | offset = i_size & (PAGE_SIZE-1); |
7803 | if (page->index >= end_index+1 || !offset) { |
7804 | - /* |
7805 | - * The page may have dirty, unmapped buffers. For example, |
7806 | - * they may have been added in ext3_writepage(). Make them |
7807 | - * freeable here, so the page does not leak. |
7808 | - */ |
7809 | - do_invalidatepage(page, 0, PAGE_SIZE); |
7810 | unlock_page(page); |
7811 | return 0; /* don't care */ |
7812 | } |
7813 | diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c |
7814 | index ad057ed2b30b0..bd5fe8d00d009 100644 |
7815 | --- a/fs/cachefiles/rdwr.c |
7816 | +++ b/fs/cachefiles/rdwr.c |
7817 | @@ -121,7 +121,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object, |
7818 | _debug("reissue read"); |
7819 | ret = bmapping->a_ops->readpage(NULL, backpage); |
7820 | if (ret < 0) |
7821 | - goto unlock_discard; |
7822 | + goto discard; |
7823 | } |
7824 | |
7825 | /* but the page may have been read before the monitor was installed, so |
7826 | @@ -138,6 +138,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object, |
7827 | |
7828 | unlock_discard: |
7829 | unlock_page(backpage); |
7830 | +discard: |
7831 | spin_lock_irq(&object->work_lock); |
7832 | list_del(&monitor->op_link); |
7833 | spin_unlock_irq(&object->work_lock); |
7834 | diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c |
7835 | index 7ab6166011418..a02e845eb0fbf 100644 |
7836 | --- a/fs/ceph/addr.c |
7837 | +++ b/fs/ceph/addr.c |
7838 | @@ -1427,7 +1427,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf) |
7839 | struct ceph_inode_info *ci = ceph_inode(inode); |
7840 | struct ceph_file_info *fi = vma->vm_file->private_data; |
7841 | struct page *pinned_page = NULL; |
7842 | - loff_t off = vmf->pgoff << PAGE_SHIFT; |
7843 | + loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT; |
7844 | int want, got, err; |
7845 | sigset_t oldset; |
7846 | vm_fault_t ret = VM_FAULT_SIGBUS; |
7847 | diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c |
7848 | index 17df90b5f57a2..fd9e289f3e72a 100644 |
7849 | --- a/fs/cifs/inode.c |
7850 | +++ b/fs/cifs/inode.c |
7851 | @@ -2614,13 +2614,18 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs) |
7852 | { |
7853 | struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); |
7854 | struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); |
7855 | + int rc, retries = 0; |
7856 | |
7857 | - if (pTcon->unix_ext) |
7858 | - return cifs_setattr_unix(direntry, attrs); |
7859 | - |
7860 | - return cifs_setattr_nounix(direntry, attrs); |
7861 | + do { |
7862 | + if (pTcon->unix_ext) |
7863 | + rc = cifs_setattr_unix(direntry, attrs); |
7864 | + else |
7865 | + rc = cifs_setattr_nounix(direntry, attrs); |
7866 | + retries++; |
7867 | + } while (is_retryable_error(rc) && retries < 2); |
7868 | |
7869 | /* BB: add cifs_setattr_legacy for really old servers */ |
7870 | + return rc; |
7871 | } |
7872 | |
7873 | #if 0 |
7874 | diff --git a/fs/exec.c b/fs/exec.c |
7875 | index de833553ae27d..2441eb1a1e2d0 100644 |
7876 | --- a/fs/exec.c |
7877 | +++ b/fs/exec.c |
7878 | @@ -1044,11 +1044,24 @@ static int exec_mmap(struct mm_struct *mm) |
7879 | } |
7880 | |
7881 | task_lock(tsk); |
7882 | - active_mm = tsk->active_mm; |
7883 | membarrier_exec_mmap(mm); |
7884 | - tsk->mm = mm; |
7885 | + |
7886 | + local_irq_disable(); |
7887 | + active_mm = tsk->active_mm; |
7888 | tsk->active_mm = mm; |
7889 | + tsk->mm = mm; |
7890 | + /* |
7891 | + * This prevents preemption while active_mm is being loaded and |
7892 | + * it and mm are being updated, which could cause problems for |
7893 | + * lazy tlb mm refcounting when these are updated by context |
7894 | + * switches. Not all architectures can handle irqs off over |
7895 | + * activate_mm yet. |
7896 | + */ |
7897 | + if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM)) |
7898 | + local_irq_enable(); |
7899 | activate_mm(active_mm, mm); |
7900 | + if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM)) |
7901 | + local_irq_enable(); |
7902 | tsk->mm->vmacache_seqnum = 0; |
7903 | vmacache_flush(tsk); |
7904 | task_unlock(tsk); |
7905 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
7906 | index 95a8a04c77dd3..cbd028a31daff 100644 |
7907 | --- a/fs/ext4/inode.c |
7908 | +++ b/fs/ext4/inode.c |
7909 | @@ -5271,6 +5271,12 @@ static int ext4_do_update_inode(handle_t *handle, |
7910 | if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) |
7911 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); |
7912 | |
7913 | + err = ext4_inode_blocks_set(handle, raw_inode, ei); |
7914 | + if (err) { |
7915 | + spin_unlock(&ei->i_raw_lock); |
7916 | + goto out_brelse; |
7917 | + } |
7918 | + |
7919 | raw_inode->i_mode = cpu_to_le16(inode->i_mode); |
7920 | i_uid = i_uid_read(inode); |
7921 | i_gid = i_gid_read(inode); |
7922 | @@ -5304,11 +5310,6 @@ static int ext4_do_update_inode(handle_t *handle, |
7923 | EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); |
7924 | EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); |
7925 | |
7926 | - err = ext4_inode_blocks_set(handle, raw_inode, ei); |
7927 | - if (err) { |
7928 | - spin_unlock(&ei->i_raw_lock); |
7929 | - goto out_brelse; |
7930 | - } |
7931 | raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); |
7932 | raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); |
7933 | if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) |
7934 | diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c |
7935 | index 080e25f6ef564..ad1d4c8faf449 100644 |
7936 | --- a/fs/ext4/resize.c |
7937 | +++ b/fs/ext4/resize.c |
7938 | @@ -861,8 +861,10 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, |
7939 | |
7940 | BUFFER_TRACE(dind, "get_write_access"); |
7941 | err = ext4_journal_get_write_access(handle, dind); |
7942 | - if (unlikely(err)) |
7943 | + if (unlikely(err)) { |
7944 | ext4_std_error(sb, err); |
7945 | + goto errout; |
7946 | + } |
7947 | |
7948 | /* ext4_reserve_inode_write() gets a reference on the iloc */ |
7949 | err = ext4_reserve_inode_write(handle, inode, &iloc); |
7950 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
7951 | index 4aae7e3e89a12..6a260cc8bce6b 100644 |
7952 | --- a/fs/ext4/super.c |
7953 | +++ b/fs/ext4/super.c |
7954 | @@ -4684,6 +4684,7 @@ cantfind_ext4: |
7955 | |
7956 | failed_mount8: |
7957 | ext4_unregister_sysfs(sb); |
7958 | + kobject_put(&sbi->s_kobj); |
7959 | failed_mount7: |
7960 | ext4_unregister_li_request(sb); |
7961 | failed_mount6: |
7962 | @@ -5856,6 +5857,11 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, |
7963 | /* Quotafile not on the same filesystem? */ |
7964 | if (path->dentry->d_sb != sb) |
7965 | return -EXDEV; |
7966 | + |
7967 | + /* Quota already enabled for this file? */ |
7968 | + if (IS_NOQUOTA(d_inode(path->dentry))) |
7969 | + return -EBUSY; |
7970 | + |
7971 | /* Journaling quota? */ |
7972 | if (EXT4_SB(sb)->s_qf_names[type]) { |
7973 | /* Quotafile not in fs root? */ |
7974 | diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c |
7975 | index bbd07fe8a4921..c966ccc44c157 100644 |
7976 | --- a/fs/f2fs/checkpoint.c |
7977 | +++ b/fs/f2fs/checkpoint.c |
7978 | @@ -108,7 +108,7 @@ struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) |
7979 | return __get_meta_page(sbi, index, true); |
7980 | } |
7981 | |
7982 | -struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index) |
7983 | +struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index) |
7984 | { |
7985 | struct page *page; |
7986 | int count = 0; |
7987 | @@ -243,6 +243,8 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, |
7988 | blkno * NAT_ENTRY_PER_BLOCK); |
7989 | break; |
7990 | case META_SIT: |
7991 | + if (unlikely(blkno >= TOTAL_SEGS(sbi))) |
7992 | + goto out; |
7993 | /* get sit block addr */ |
7994 | fio.new_blkaddr = current_sit_addr(sbi, |
7995 | blkno * SIT_ENTRY_PER_BLOCK); |
7996 | @@ -1044,8 +1046,12 @@ int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) |
7997 | get_pages(sbi, is_dir ? |
7998 | F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA)); |
7999 | retry: |
8000 | - if (unlikely(f2fs_cp_error(sbi))) |
8001 | + if (unlikely(f2fs_cp_error(sbi))) { |
8002 | + trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, |
8003 | + get_pages(sbi, is_dir ? |
8004 | + F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA)); |
8005 | return -EIO; |
8006 | + } |
8007 | |
8008 | spin_lock(&sbi->inode_lock[type]); |
8009 | |
8010 | diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c |
8011 | index e9af46dc06f72..78d041f9775a4 100644 |
8012 | --- a/fs/f2fs/dir.c |
8013 | +++ b/fs/f2fs/dir.c |
8014 | @@ -303,16 +303,15 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, |
8015 | unsigned int max_depth; |
8016 | unsigned int level; |
8017 | |
8018 | + *res_page = NULL; |
8019 | + |
8020 | if (f2fs_has_inline_dentry(dir)) { |
8021 | - *res_page = NULL; |
8022 | de = f2fs_find_in_inline_dir(dir, fname, res_page); |
8023 | goto out; |
8024 | } |
8025 | |
8026 | - if (npages == 0) { |
8027 | - *res_page = NULL; |
8028 | + if (npages == 0) |
8029 | goto out; |
8030 | - } |
8031 | |
8032 | max_depth = F2FS_I(dir)->i_current_depth; |
8033 | if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) { |
8034 | @@ -323,7 +322,6 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, |
8035 | } |
8036 | |
8037 | for (level = 0; level < max_depth; level++) { |
8038 | - *res_page = NULL; |
8039 | de = find_in_level(dir, level, fname, res_page); |
8040 | if (de || IS_ERR(*res_page)) |
8041 | break; |
8042 | diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h |
8043 | index b3b7e63394be7..63440abe58c42 100644 |
8044 | --- a/fs/f2fs/f2fs.h |
8045 | +++ b/fs/f2fs/f2fs.h |
8046 | @@ -3149,7 +3149,7 @@ enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi, |
8047 | void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io); |
8048 | struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); |
8049 | struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); |
8050 | -struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index); |
8051 | +struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); |
8052 | struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); |
8053 | bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, |
8054 | block_t blkaddr, int type); |
8055 | diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c |
8056 | index ed12e96681842..2a4a382f28fed 100644 |
8057 | --- a/fs/f2fs/node.c |
8058 | +++ b/fs/f2fs/node.c |
8059 | @@ -109,7 +109,7 @@ static void clear_node_page_dirty(struct page *page) |
8060 | |
8061 | static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) |
8062 | { |
8063 | - return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid)); |
8064 | + return f2fs_get_meta_page(sbi, current_nat_addr(sbi, nid)); |
8065 | } |
8066 | |
8067 | static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) |
8068 | diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c |
8069 | index 7d85784012678..5ba677f85533c 100644 |
8070 | --- a/fs/f2fs/segment.c |
8071 | +++ b/fs/f2fs/segment.c |
8072 | @@ -2310,7 +2310,9 @@ int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) |
8073 | */ |
8074 | struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) |
8075 | { |
8076 | - return f2fs_get_meta_page_nofail(sbi, GET_SUM_BLOCK(sbi, segno)); |
8077 | + if (unlikely(f2fs_cp_error(sbi))) |
8078 | + return ERR_PTR(-EIO); |
8079 | + return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno)); |
8080 | } |
8081 | |
8082 | void f2fs_update_meta_page(struct f2fs_sb_info *sbi, |
8083 | @@ -2582,7 +2584,11 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type) |
8084 | __next_free_blkoff(sbi, curseg, 0); |
8085 | |
8086 | sum_page = f2fs_get_sum_page(sbi, new_segno); |
8087 | - f2fs_bug_on(sbi, IS_ERR(sum_page)); |
8088 | + if (IS_ERR(sum_page)) { |
8089 | + /* GC won't be able to use stale summary pages by cp_error */ |
8090 | + memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE); |
8091 | + return; |
8092 | + } |
8093 | sum_node = (struct f2fs_summary_block *)page_address(sum_page); |
8094 | memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE); |
8095 | f2fs_put_page(sum_page, 1); |
8096 | @@ -3713,7 +3719,7 @@ int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, |
8097 | static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, |
8098 | unsigned int segno) |
8099 | { |
8100 | - return f2fs_get_meta_page_nofail(sbi, current_sit_addr(sbi, segno)); |
8101 | + return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno)); |
8102 | } |
8103 | |
8104 | static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, |
8105 | diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h |
8106 | index 5f89c515f5bb7..33a6b074209da 100644 |
8107 | --- a/fs/gfs2/incore.h |
8108 | +++ b/fs/gfs2/incore.h |
8109 | @@ -694,6 +694,7 @@ struct gfs2_sbd { |
8110 | struct super_block *sd_vfs; |
8111 | struct gfs2_pcpu_lkstats __percpu *sd_lkstats; |
8112 | struct kobject sd_kobj; |
8113 | + struct completion sd_kobj_unregister; |
8114 | unsigned long sd_flags; /* SDF_... */ |
8115 | struct gfs2_sb_host sd_sb; |
8116 | |
8117 | diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c |
8118 | index e0c55765b06d2..29b27d769860c 100644 |
8119 | --- a/fs/gfs2/ops_fstype.c |
8120 | +++ b/fs/gfs2/ops_fstype.c |
8121 | @@ -169,15 +169,19 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent) |
8122 | return -EINVAL; |
8123 | } |
8124 | |
8125 | - /* If format numbers match exactly, we're done. */ |
8126 | - |
8127 | - if (sb->sb_fs_format == GFS2_FORMAT_FS && |
8128 | - sb->sb_multihost_format == GFS2_FORMAT_MULTI) |
8129 | - return 0; |
8130 | + if (sb->sb_fs_format != GFS2_FORMAT_FS || |
8131 | + sb->sb_multihost_format != GFS2_FORMAT_MULTI) { |
8132 | + fs_warn(sdp, "Unknown on-disk format, unable to mount\n"); |
8133 | + return -EINVAL; |
8134 | + } |
8135 | |
8136 | - fs_warn(sdp, "Unknown on-disk format, unable to mount\n"); |
8137 | + if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE || |
8138 | + (sb->sb_bsize & (sb->sb_bsize - 1))) { |
8139 | + pr_warn("Invalid superblock size\n"); |
8140 | + return -EINVAL; |
8141 | + } |
8142 | |
8143 | - return -EINVAL; |
8144 | + return 0; |
8145 | } |
8146 | |
8147 | static void end_bio_io_page(struct bio *bio) |
8148 | @@ -1094,26 +1098,14 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) |
8149 | } |
8150 | |
8151 | error = init_names(sdp, silent); |
8152 | - if (error) { |
8153 | - /* In this case, we haven't initialized sysfs, so we have to |
8154 | - manually free the sdp. */ |
8155 | - free_sbd(sdp); |
8156 | - sb->s_fs_info = NULL; |
8157 | - return error; |
8158 | - } |
8159 | + if (error) |
8160 | + goto fail_free; |
8161 | |
8162 | snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name); |
8163 | |
8164 | error = gfs2_sys_fs_add(sdp); |
8165 | - /* |
8166 | - * If we hit an error here, gfs2_sys_fs_add will have called function |
8167 | - * kobject_put which causes the sysfs usage count to go to zero, which |
8168 | - * causes sysfs to call function gfs2_sbd_release, which frees sdp. |
8169 | - * Subsequent error paths here will call gfs2_sys_fs_del, which also |
8170 | - * kobject_put to free sdp. |
8171 | - */ |
8172 | if (error) |
8173 | - return error; |
8174 | + goto fail_free; |
8175 | |
8176 | gfs2_create_debugfs_file(sdp); |
8177 | |
8178 | @@ -1210,9 +1202,9 @@ fail_lm: |
8179 | gfs2_lm_unmount(sdp); |
8180 | fail_debug: |
8181 | gfs2_delete_debugfs_file(sdp); |
8182 | - /* gfs2_sys_fs_del must be the last thing we do, since it causes |
8183 | - * sysfs to call function gfs2_sbd_release, which frees sdp. */ |
8184 | gfs2_sys_fs_del(sdp); |
8185 | +fail_free: |
8186 | + free_sbd(sdp); |
8187 | sb->s_fs_info = NULL; |
8188 | return error; |
8189 | } |
8190 | diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c |
8191 | index 5fa1eec4fb4f5..5935ce5ae5636 100644 |
8192 | --- a/fs/gfs2/super.c |
8193 | +++ b/fs/gfs2/super.c |
8194 | @@ -695,6 +695,7 @@ restart: |
8195 | |
8196 | /* At this point, we're through participating in the lockspace */ |
8197 | gfs2_sys_fs_del(sdp); |
8198 | + free_sbd(sdp); |
8199 | } |
8200 | |
8201 | /** |
8202 | diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c |
8203 | index dd15b8e4af2ce..1c6e52dc878e3 100644 |
8204 | --- a/fs/gfs2/sys.c |
8205 | +++ b/fs/gfs2/sys.c |
8206 | @@ -302,7 +302,7 @@ static void gfs2_sbd_release(struct kobject *kobj) |
8207 | { |
8208 | struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj); |
8209 | |
8210 | - free_sbd(sdp); |
8211 | + complete(&sdp->sd_kobj_unregister); |
8212 | } |
8213 | |
8214 | static struct kobj_type gfs2_ktype = { |
8215 | @@ -652,6 +652,7 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp) |
8216 | sprintf(ro, "RDONLY=%d", sb_rdonly(sb)); |
8217 | sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0); |
8218 | |
8219 | + init_completion(&sdp->sd_kobj_unregister); |
8220 | sdp->sd_kobj.kset = gfs2_kset; |
8221 | error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL, |
8222 | "%s", sdp->sd_table_name); |
8223 | @@ -682,6 +683,7 @@ fail_tune: |
8224 | fail_reg: |
8225 | fs_err(sdp, "error %d adding sysfs files\n", error); |
8226 | kobject_put(&sdp->sd_kobj); |
8227 | + wait_for_completion(&sdp->sd_kobj_unregister); |
8228 | sb->s_fs_info = NULL; |
8229 | return error; |
8230 | } |
8231 | @@ -692,6 +694,7 @@ void gfs2_sys_fs_del(struct gfs2_sbd *sdp) |
8232 | sysfs_remove_group(&sdp->sd_kobj, &tune_group); |
8233 | sysfs_remove_group(&sdp->sd_kobj, &lock_module_group); |
8234 | kobject_put(&sdp->sd_kobj); |
8235 | + wait_for_completion(&sdp->sd_kobj_unregister); |
8236 | } |
8237 | |
8238 | static int gfs2_uevent(struct kset *kset, struct kobject *kobj, |
8239 | diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c |
8240 | index 9287eb666322c..2db17fdf516b9 100644 |
8241 | --- a/fs/nfs/namespace.c |
8242 | +++ b/fs/nfs/namespace.c |
8243 | @@ -31,9 +31,9 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ; |
8244 | /* |
8245 | * nfs_path - reconstruct the path given an arbitrary dentry |
8246 | * @base - used to return pointer to the end of devname part of path |
8247 | - * @dentry - pointer to dentry |
8248 | + * @dentry_in - pointer to dentry |
8249 | * @buffer - result buffer |
8250 | - * @buflen - length of buffer |
8251 | + * @buflen_in - length of buffer |
8252 | * @flags - options (see below) |
8253 | * |
8254 | * Helper function for constructing the server pathname |
8255 | @@ -48,15 +48,19 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ; |
8256 | * the original device (export) name |
8257 | * (if unset, the original name is returned verbatim) |
8258 | */ |
8259 | -char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen, |
8260 | - unsigned flags) |
8261 | +char *nfs_path(char **p, struct dentry *dentry_in, char *buffer, |
8262 | + ssize_t buflen_in, unsigned flags) |
8263 | { |
8264 | char *end; |
8265 | int namelen; |
8266 | unsigned seq; |
8267 | const char *base; |
8268 | + struct dentry *dentry; |
8269 | + ssize_t buflen; |
8270 | |
8271 | rename_retry: |
8272 | + buflen = buflen_in; |
8273 | + dentry = dentry_in; |
8274 | end = buffer+buflen; |
8275 | *--end = '\0'; |
8276 | buflen--; |
8277 | diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h |
8278 | index bb322d9de313b..c4a98cbda6dd9 100644 |
8279 | --- a/fs/nfs/nfs4_fs.h |
8280 | +++ b/fs/nfs/nfs4_fs.h |
8281 | @@ -570,6 +570,14 @@ static inline bool nfs4_stateid_is_newer(const nfs4_stateid *s1, const nfs4_stat |
8282 | return (s32)(be32_to_cpu(s1->seqid) - be32_to_cpu(s2->seqid)) > 0; |
8283 | } |
8284 | |
8285 | +static inline bool nfs4_stateid_is_next(const nfs4_stateid *s1, const nfs4_stateid *s2) |
8286 | +{ |
8287 | + u32 seq1 = be32_to_cpu(s1->seqid); |
8288 | + u32 seq2 = be32_to_cpu(s2->seqid); |
8289 | + |
8290 | + return seq2 == seq1 + 1U || (seq2 == 1U && seq1 == 0xffffffffU); |
8291 | +} |
8292 | + |
8293 | static inline void nfs4_stateid_seqid_inc(nfs4_stateid *s1) |
8294 | { |
8295 | u32 seqid = be32_to_cpu(s1->seqid); |
8296 | diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c |
8297 | index 534b6fd70ffdb..6b31cb5f9c9db 100644 |
8298 | --- a/fs/nfs/nfs4file.c |
8299 | +++ b/fs/nfs/nfs4file.c |
8300 | @@ -138,7 +138,8 @@ static ssize_t __nfs4_copy_file_range(struct file *file_in, loff_t pos_in, |
8301 | /* Only offload copy if superblock is the same */ |
8302 | if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) |
8303 | return -EXDEV; |
8304 | - if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY)) |
8305 | + if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY) || |
8306 | + !nfs_server_capable(file_inode(file_in), NFS_CAP_COPY)) |
8307 | return -EOPNOTSUPP; |
8308 | if (file_inode(file_in) == file_inode(file_out)) |
8309 | return -EOPNOTSUPP; |
8310 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
8311 | index 00435556db0ce..ddc900df461c8 100644 |
8312 | --- a/fs/nfs/nfs4proc.c |
8313 | +++ b/fs/nfs/nfs4proc.c |
8314 | @@ -1515,19 +1515,6 @@ static void nfs_state_log_update_open_stateid(struct nfs4_state *state) |
8315 | wake_up_all(&state->waitq); |
8316 | } |
8317 | |
8318 | -static void nfs_state_log_out_of_order_open_stateid(struct nfs4_state *state, |
8319 | - const nfs4_stateid *stateid) |
8320 | -{ |
8321 | - u32 state_seqid = be32_to_cpu(state->open_stateid.seqid); |
8322 | - u32 stateid_seqid = be32_to_cpu(stateid->seqid); |
8323 | - |
8324 | - if (stateid_seqid == state_seqid + 1U || |
8325 | - (stateid_seqid == 1U && state_seqid == 0xffffffffU)) |
8326 | - nfs_state_log_update_open_stateid(state); |
8327 | - else |
8328 | - set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); |
8329 | -} |
8330 | - |
8331 | static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state) |
8332 | { |
8333 | struct nfs_client *clp = state->owner->so_server->nfs_client; |
8334 | @@ -1553,21 +1540,19 @@ static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state) |
8335 | * i.e. The stateid seqids have to be initialised to 1, and |
8336 | * are then incremented on every state transition. |
8337 | */ |
8338 | -static bool nfs_need_update_open_stateid(struct nfs4_state *state, |
8339 | +static bool nfs_stateid_is_sequential(struct nfs4_state *state, |
8340 | const nfs4_stateid *stateid) |
8341 | { |
8342 | - if (test_bit(NFS_OPEN_STATE, &state->flags) == 0 || |
8343 | - !nfs4_stateid_match_other(stateid, &state->open_stateid)) { |
8344 | + if (test_bit(NFS_OPEN_STATE, &state->flags)) { |
8345 | + /* The common case - we're updating to a new sequence number */ |
8346 | + if (nfs4_stateid_match_other(stateid, &state->open_stateid) && |
8347 | + nfs4_stateid_is_next(&state->open_stateid, stateid)) { |
8348 | + return true; |
8349 | + } |
8350 | + } else { |
8351 | + /* This is the first OPEN in this generation */ |
8352 | if (stateid->seqid == cpu_to_be32(1)) |
8353 | - nfs_state_log_update_open_stateid(state); |
8354 | - else |
8355 | - set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); |
8356 | - return true; |
8357 | - } |
8358 | - |
8359 | - if (nfs4_stateid_is_newer(stateid, &state->open_stateid)) { |
8360 | - nfs_state_log_out_of_order_open_stateid(state, stateid); |
8361 | - return true; |
8362 | + return true; |
8363 | } |
8364 | return false; |
8365 | } |
8366 | @@ -1641,16 +1626,16 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, |
8367 | int status = 0; |
8368 | for (;;) { |
8369 | |
8370 | - if (!nfs_need_update_open_stateid(state, stateid)) |
8371 | - return; |
8372 | - if (!test_bit(NFS_STATE_CHANGE_WAIT, &state->flags)) |
8373 | + if (nfs_stateid_is_sequential(state, stateid)) |
8374 | break; |
8375 | + |
8376 | if (status) |
8377 | break; |
8378 | /* Rely on seqids for serialisation with NFSv4.0 */ |
8379 | if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client)) |
8380 | break; |
8381 | |
8382 | + set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); |
8383 | prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); |
8384 | /* |
8385 | * Ensure we process the state changes in the same order |
8386 | @@ -1661,6 +1646,7 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, |
8387 | spin_unlock(&state->owner->so_lock); |
8388 | rcu_read_unlock(); |
8389 | trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0); |
8390 | + |
8391 | if (!signal_pending(current)) { |
8392 | if (schedule_timeout(5*HZ) == 0) |
8393 | status = -EAGAIN; |
8394 | @@ -3397,7 +3383,8 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, |
8395 | __be32 seqid_open; |
8396 | u32 dst_seqid; |
8397 | bool ret; |
8398 | - int seq; |
8399 | + int seq, status = -EAGAIN; |
8400 | + DEFINE_WAIT(wait); |
8401 | |
8402 | for (;;) { |
8403 | ret = false; |
8404 | @@ -3409,15 +3396,41 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, |
8405 | continue; |
8406 | break; |
8407 | } |
8408 | + |
8409 | + write_seqlock(&state->seqlock); |
8410 | seqid_open = state->open_stateid.seqid; |
8411 | - if (read_seqretry(&state->seqlock, seq)) |
8412 | - continue; |
8413 | |
8414 | dst_seqid = be32_to_cpu(dst->seqid); |
8415 | - if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) >= 0) |
8416 | - dst->seqid = cpu_to_be32(dst_seqid + 1); |
8417 | - else |
8418 | + |
8419 | + /* Did another OPEN bump the state's seqid? try again: */ |
8420 | + if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) { |
8421 | dst->seqid = seqid_open; |
8422 | + write_sequnlock(&state->seqlock); |
8423 | + ret = true; |
8424 | + break; |
8425 | + } |
8426 | + |
8427 | + /* server says we're behind but we haven't seen the update yet */ |
8428 | + set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); |
8429 | + prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); |
8430 | + write_sequnlock(&state->seqlock); |
8431 | + trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); |
8432 | + |
8433 | + if (signal_pending(current)) |
8434 | + status = -EINTR; |
8435 | + else |
8436 | + if (schedule_timeout(5*HZ) != 0) |
8437 | + status = 0; |
8438 | + |
8439 | + finish_wait(&state->waitq, &wait); |
8440 | + |
8441 | + if (!status) |
8442 | + continue; |
8443 | + if (status == -EINTR) |
8444 | + break; |
8445 | + |
8446 | + /* we slept the whole 5 seconds, we must have lost a seqid */ |
8447 | + dst->seqid = cpu_to_be32(dst_seqid + 1); |
8448 | ret = true; |
8449 | break; |
8450 | } |
8451 | @@ -7846,9 +7859,11 @@ int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, |
8452 | * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or |
8453 | * DS flags set. |
8454 | */ |
8455 | -static int nfs4_check_cl_exchange_flags(u32 flags) |
8456 | +static int nfs4_check_cl_exchange_flags(u32 flags, u32 version) |
8457 | { |
8458 | - if (flags & ~EXCHGID4_FLAG_MASK_R) |
8459 | + if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R)) |
8460 | + goto out_inval; |
8461 | + else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R)) |
8462 | goto out_inval; |
8463 | if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && |
8464 | (flags & EXCHGID4_FLAG_USE_NON_PNFS)) |
8465 | @@ -8261,7 +8276,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre |
8466 | if (status != 0) |
8467 | goto out; |
8468 | |
8469 | - status = nfs4_check_cl_exchange_flags(resp->flags); |
8470 | + status = nfs4_check_cl_exchange_flags(resp->flags, |
8471 | + clp->cl_mvops->minor_version); |
8472 | if (status != 0) |
8473 | goto out; |
8474 | |
8475 | diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h |
8476 | index 9398c0b6e0a34..2295a934a154e 100644 |
8477 | --- a/fs/nfs/nfs4trace.h |
8478 | +++ b/fs/nfs/nfs4trace.h |
8479 | @@ -1291,6 +1291,7 @@ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_setattr); |
8480 | DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_delegreturn); |
8481 | DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update); |
8482 | DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_wait); |
8483 | +DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_close_stateid_update_wait); |
8484 | |
8485 | DECLARE_EVENT_CLASS(nfs4_getattr_event, |
8486 | TP_PROTO( |
8487 | diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c |
8488 | index c83ddac22f38f..754c763374dd5 100644 |
8489 | --- a/fs/nfsd/nfsproc.c |
8490 | +++ b/fs/nfsd/nfsproc.c |
8491 | @@ -118,6 +118,13 @@ done: |
8492 | return nfsd_return_attrs(nfserr, resp); |
8493 | } |
8494 | |
8495 | +/* Obsolete, replaced by MNTPROC_MNT. */ |
8496 | +static __be32 |
8497 | +nfsd_proc_root(struct svc_rqst *rqstp) |
8498 | +{ |
8499 | + return nfs_ok; |
8500 | +} |
8501 | + |
8502 | /* |
8503 | * Look up a path name component |
8504 | * Note: the dentry in the resp->fh may be negative if the file |
8505 | @@ -203,6 +210,13 @@ nfsd_proc_read(struct svc_rqst *rqstp) |
8506 | return fh_getattr(&resp->fh, &resp->stat); |
8507 | } |
8508 | |
8509 | +/* Reserved */ |
8510 | +static __be32 |
8511 | +nfsd_proc_writecache(struct svc_rqst *rqstp) |
8512 | +{ |
8513 | + return nfs_ok; |
8514 | +} |
8515 | + |
8516 | /* |
8517 | * Write data to a file |
8518 | * N.B. After this call resp->fh needs an fh_put |
8519 | @@ -617,6 +631,7 @@ static const struct svc_procedure nfsd_procedures2[18] = { |
8520 | .pc_xdrressize = ST+AT, |
8521 | }, |
8522 | [NFSPROC_ROOT] = { |
8523 | + .pc_func = nfsd_proc_root, |
8524 | .pc_decode = nfssvc_decode_void, |
8525 | .pc_encode = nfssvc_encode_void, |
8526 | .pc_argsize = sizeof(struct nfsd_void), |
8527 | @@ -654,6 +669,7 @@ static const struct svc_procedure nfsd_procedures2[18] = { |
8528 | .pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4, |
8529 | }, |
8530 | [NFSPROC_WRITECACHE] = { |
8531 | + .pc_func = nfsd_proc_writecache, |
8532 | .pc_decode = nfssvc_decode_void, |
8533 | .pc_encode = nfssvc_encode_void, |
8534 | .pc_argsize = sizeof(struct nfsd_void), |
8535 | diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c |
8536 | index e4b52783819db..992b74f9c9414 100644 |
8537 | --- a/fs/ubifs/debug.c |
8538 | +++ b/fs/ubifs/debug.c |
8539 | @@ -1123,6 +1123,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir) |
8540 | err = PTR_ERR(dent); |
8541 | if (err == -ENOENT) |
8542 | break; |
8543 | + kfree(pdent); |
8544 | return err; |
8545 | } |
8546 | |
8547 | diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c |
8548 | index 5f2ac5ef0891e..f78c3e3ef9314 100644 |
8549 | --- a/fs/ubifs/journal.c |
8550 | +++ b/fs/ubifs/journal.c |
8551 | @@ -894,6 +894,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) |
8552 | if (err == -ENOENT) |
8553 | break; |
8554 | |
8555 | + kfree(pxent); |
8556 | goto out_release; |
8557 | } |
8558 | |
8559 | @@ -906,6 +907,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) |
8560 | ubifs_err(c, "dead directory entry '%s', error %d", |
8561 | xent->name, err); |
8562 | ubifs_ro_mode(c, err); |
8563 | + kfree(pxent); |
8564 | kfree(xent); |
8565 | goto out_release; |
8566 | } |
8567 | @@ -936,8 +938,6 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) |
8568 | inode->i_ino); |
8569 | release_head(c, BASEHD); |
8570 | |
8571 | - ubifs_add_auth_dirt(c, lnum); |
8572 | - |
8573 | if (last_reference) { |
8574 | err = ubifs_tnc_remove_ino(c, inode->i_ino); |
8575 | if (err) |
8576 | @@ -947,6 +947,8 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) |
8577 | } else { |
8578 | union ubifs_key key; |
8579 | |
8580 | + ubifs_add_auth_dirt(c, lnum); |
8581 | + |
8582 | ino_key_init(c, &key, inode->i_ino); |
8583 | err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash); |
8584 | } |
8585 | diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c |
8586 | index 283f9eb48410d..b0117878b3a0c 100644 |
8587 | --- a/fs/ubifs/orphan.c |
8588 | +++ b/fs/ubifs/orphan.c |
8589 | @@ -173,6 +173,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum) |
8590 | err = PTR_ERR(xent); |
8591 | if (err == -ENOENT) |
8592 | break; |
8593 | + kfree(pxent); |
8594 | return err; |
8595 | } |
8596 | |
8597 | @@ -182,6 +183,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum) |
8598 | |
8599 | xattr_orphan = orphan_add(c, xattr_inum, orphan); |
8600 | if (IS_ERR(xattr_orphan)) { |
8601 | + kfree(pxent); |
8602 | kfree(xent); |
8603 | return PTR_ERR(xattr_orphan); |
8604 | } |
8605 | diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c |
8606 | index 7fc2f3f07c16e..e49bd69dfc1c8 100644 |
8607 | --- a/fs/ubifs/super.c |
8608 | +++ b/fs/ubifs/super.c |
8609 | @@ -1092,14 +1092,20 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, |
8610 | break; |
8611 | } |
8612 | case Opt_auth_key: |
8613 | - c->auth_key_name = kstrdup(args[0].from, GFP_KERNEL); |
8614 | - if (!c->auth_key_name) |
8615 | - return -ENOMEM; |
8616 | + if (!is_remount) { |
8617 | + c->auth_key_name = kstrdup(args[0].from, |
8618 | + GFP_KERNEL); |
8619 | + if (!c->auth_key_name) |
8620 | + return -ENOMEM; |
8621 | + } |
8622 | break; |
8623 | case Opt_auth_hash_name: |
8624 | - c->auth_hash_name = kstrdup(args[0].from, GFP_KERNEL); |
8625 | - if (!c->auth_hash_name) |
8626 | - return -ENOMEM; |
8627 | + if (!is_remount) { |
8628 | + c->auth_hash_name = kstrdup(args[0].from, |
8629 | + GFP_KERNEL); |
8630 | + if (!c->auth_hash_name) |
8631 | + return -ENOMEM; |
8632 | + } |
8633 | break; |
8634 | case Opt_ignore: |
8635 | break; |
8636 | @@ -1123,6 +1129,18 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, |
8637 | return 0; |
8638 | } |
8639 | |
8640 | +/* |
8641 | + * ubifs_release_options - release mount parameters which have been dumped. |
8642 | + * @c: UBIFS file-system description object |
8643 | + */ |
8644 | +static void ubifs_release_options(struct ubifs_info *c) |
8645 | +{ |
8646 | + kfree(c->auth_key_name); |
8647 | + c->auth_key_name = NULL; |
8648 | + kfree(c->auth_hash_name); |
8649 | + c->auth_hash_name = NULL; |
8650 | +} |
8651 | + |
8652 | /** |
8653 | * destroy_journal - destroy journal data structures. |
8654 | * @c: UBIFS file-system description object |
8655 | @@ -1295,7 +1313,7 @@ static int mount_ubifs(struct ubifs_info *c) |
8656 | |
8657 | err = ubifs_read_superblock(c); |
8658 | if (err) |
8659 | - goto out_free; |
8660 | + goto out_auth; |
8661 | |
8662 | c->probing = 0; |
8663 | |
8664 | @@ -1307,18 +1325,18 @@ static int mount_ubifs(struct ubifs_info *c) |
8665 | ubifs_err(c, "'compressor \"%s\" is not compiled in", |
8666 | ubifs_compr_name(c, c->default_compr)); |
8667 | err = -ENOTSUPP; |
8668 | - goto out_free; |
8669 | + goto out_auth; |
8670 | } |
8671 | |
8672 | err = init_constants_sb(c); |
8673 | if (err) |
8674 | - goto out_free; |
8675 | + goto out_auth; |
8676 | |
8677 | sz = ALIGN(c->max_idx_node_sz, c->min_io_size) * 2; |
8678 | c->cbuf = kmalloc(sz, GFP_NOFS); |
8679 | if (!c->cbuf) { |
8680 | err = -ENOMEM; |
8681 | - goto out_free; |
8682 | + goto out_auth; |
8683 | } |
8684 | |
8685 | err = alloc_wbufs(c); |
8686 | @@ -1593,6 +1611,8 @@ out_wbufs: |
8687 | free_wbufs(c); |
8688 | out_cbuf: |
8689 | kfree(c->cbuf); |
8690 | +out_auth: |
8691 | + ubifs_exit_authentication(c); |
8692 | out_free: |
8693 | kfree(c->write_reserve_buf); |
8694 | kfree(c->bu.buf); |
8695 | @@ -1632,8 +1652,7 @@ static void ubifs_umount(struct ubifs_info *c) |
8696 | ubifs_lpt_free(c, 0); |
8697 | ubifs_exit_authentication(c); |
8698 | |
8699 | - kfree(c->auth_key_name); |
8700 | - kfree(c->auth_hash_name); |
8701 | + ubifs_release_options(c); |
8702 | kfree(c->cbuf); |
8703 | kfree(c->rcvrd_mst_node); |
8704 | kfree(c->mst_node); |
8705 | @@ -2201,6 +2220,7 @@ out_umount: |
8706 | out_unlock: |
8707 | mutex_unlock(&c->umount_mutex); |
8708 | out_close: |
8709 | + ubifs_release_options(c); |
8710 | ubi_close_volume(c->ubi); |
8711 | out: |
8712 | return err; |
8713 | diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c |
8714 | index e8e7b0e9532e8..33742ee3945b3 100644 |
8715 | --- a/fs/ubifs/tnc.c |
8716 | +++ b/fs/ubifs/tnc.c |
8717 | @@ -2885,6 +2885,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum) |
8718 | err = PTR_ERR(xent); |
8719 | if (err == -ENOENT) |
8720 | break; |
8721 | + kfree(pxent); |
8722 | return err; |
8723 | } |
8724 | |
8725 | @@ -2898,6 +2899,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum) |
8726 | fname_len(&nm) = le16_to_cpu(xent->nlen); |
8727 | err = ubifs_tnc_remove_nm(c, &key1, &nm); |
8728 | if (err) { |
8729 | + kfree(pxent); |
8730 | kfree(xent); |
8731 | return err; |
8732 | } |
8733 | @@ -2906,6 +2908,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum) |
8734 | highest_ino_key(c, &key2, xattr_inum); |
8735 | err = ubifs_tnc_remove_range(c, &key1, &key2); |
8736 | if (err) { |
8737 | + kfree(pxent); |
8738 | kfree(xent); |
8739 | return err; |
8740 | } |
8741 | diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c |
8742 | index 9aefbb60074ff..a0b9b349efe65 100644 |
8743 | --- a/fs/ubifs/xattr.c |
8744 | +++ b/fs/ubifs/xattr.c |
8745 | @@ -522,6 +522,7 @@ int ubifs_purge_xattrs(struct inode *host) |
8746 | xent->name, err); |
8747 | ubifs_ro_mode(c, err); |
8748 | kfree(pxent); |
8749 | + kfree(xent); |
8750 | return err; |
8751 | } |
8752 | |
8753 | @@ -531,6 +532,7 @@ int ubifs_purge_xattrs(struct inode *host) |
8754 | err = remove_xattr(c, host, xino, &nm); |
8755 | if (err) { |
8756 | kfree(pxent); |
8757 | + kfree(xent); |
8758 | iput(xino); |
8759 | ubifs_err(c, "cannot remove xattr, error %d", err); |
8760 | return err; |
8761 | diff --git a/fs/udf/super.c b/fs/udf/super.c |
8762 | index a0cd766b41cdb..4aba4878ed967 100644 |
8763 | --- a/fs/udf/super.c |
8764 | +++ b/fs/udf/super.c |
8765 | @@ -1703,7 +1703,8 @@ static noinline int udf_process_sequence( |
8766 | "Pointers (max %u supported)\n", |
8767 | UDF_MAX_TD_NESTING); |
8768 | brelse(bh); |
8769 | - return -EIO; |
8770 | + ret = -EIO; |
8771 | + goto out; |
8772 | } |
8773 | |
8774 | vdp = (struct volDescPtr *)bh->b_data; |
8775 | @@ -1723,7 +1724,8 @@ static noinline int udf_process_sequence( |
8776 | curr = get_volume_descriptor_record(ident, bh, &data); |
8777 | if (IS_ERR(curr)) { |
8778 | brelse(bh); |
8779 | - return PTR_ERR(curr); |
8780 | + ret = PTR_ERR(curr); |
8781 | + goto out; |
8782 | } |
8783 | /* Descriptor we don't care about? */ |
8784 | if (!curr) |
8785 | @@ -1745,28 +1747,31 @@ static noinline int udf_process_sequence( |
8786 | */ |
8787 | if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) { |
8788 | udf_err(sb, "Primary Volume Descriptor not found!\n"); |
8789 | - return -EAGAIN; |
8790 | + ret = -EAGAIN; |
8791 | + goto out; |
8792 | } |
8793 | ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block); |
8794 | if (ret < 0) |
8795 | - return ret; |
8796 | + goto out; |
8797 | |
8798 | if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) { |
8799 | ret = udf_load_logicalvol(sb, |
8800 | data.vds[VDS_POS_LOGICAL_VOL_DESC].block, |
8801 | fileset); |
8802 | if (ret < 0) |
8803 | - return ret; |
8804 | + goto out; |
8805 | } |
8806 | |
8807 | /* Now handle prevailing Partition Descriptors */ |
8808 | for (i = 0; i < data.num_part_descs; i++) { |
8809 | ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block); |
8810 | if (ret < 0) |
8811 | - return ret; |
8812 | + goto out; |
8813 | } |
8814 | - |
8815 | - return 0; |
8816 | + ret = 0; |
8817 | +out: |
8818 | + kfree(data.part_descs_loc); |
8819 | + return ret; |
8820 | } |
8821 | |
8822 | /* |
8823 | diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c |
8824 | index f8db3fe616df9..c114d24be6193 100644 |
8825 | --- a/fs/xfs/libxfs/xfs_bmap.c |
8826 | +++ b/fs/xfs/libxfs/xfs_bmap.c |
8827 | @@ -4985,20 +4985,25 @@ xfs_bmap_del_extent_real( |
8828 | |
8829 | flags = XFS_ILOG_CORE; |
8830 | if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { |
8831 | - xfs_fsblock_t bno; |
8832 | xfs_filblks_t len; |
8833 | xfs_extlen_t mod; |
8834 | |
8835 | - bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize, |
8836 | - &mod); |
8837 | - ASSERT(mod == 0); |
8838 | len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize, |
8839 | &mod); |
8840 | ASSERT(mod == 0); |
8841 | |
8842 | - error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); |
8843 | - if (error) |
8844 | - goto done; |
8845 | + if (!(bflags & XFS_BMAPI_REMAP)) { |
8846 | + xfs_fsblock_t bno; |
8847 | + |
8848 | + bno = div_u64_rem(del->br_startblock, |
8849 | + mp->m_sb.sb_rextsize, &mod); |
8850 | + ASSERT(mod == 0); |
8851 | + |
8852 | + error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); |
8853 | + if (error) |
8854 | + goto done; |
8855 | + } |
8856 | + |
8857 | do_fx = 0; |
8858 | nblks = len * mp->m_sb.sb_rextsize; |
8859 | qfield = XFS_TRANS_DQ_RTBCOUNT; |
8860 | diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c |
8861 | index b583669370825..6d5ddc4e5135a 100644 |
8862 | --- a/fs/xfs/xfs_rtalloc.c |
8863 | +++ b/fs/xfs/xfs_rtalloc.c |
8864 | @@ -1021,10 +1021,13 @@ xfs_growfs_rt( |
8865 | xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL); |
8866 | xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); |
8867 | /* |
8868 | - * Update the bitmap inode's size. |
8869 | + * Update the bitmap inode's size ondisk and incore. We need |
8870 | + * to update the incore size so that inode inactivation won't |
8871 | + * punch what it thinks are "posteof" blocks. |
8872 | */ |
8873 | mp->m_rbmip->i_d.di_size = |
8874 | nsbp->sb_rbmblocks * nsbp->sb_blocksize; |
8875 | + i_size_write(VFS_I(mp->m_rbmip), mp->m_rbmip->i_d.di_size); |
8876 | xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE); |
8877 | /* |
8878 | * Get the summary inode into the transaction. |
8879 | @@ -1032,9 +1035,12 @@ xfs_growfs_rt( |
8880 | xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL); |
8881 | xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); |
8882 | /* |
8883 | - * Update the summary inode's size. |
8884 | + * Update the summary inode's size. We need to update the |
8885 | + * incore size so that inode inactivation won't punch what it |
8886 | + * thinks are "posteof" blocks. |
8887 | */ |
8888 | mp->m_rsumip->i_d.di_size = nmp->m_rsumsize; |
8889 | + i_size_write(VFS_I(mp->m_rsumip), mp->m_rsumip->i_d.di_size); |
8890 | xfs_trans_log_inode(tp, mp->m_rsumip, XFS_ILOG_CORE); |
8891 | /* |
8892 | * Copy summary data from old to new sizes. |
8893 | diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h |
8894 | index 774f7d3b8f6af..369221fd55187 100644 |
8895 | --- a/include/linux/hil_mlc.h |
8896 | +++ b/include/linux/hil_mlc.h |
8897 | @@ -103,7 +103,7 @@ struct hilse_node { |
8898 | |
8899 | /* Methods for back-end drivers, e.g. hp_sdc_mlc */ |
8900 | typedef int (hil_mlc_cts) (hil_mlc *mlc); |
8901 | -typedef void (hil_mlc_out) (hil_mlc *mlc); |
8902 | +typedef int (hil_mlc_out) (hil_mlc *mlc); |
8903 | typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout); |
8904 | |
8905 | struct hil_mlc_devinfo { |
8906 | diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h |
8907 | index 145c38e351c25..6655ce32feff1 100644 |
8908 | --- a/include/linux/usb/pd.h |
8909 | +++ b/include/linux/usb/pd.h |
8910 | @@ -442,6 +442,7 @@ static inline unsigned int rdo_max_power(u32 rdo) |
8911 | #define PD_T_ERROR_RECOVERY 100 /* minimum 25 is insufficient */ |
8912 | #define PD_T_SRCSWAPSTDBY 625 /* Maximum of 650ms */ |
8913 | #define PD_T_NEWSRC 250 /* Maximum of 275ms */ |
8914 | +#define PD_T_SWAP_SRC_START 20 /* Minimum of 20ms */ |
8915 | |
8916 | #define PD_T_DRP_TRY 100 /* 75 - 150 ms */ |
8917 | #define PD_T_DRP_TRYWAIT 600 /* 400 - 800 ms */ |
8918 | diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h |
8919 | index b65c7ee75bc7e..035e59eef05bb 100644 |
8920 | --- a/include/uapi/linux/btrfs_tree.h |
8921 | +++ b/include/uapi/linux/btrfs_tree.h |
8922 | @@ -4,6 +4,11 @@ |
8923 | |
8924 | #include <linux/btrfs.h> |
8925 | #include <linux/types.h> |
8926 | +#ifdef __KERNEL__ |
8927 | +#include <linux/stddef.h> |
8928 | +#else |
8929 | +#include <stddef.h> |
8930 | +#endif |
8931 | |
8932 | /* |
8933 | * This header contains the structure definitions and constants used |
8934 | @@ -650,6 +655,15 @@ struct btrfs_root_item { |
8935 | __le64 reserved[8]; /* for future */ |
8936 | } __attribute__ ((__packed__)); |
8937 | |
8938 | +/* |
8939 | + * Btrfs root item used to be smaller than current size. The old format ends |
8940 | + * at where member generation_v2 is. |
8941 | + */ |
8942 | +static inline __u32 btrfs_legacy_root_item_size(void) |
8943 | +{ |
8944 | + return offsetof(struct btrfs_root_item, generation_v2); |
8945 | +} |
8946 | + |
8947 | /* |
8948 | * this is used for both forward and backward root refs |
8949 | */ |
8950 | diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h |
8951 | index 8572930cf5b00..54a78529c8b38 100644 |
8952 | --- a/include/uapi/linux/nfs4.h |
8953 | +++ b/include/uapi/linux/nfs4.h |
8954 | @@ -136,6 +136,8 @@ |
8955 | |
8956 | #define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000 |
8957 | #define EXCHGID4_FLAG_CONFIRMED_R 0x80000000 |
8958 | + |
8959 | +#define EXCHGID4_FLAG_SUPP_FENCE_OPS 0x00000004 |
8960 | /* |
8961 | * Since the validity of these bits depends on whether |
8962 | * they're set in the argument or response, have separate |
8963 | @@ -143,6 +145,7 @@ |
8964 | */ |
8965 | #define EXCHGID4_FLAG_MASK_A 0x40070103 |
8966 | #define EXCHGID4_FLAG_MASK_R 0x80070103 |
8967 | +#define EXCHGID4_2_FLAG_MASK_R 0x80070107 |
8968 | |
8969 | #define SEQ4_STATUS_CB_PATH_DOWN 0x00000001 |
8970 | #define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002 |
8971 | diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h |
8972 | index 530638dffd934..3210b3c82a4a2 100644 |
8973 | --- a/include/uapi/linux/videodev2.h |
8974 | +++ b/include/uapi/linux/videodev2.h |
8975 | @@ -371,9 +371,9 @@ enum v4l2_hsv_encoding { |
8976 | |
8977 | enum v4l2_quantization { |
8978 | /* |
8979 | - * The default for R'G'B' quantization is always full range, except |
8980 | - * for the BT2020 colorspace. For Y'CbCr the quantization is always |
8981 | - * limited range, except for COLORSPACE_JPEG: this is full range. |
8982 | + * The default for R'G'B' quantization is always full range. |
8983 | + * For Y'CbCr the quantization is always limited range, except |
8984 | + * for COLORSPACE_JPEG: this is full range. |
8985 | */ |
8986 | V4L2_QUANTIZATION_DEFAULT = 0, |
8987 | V4L2_QUANTIZATION_FULL_RANGE = 1, |
8988 | @@ -382,14 +382,13 @@ enum v4l2_quantization { |
8989 | |
8990 | /* |
8991 | * Determine how QUANTIZATION_DEFAULT should map to a proper quantization. |
8992 | - * This depends on whether the image is RGB or not, the colorspace and the |
8993 | - * Y'CbCr encoding. |
8994 | + * This depends on whether the image is RGB or not, the colorspace. |
8995 | + * The Y'CbCr encoding is not used anymore, but is still there for backwards |
8996 | + * compatibility. |
8997 | */ |
8998 | #define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \ |
8999 | - (((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \ |
9000 | - V4L2_QUANTIZATION_LIM_RANGE : \ |
9001 | - (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \ |
9002 | - V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE)) |
9003 | + (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \ |
9004 | + V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE) |
9005 | |
9006 | /* |
9007 | * Deprecated names for opRGB colorspace (IEC 61966-2-5) |
9008 | diff --git a/include/xen/events.h b/include/xen/events.h |
9009 | index c0e6a05983976..31952308a6d56 100644 |
9010 | --- a/include/xen/events.h |
9011 | +++ b/include/xen/events.h |
9012 | @@ -14,11 +14,16 @@ |
9013 | |
9014 | unsigned xen_evtchn_nr_channels(void); |
9015 | |
9016 | -int bind_evtchn_to_irq(unsigned int evtchn); |
9017 | -int bind_evtchn_to_irqhandler(unsigned int evtchn, |
9018 | +int bind_evtchn_to_irq(evtchn_port_t evtchn); |
9019 | +int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn); |
9020 | +int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, |
9021 | irq_handler_t handler, |
9022 | unsigned long irqflags, const char *devname, |
9023 | void *dev_id); |
9024 | +int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn, |
9025 | + irq_handler_t handler, |
9026 | + unsigned long irqflags, const char *devname, |
9027 | + void *dev_id); |
9028 | int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu); |
9029 | int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, |
9030 | irq_handler_t handler, |
9031 | @@ -31,13 +36,21 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, |
9032 | const char *devname, |
9033 | void *dev_id); |
9034 | int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, |
9035 | - unsigned int remote_port); |
9036 | + evtchn_port_t remote_port); |
9037 | +int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain, |
9038 | + evtchn_port_t remote_port); |
9039 | int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, |
9040 | - unsigned int remote_port, |
9041 | + evtchn_port_t remote_port, |
9042 | irq_handler_t handler, |
9043 | unsigned long irqflags, |
9044 | const char *devname, |
9045 | void *dev_id); |
9046 | +int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain, |
9047 | + evtchn_port_t remote_port, |
9048 | + irq_handler_t handler, |
9049 | + unsigned long irqflags, |
9050 | + const char *devname, |
9051 | + void *dev_id); |
9052 | |
9053 | /* |
9054 | * Common unbind function for all event sources. Takes IRQ to unbind from. |
9055 | @@ -46,6 +59,14 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, |
9056 | */ |
9057 | void unbind_from_irqhandler(unsigned int irq, void *dev_id); |
9058 | |
9059 | +/* |
9060 | + * Send late EOI for an IRQ bound to an event channel via one of the *_lateeoi |
9061 | + * functions above. |
9062 | + */ |
9063 | +void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags); |
9064 | +/* Signal an event was spurious, i.e. there was no action resulting from it. */ |
9065 | +#define XEN_EOI_FLAG_SPURIOUS 0x00000001 |
9066 | + |
9067 | #define XEN_IRQ_PRIORITY_MAX EVTCHN_FIFO_PRIORITY_MAX |
9068 | #define XEN_IRQ_PRIORITY_DEFAULT EVTCHN_FIFO_PRIORITY_DEFAULT |
9069 | #define XEN_IRQ_PRIORITY_MIN EVTCHN_FIFO_PRIORITY_MIN |
9070 | diff --git a/init/Kconfig b/init/Kconfig |
9071 | index 6db3e310a5e42..96fc45d1b686b 100644 |
9072 | --- a/init/Kconfig |
9073 | +++ b/init/Kconfig |
9074 | @@ -594,7 +594,8 @@ config IKHEADERS |
9075 | |
9076 | config LOG_BUF_SHIFT |
9077 | int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" |
9078 | - range 12 25 |
9079 | + range 12 25 if !H8300 |
9080 | + range 12 19 if H8300 |
9081 | default 17 |
9082 | depends on PRINTK |
9083 | help |
9084 | diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c |
9085 | index 507474f79195f..a67bfa803d983 100644 |
9086 | --- a/kernel/bpf/verifier.c |
9087 | +++ b/kernel/bpf/verifier.c |
9088 | @@ -4427,6 +4427,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, |
9089 | dst, reg_type_str[ptr_reg->type]); |
9090 | return -EACCES; |
9091 | case CONST_PTR_TO_MAP: |
9092 | + /* smin_val represents the known value */ |
9093 | + if (known && smin_val == 0 && opcode == BPF_ADD) |
9094 | + break; |
9095 | + /* fall-through */ |
9096 | case PTR_TO_PACKET_END: |
9097 | case PTR_TO_SOCKET: |
9098 | case PTR_TO_SOCKET_OR_NULL: |
9099 | diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c |
9100 | index 2222f3225e53d..097ab02989f92 100644 |
9101 | --- a/kernel/debug/debug_core.c |
9102 | +++ b/kernel/debug/debug_core.c |
9103 | @@ -96,14 +96,6 @@ int dbg_switch_cpu; |
9104 | /* Use kdb or gdbserver mode */ |
9105 | int dbg_kdb_mode = 1; |
9106 | |
9107 | -static int __init opt_kgdb_con(char *str) |
9108 | -{ |
9109 | - kgdb_use_con = 1; |
9110 | - return 0; |
9111 | -} |
9112 | - |
9113 | -early_param("kgdbcon", opt_kgdb_con); |
9114 | - |
9115 | module_param(kgdb_use_con, int, 0644); |
9116 | module_param(kgdbreboot, int, 0644); |
9117 | |
9118 | @@ -876,6 +868,20 @@ static struct console kgdbcons = { |
9119 | .index = -1, |
9120 | }; |
9121 | |
9122 | +static int __init opt_kgdb_con(char *str) |
9123 | +{ |
9124 | + kgdb_use_con = 1; |
9125 | + |
9126 | + if (kgdb_io_module_registered && !kgdb_con_registered) { |
9127 | + register_console(&kgdbcons); |
9128 | + kgdb_con_registered = 1; |
9129 | + } |
9130 | + |
9131 | + return 0; |
9132 | +} |
9133 | + |
9134 | +early_param("kgdbcon", opt_kgdb_con); |
9135 | + |
9136 | #ifdef CONFIG_MAGIC_SYSRQ |
9137 | static void sysrq_handle_dbg(int key) |
9138 | { |
9139 | diff --git a/kernel/futex.c b/kernel/futex.c |
9140 | index 5660c02b01b05..17fba7a986e0f 100644 |
9141 | --- a/kernel/futex.c |
9142 | +++ b/kernel/futex.c |
9143 | @@ -1594,8 +1594,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ |
9144 | */ |
9145 | newval = FUTEX_WAITERS | task_pid_vnr(new_owner); |
9146 | |
9147 | - if (unlikely(should_fail_futex(true))) |
9148 | + if (unlikely(should_fail_futex(true))) { |
9149 | ret = -EFAULT; |
9150 | + goto out_unlock; |
9151 | + } |
9152 | |
9153 | ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval); |
9154 | if (!ret && (curval != uval)) { |
9155 | diff --git a/kernel/seccomp.c b/kernel/seccomp.c |
9156 | index e0fd972356539..0d991e9626f68 100644 |
9157 | --- a/kernel/seccomp.c |
9158 | +++ b/kernel/seccomp.c |
9159 | @@ -1219,13 +1219,7 @@ static const struct file_operations seccomp_notify_ops = { |
9160 | |
9161 | static struct file *init_listener(struct seccomp_filter *filter) |
9162 | { |
9163 | - struct file *ret = ERR_PTR(-EBUSY); |
9164 | - struct seccomp_filter *cur; |
9165 | - |
9166 | - for (cur = current->seccomp.filter; cur; cur = cur->prev) { |
9167 | - if (cur->notif) |
9168 | - goto out; |
9169 | - } |
9170 | + struct file *ret; |
9171 | |
9172 | ret = ERR_PTR(-ENOMEM); |
9173 | filter->notif = kzalloc(sizeof(*(filter->notif)), GFP_KERNEL); |
9174 | @@ -1252,6 +1246,31 @@ out: |
9175 | return ret; |
9176 | } |
9177 | |
9178 | +/* |
9179 | + * Does @new_child have a listener while an ancestor also has a listener? |
9180 | + * If so, we'll want to reject this filter. |
9181 | + * This only has to be tested for the current process, even in the TSYNC case, |
9182 | + * because TSYNC installs @child with the same parent on all threads. |
9183 | + * Note that @new_child is not hooked up to its parent at this point yet, so |
9184 | + * we use current->seccomp.filter. |
9185 | + */ |
9186 | +static bool has_duplicate_listener(struct seccomp_filter *new_child) |
9187 | +{ |
9188 | + struct seccomp_filter *cur; |
9189 | + |
9190 | + /* must be protected against concurrent TSYNC */ |
9191 | + lockdep_assert_held(¤t->sighand->siglock); |
9192 | + |
9193 | + if (!new_child->notif) |
9194 | + return false; |
9195 | + for (cur = current->seccomp.filter; cur; cur = cur->prev) { |
9196 | + if (cur->notif) |
9197 | + return true; |
9198 | + } |
9199 | + |
9200 | + return false; |
9201 | +} |
9202 | + |
9203 | /** |
9204 | * seccomp_set_mode_filter: internal function for setting seccomp filter |
9205 | * @flags: flags to change filter behavior |
9206 | @@ -1321,6 +1340,11 @@ static long seccomp_set_mode_filter(unsigned int flags, |
9207 | if (!seccomp_may_assign_mode(seccomp_mode)) |
9208 | goto out; |
9209 | |
9210 | + if (has_duplicate_listener(prepared)) { |
9211 | + ret = -EBUSY; |
9212 | + goto out; |
9213 | + } |
9214 | + |
9215 | ret = seccomp_attach_filter(flags, prepared); |
9216 | if (ret) |
9217 | goto out; |
9218 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c |
9219 | index 9a2581fe7ed5f..67cdb401c6ce5 100644 |
9220 | --- a/kernel/trace/ring_buffer.c |
9221 | +++ b/kernel/trace/ring_buffer.c |
9222 | @@ -1717,18 +1717,18 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, |
9223 | { |
9224 | struct ring_buffer_per_cpu *cpu_buffer; |
9225 | unsigned long nr_pages; |
9226 | - int cpu, err = 0; |
9227 | + int cpu, err; |
9228 | |
9229 | /* |
9230 | * Always succeed at resizing a non-existent buffer: |
9231 | */ |
9232 | if (!buffer) |
9233 | - return size; |
9234 | + return 0; |
9235 | |
9236 | /* Make sure the requested buffer exists */ |
9237 | if (cpu_id != RING_BUFFER_ALL_CPUS && |
9238 | !cpumask_test_cpu(cpu_id, buffer->cpumask)) |
9239 | - return size; |
9240 | + return 0; |
9241 | |
9242 | nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); |
9243 | |
9244 | @@ -1868,7 +1868,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, |
9245 | } |
9246 | |
9247 | mutex_unlock(&buffer->mutex); |
9248 | - return size; |
9249 | + return 0; |
9250 | |
9251 | out_err: |
9252 | for_each_buffer_cpu(buffer, cpu) { |
9253 | diff --git a/lib/scatterlist.c b/lib/scatterlist.c |
9254 | index 5813072bc5895..29346184fcf2e 100644 |
9255 | --- a/lib/scatterlist.c |
9256 | +++ b/lib/scatterlist.c |
9257 | @@ -514,7 +514,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length, |
9258 | elem_len = min_t(u64, length, PAGE_SIZE << order); |
9259 | page = alloc_pages(gfp, order); |
9260 | if (!page) { |
9261 | - sgl_free(sgl); |
9262 | + sgl_free_order(sgl, order); |
9263 | return NULL; |
9264 | } |
9265 | |
9266 | diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c |
9267 | index 12ecacf0c55fb..60eb9a2b209be 100644 |
9268 | --- a/net/9p/trans_fd.c |
9269 | +++ b/net/9p/trans_fd.c |
9270 | @@ -1023,7 +1023,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args) |
9271 | |
9272 | csocket = NULL; |
9273 | |
9274 | - if (addr == NULL) |
9275 | + if (!addr || !strlen(addr)) |
9276 | return -EINVAL; |
9277 | |
9278 | if (strlen(addr) >= UNIX_PATH_MAX) { |
9279 | diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c |
9280 | index 3d2e9f944e0fe..49726c378aab3 100644 |
9281 | --- a/net/ceph/messenger.c |
9282 | +++ b/net/ceph/messenger.c |
9283 | @@ -3007,6 +3007,11 @@ static void con_fault(struct ceph_connection *con) |
9284 | ceph_msg_put(con->in_msg); |
9285 | con->in_msg = NULL; |
9286 | } |
9287 | + if (con->out_msg) { |
9288 | + BUG_ON(con->out_msg->con != con); |
9289 | + ceph_msg_put(con->out_msg); |
9290 | + con->out_msg = NULL; |
9291 | + } |
9292 | |
9293 | /* Requeue anything that hasn't been acked */ |
9294 | list_splice_init(&con->out_sent, &con->out_queue); |
9295 | diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c |
9296 | index 41df4c507193b..a6fee86f400ec 100644 |
9297 | --- a/net/sunrpc/xprt.c |
9298 | +++ b/net/sunrpc/xprt.c |
9299 | @@ -1503,10 +1503,13 @@ xprt_transmit(struct rpc_task *task) |
9300 | { |
9301 | struct rpc_rqst *next, *req = task->tk_rqstp; |
9302 | struct rpc_xprt *xprt = req->rq_xprt; |
9303 | - int status; |
9304 | + int counter, status; |
9305 | |
9306 | spin_lock(&xprt->queue_lock); |
9307 | + counter = 0; |
9308 | while (!list_empty(&xprt->xmit_queue)) { |
9309 | + if (++counter == 20) |
9310 | + break; |
9311 | next = list_first_entry(&xprt->xmit_queue, |
9312 | struct rpc_rqst, rq_xmit); |
9313 | xprt_pin_rqst(next); |
9314 | @@ -1514,7 +1517,6 @@ xprt_transmit(struct rpc_task *task) |
9315 | status = xprt_request_transmit(next, task); |
9316 | if (status == -EBADMSG && next != req) |
9317 | status = 0; |
9318 | - cond_resched(); |
9319 | spin_lock(&xprt->queue_lock); |
9320 | xprt_unpin_rqst(next); |
9321 | if (status == 0) { |
9322 | diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c |
9323 | index df011ac334022..79d1005ff2ee3 100644 |
9324 | --- a/samples/bpf/xdpsock_user.c |
9325 | +++ b/samples/bpf/xdpsock_user.c |
9326 | @@ -677,6 +677,7 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds) |
9327 | while (ret != rcvd) { |
9328 | if (ret < 0) |
9329 | exit_with_error(-ret); |
9330 | + complete_tx_l2fwd(xsk, fds); |
9331 | if (xsk_ring_prod__needs_wakeup(&xsk->tx)) |
9332 | kick_tx(xsk); |
9333 | ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx); |
9334 | diff --git a/tools/perf/util/print_binary.c b/tools/perf/util/print_binary.c |
9335 | index 599a1543871de..13fdc51c61d96 100644 |
9336 | --- a/tools/perf/util/print_binary.c |
9337 | +++ b/tools/perf/util/print_binary.c |
9338 | @@ -50,7 +50,7 @@ int is_printable_array(char *p, unsigned int len) |
9339 | |
9340 | len--; |
9341 | |
9342 | - for (i = 0; i < len; i++) { |
9343 | + for (i = 0; i < len && p[i]; i++) { |
9344 | if (!isprint(p[i]) && !isspace(p[i])) |
9345 | return 0; |
9346 | } |
9347 | diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c |
9348 | index 5cbbff416998c..4396faf33394a 100644 |
9349 | --- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c |
9350 | +++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c |
9351 | @@ -19,11 +19,11 @@ |
9352 | #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) |
9353 | #endif |
9354 | |
9355 | +const char tcp_mem_name[] = "net/ipv4/tcp_mem"; |
9356 | static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx) |
9357 | { |
9358 | - char tcp_mem_name[] = "net/ipv4/tcp_mem"; |
9359 | unsigned char i; |
9360 | - char name[64]; |
9361 | + char name[sizeof(tcp_mem_name)]; |
9362 | int ret; |
9363 | |
9364 | memset(name, 0, sizeof(name)); |
9365 | diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c |
9366 | index 15a329da59fa3..757bdb218a661 100644 |
9367 | --- a/tools/testing/selftests/x86/fsgsbase.c |
9368 | +++ b/tools/testing/selftests/x86/fsgsbase.c |
9369 | @@ -442,6 +442,68 @@ static void test_unexpected_base(void) |
9370 | |
9371 | #define USER_REGS_OFFSET(r) offsetof(struct user_regs_struct, r) |
9372 | |
9373 | +static void test_ptrace_write_gs_read_base(void) |
9374 | +{ |
9375 | + int status; |
9376 | + pid_t child = fork(); |
9377 | + |
9378 | + if (child < 0) |
9379 | + err(1, "fork"); |
9380 | + |
9381 | + if (child == 0) { |
9382 | + printf("[RUN]\tPTRACE_POKE GS, read GSBASE back\n"); |
9383 | + |
9384 | + printf("[RUN]\tARCH_SET_GS to 1\n"); |
9385 | + if (syscall(SYS_arch_prctl, ARCH_SET_GS, 1) != 0) |
9386 | + err(1, "ARCH_SET_GS"); |
9387 | + |
9388 | + if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) |
9389 | + err(1, "PTRACE_TRACEME"); |
9390 | + |
9391 | + raise(SIGTRAP); |
9392 | + _exit(0); |
9393 | + } |
9394 | + |
9395 | + wait(&status); |
9396 | + |
9397 | + if (WSTOPSIG(status) == SIGTRAP) { |
9398 | + unsigned long base; |
9399 | + unsigned long gs_offset = USER_REGS_OFFSET(gs); |
9400 | + unsigned long base_offset = USER_REGS_OFFSET(gs_base); |
9401 | + |
9402 | + /* Read the initial base. It should be 1. */ |
9403 | + base = ptrace(PTRACE_PEEKUSER, child, base_offset, NULL); |
9404 | + if (base == 1) { |
9405 | + printf("[OK]\tGSBASE started at 1\n"); |
9406 | + } else { |
9407 | + nerrs++; |
9408 | + printf("[FAIL]\tGSBASE started at 0x%lx\n", base); |
9409 | + } |
9410 | + |
9411 | + printf("[RUN]\tSet GS = 0x7, read GSBASE\n"); |
9412 | + |
9413 | + /* Poke an LDT selector into GS. */ |
9414 | + if (ptrace(PTRACE_POKEUSER, child, gs_offset, 0x7) != 0) |
9415 | + err(1, "PTRACE_POKEUSER"); |
9416 | + |
9417 | + /* And read the base. */ |
9418 | + base = ptrace(PTRACE_PEEKUSER, child, base_offset, NULL); |
9419 | + |
9420 | + if (base == 0 || base == 1) { |
9421 | + printf("[OK]\tGSBASE reads as 0x%lx with invalid GS\n", base); |
9422 | + } else { |
9423 | + nerrs++; |
9424 | + printf("[FAIL]\tGSBASE=0x%lx (should be 0 or 1)\n", base); |
9425 | + } |
9426 | + } |
9427 | + |
9428 | + ptrace(PTRACE_CONT, child, NULL, NULL); |
9429 | + |
9430 | + wait(&status); |
9431 | + if (!WIFEXITED(status)) |
9432 | + printf("[WARN]\tChild didn't exit cleanly.\n"); |
9433 | +} |
9434 | + |
9435 | static void test_ptrace_write_gsbase(void) |
9436 | { |
9437 | int status; |
9438 | @@ -499,6 +561,9 @@ static void test_ptrace_write_gsbase(void) |
9439 | |
9440 | END: |
9441 | ptrace(PTRACE_CONT, child, NULL, NULL); |
9442 | + wait(&status); |
9443 | + if (!WIFEXITED(status)) |
9444 | + printf("[WARN]\tChild didn't exit cleanly.\n"); |
9445 | } |
9446 | |
9447 | int main() |
9448 | @@ -508,6 +573,9 @@ int main() |
9449 | shared_scratch = mmap(NULL, 4096, PROT_READ | PROT_WRITE, |
9450 | MAP_ANONYMOUS | MAP_SHARED, -1, 0); |
9451 | |
9452 | + /* Do these tests before we have an LDT. */ |
9453 | + test_ptrace_write_gs_read_base(); |
9454 | + |
9455 | /* Probe FSGSBASE */ |
9456 | sethandler(SIGILL, sigill, 0); |
9457 | if (sigsetjmp(jmpbuf, 1) == 0) { |