Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0119-5.4.20-all-fixes.patch

Revision 3500
Mon May 11 14:36:22 2020 UTC by niro
File size: 119530 bytes
-linux-5.4.20
1 diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
2 index cc544fdc38be..bc8aed17800d 100644
3 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
4 +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
5 @@ -85,7 +85,7 @@ properties:
6 Must be the device tree identifier of the over-sampling
7 mode pins. As the line is active high, it should be marked
8 GPIO_ACTIVE_HIGH.
9 - maxItems: 1
10 + maxItems: 3
11
12 adi,sw-mode:
13 description:
14 @@ -128,9 +128,9 @@ examples:
15 adi,conversion-start-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
16 reset-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>;
17 adi,first-data-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
18 - adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH
19 - &gpio 23 GPIO_ACTIVE_HIGH
20 - &gpio 26 GPIO_ACTIVE_HIGH>;
21 + adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>,
22 + <&gpio 23 GPIO_ACTIVE_HIGH>,
23 + <&gpio 26 GPIO_ACTIVE_HIGH>;
24 standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>;
25 adi,sw-mode;
26 };
27 diff --git a/Makefile b/Makefile
28 index 2f55d377f0db..21e58bd54715 100644
29 --- a/Makefile
30 +++ b/Makefile
31 @@ -1,7 +1,7 @@
32 # SPDX-License-Identifier: GPL-2.0
33 VERSION = 5
34 PATCHLEVEL = 4
35 -SUBLEVEL = 19
36 +SUBLEVEL = 20
37 EXTRAVERSION =
38 NAME = Kleptomaniac Octopus
39
40 diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
41 index 08bcfed6b80f..134cc223ea81 100644
42 --- a/arch/arc/boot/dts/axs10x_mb.dtsi
43 +++ b/arch/arc/boot/dts/axs10x_mb.dtsi
44 @@ -77,6 +77,7 @@
45 interrupt-names = "macirq";
46 phy-mode = "rgmii";
47 snps,pbl = < 32 >;
48 + snps,multicast-filter-bins = <256>;
49 clocks = <&apbclk>;
50 clock-names = "stmmaceth";
51 max-speed = <100>;
52 diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
53 index 091356f2a8c1..c726cd8dbdf1 100644
54 --- a/arch/arm/boot/dts/am43xx-clocks.dtsi
55 +++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
56 @@ -704,6 +704,60 @@
57 ti,bit-shift = <8>;
58 reg = <0x2a48>;
59 };
60 +
61 + clkout1_osc_div_ck: clkout1-osc-div-ck {
62 + #clock-cells = <0>;
63 + compatible = "ti,divider-clock";
64 + clocks = <&sys_clkin_ck>;
65 + ti,bit-shift = <20>;
66 + ti,max-div = <4>;
67 + reg = <0x4100>;
68 + };
69 +
70 + clkout1_src2_mux_ck: clkout1-src2-mux-ck {
71 + #clock-cells = <0>;
72 + compatible = "ti,mux-clock";
73 + clocks = <&clk_rc32k_ck>, <&sysclk_div>, <&dpll_ddr_m2_ck>,
74 + <&dpll_per_m2_ck>, <&dpll_disp_m2_ck>,
75 + <&dpll_mpu_m2_ck>;
76 + reg = <0x4100>;
77 + };
78 +
79 + clkout1_src2_pre_div_ck: clkout1-src2-pre-div-ck {
80 + #clock-cells = <0>;
81 + compatible = "ti,divider-clock";
82 + clocks = <&clkout1_src2_mux_ck>;
83 + ti,bit-shift = <4>;
84 + ti,max-div = <8>;
85 + reg = <0x4100>;
86 + };
87 +
88 + clkout1_src2_post_div_ck: clkout1-src2-post-div-ck {
89 + #clock-cells = <0>;
90 + compatible = "ti,divider-clock";
91 + clocks = <&clkout1_src2_pre_div_ck>;
92 + ti,bit-shift = <8>;
93 + ti,max-div = <32>;
94 + ti,index-power-of-two;
95 + reg = <0x4100>;
96 + };
97 +
98 + clkout1_mux_ck: clkout1-mux-ck {
99 + #clock-cells = <0>;
100 + compatible = "ti,mux-clock";
101 + clocks = <&clkout1_osc_div_ck>, <&clk_rc32k_ck>,
102 + <&clkout1_src2_post_div_ck>, <&dpll_extdev_m2_ck>;
103 + ti,bit-shift = <16>;
104 + reg = <0x4100>;
105 + };
106 +
107 + clkout1_ck: clkout1-ck {
108 + #clock-cells = <0>;
109 + compatible = "ti,gate-clock";
110 + clocks = <&clkout1_mux_ck>;
111 + ti,bit-shift = <23>;
112 + reg = <0x4100>;
113 + };
114 };
115
116 &prcm {
117 diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
118 index dee9c0c8a096..16c6fd3c4246 100644
119 --- a/arch/arm/boot/dts/at91sam9260.dtsi
120 +++ b/arch/arm/boot/dts/at91sam9260.dtsi
121 @@ -187,7 +187,7 @@
122 usart0 {
123 pinctrl_usart0: usart0-0 {
124 atmel,pins =
125 - <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE
126 + <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
127 AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
128 };
129
130 @@ -221,7 +221,7 @@
131 usart1 {
132 pinctrl_usart1: usart1-0 {
133 atmel,pins =
134 - <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE
135 + <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
136 AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
137 };
138
139 @@ -239,7 +239,7 @@
140 usart2 {
141 pinctrl_usart2: usart2-0 {
142 atmel,pins =
143 - <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE
144 + <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
145 AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
146 };
147
148 @@ -257,7 +257,7 @@
149 usart3 {
150 pinctrl_usart3: usart3-0 {
151 atmel,pins =
152 - <AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_NONE
153 + <AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
154 AT91_PIOB 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
155 };
156
157 @@ -275,7 +275,7 @@
158 uart0 {
159 pinctrl_uart0: uart0-0 {
160 atmel,pins =
161 - <AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_NONE
162 + <AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_PULL_UP
163 AT91_PIOA 30 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>;
164 };
165 };
166 @@ -283,7 +283,7 @@
167 uart1 {
168 pinctrl_uart1: uart1-0 {
169 atmel,pins =
170 - <AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_NONE
171 + <AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
172 AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
173 };
174 };
175 diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
176 index dba025a98527..5ed3d745ac86 100644
177 --- a/arch/arm/boot/dts/at91sam9261.dtsi
178 +++ b/arch/arm/boot/dts/at91sam9261.dtsi
179 @@ -329,7 +329,7 @@
180 usart0 {
181 pinctrl_usart0: usart0-0 {
182 atmel,pins =
183 - <AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE>,
184 + <AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
185 <AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
186 };
187
188 @@ -347,7 +347,7 @@
189 usart1 {
190 pinctrl_usart1: usart1-0 {
191 atmel,pins =
192 - <AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE>,
193 + <AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
194 <AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
195 };
196
197 @@ -365,7 +365,7 @@
198 usart2 {
199 pinctrl_usart2: usart2-0 {
200 atmel,pins =
201 - <AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE>,
202 + <AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
203 <AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
204 };
205
206 diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
207 index 99678abdda93..5c990cfae254 100644
208 --- a/arch/arm/boot/dts/at91sam9263.dtsi
209 +++ b/arch/arm/boot/dts/at91sam9263.dtsi
210 @@ -183,7 +183,7 @@
211 usart0 {
212 pinctrl_usart0: usart0-0 {
213 atmel,pins =
214 - <AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_NONE
215 + <AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
216 AT91_PIOA 27 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
217 };
218
219 @@ -201,7 +201,7 @@
220 usart1 {
221 pinctrl_usart1: usart1-0 {
222 atmel,pins =
223 - <AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_NONE
224 + <AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
225 AT91_PIOD 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
226 };
227
228 @@ -219,7 +219,7 @@
229 usart2 {
230 pinctrl_usart2: usart2-0 {
231 atmel,pins =
232 - <AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_NONE
233 + <AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
234 AT91_PIOD 3 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
235 };
236
237 diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
238 index 691c95ea6175..fd179097a4bf 100644
239 --- a/arch/arm/boot/dts/at91sam9g45.dtsi
240 +++ b/arch/arm/boot/dts/at91sam9g45.dtsi
241 @@ -556,7 +556,7 @@
242 usart0 {
243 pinctrl_usart0: usart0-0 {
244 atmel,pins =
245 - <AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_NONE
246 + <AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
247 AT91_PIOB 18 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
248 };
249
250 @@ -574,7 +574,7 @@
251 usart1 {
252 pinctrl_usart1: usart1-0 {
253 atmel,pins =
254 - <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE
255 + <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
256 AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
257 };
258
259 @@ -592,7 +592,7 @@
260 usart2 {
261 pinctrl_usart2: usart2-0 {
262 atmel,pins =
263 - <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE
264 + <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
265 AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
266 };
267
268 @@ -610,7 +610,7 @@
269 usart3 {
270 pinctrl_usart3: usart3-0 {
271 atmel,pins =
272 - <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE
273 + <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
274 AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
275 };
276
277 diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
278 index 8643b7151565..ea024e4b6e09 100644
279 --- a/arch/arm/boot/dts/at91sam9rl.dtsi
280 +++ b/arch/arm/boot/dts/at91sam9rl.dtsi
281 @@ -682,7 +682,7 @@
282 usart0 {
283 pinctrl_usart0: usart0-0 {
284 atmel,pins =
285 - <AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE>,
286 + <AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
287 <AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
288 };
289
290 @@ -721,7 +721,7 @@
291 usart1 {
292 pinctrl_usart1: usart1-0 {
293 atmel,pins =
294 - <AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE>,
295 + <AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
296 <AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
297 };
298
299 @@ -744,7 +744,7 @@
300 usart2 {
301 pinctrl_usart2: usart2-0 {
302 atmel,pins =
303 - <AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE>,
304 + <AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
305 <AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
306 };
307
308 @@ -767,7 +767,7 @@
309 usart3 {
310 pinctrl_usart3: usart3-0 {
311 atmel,pins =
312 - <AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_NONE>,
313 + <AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
314 <AT91_PIOB 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
315 };
316
317 diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
318 index 3c534cd50ee3..db2033f674c6 100644
319 --- a/arch/arm/boot/dts/meson8.dtsi
320 +++ b/arch/arm/boot/dts/meson8.dtsi
321 @@ -129,8 +129,8 @@
322 gpu_opp_table: gpu-opp-table {
323 compatible = "operating-points-v2";
324
325 - opp-182150000 {
326 - opp-hz = /bits/ 64 <182150000>;
327 + opp-182142857 {
328 + opp-hz = /bits/ 64 <182142857>;
329 opp-microvolt = <1150000>;
330 };
331 opp-318750000 {
332 diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
333 index 099bf8e711c9..1e8c5d7bc824 100644
334 --- a/arch/arm/boot/dts/meson8b.dtsi
335 +++ b/arch/arm/boot/dts/meson8b.dtsi
336 @@ -125,8 +125,8 @@
337 opp-hz = /bits/ 64 <255000000>;
338 opp-microvolt = <1100000>;
339 };
340 - opp-364300000 {
341 - opp-hz = /bits/ 64 <364300000>;
342 + opp-364285714 {
343 + opp-hz = /bits/ 64 <364285714>;
344 opp-microvolt = <1100000>;
345 };
346 opp-425000000 {
347 diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
348 index f770aace0efd..203d40be70a5 100644
349 --- a/arch/arm/boot/dts/sama5d3.dtsi
350 +++ b/arch/arm/boot/dts/sama5d3.dtsi
351 @@ -1188,49 +1188,49 @@
352 usart0_clk: usart0_clk {
353 #clock-cells = <0>;
354 reg = <12>;
355 - atmel,clk-output-range = <0 66000000>;
356 + atmel,clk-output-range = <0 83000000>;
357 };
358
359 usart1_clk: usart1_clk {
360 #clock-cells = <0>;
361 reg = <13>;
362 - atmel,clk-output-range = <0 66000000>;
363 + atmel,clk-output-range = <0 83000000>;
364 };
365
366 usart2_clk: usart2_clk {
367 #clock-cells = <0>;
368 reg = <14>;
369 - atmel,clk-output-range = <0 66000000>;
370 + atmel,clk-output-range = <0 83000000>;
371 };
372
373 usart3_clk: usart3_clk {
374 #clock-cells = <0>;
375 reg = <15>;
376 - atmel,clk-output-range = <0 66000000>;
377 + atmel,clk-output-range = <0 83000000>;
378 };
379
380 uart0_clk: uart0_clk {
381 #clock-cells = <0>;
382 reg = <16>;
383 - atmel,clk-output-range = <0 66000000>;
384 + atmel,clk-output-range = <0 83000000>;
385 };
386
387 twi0_clk: twi0_clk {
388 reg = <18>;
389 #clock-cells = <0>;
390 - atmel,clk-output-range = <0 16625000>;
391 + atmel,clk-output-range = <0 41500000>;
392 };
393
394 twi1_clk: twi1_clk {
395 #clock-cells = <0>;
396 reg = <19>;
397 - atmel,clk-output-range = <0 16625000>;
398 + atmel,clk-output-range = <0 41500000>;
399 };
400
401 twi2_clk: twi2_clk {
402 #clock-cells = <0>;
403 reg = <20>;
404 - atmel,clk-output-range = <0 16625000>;
405 + atmel,clk-output-range = <0 41500000>;
406 };
407
408 mci0_clk: mci0_clk {
409 @@ -1246,19 +1246,19 @@
410 spi0_clk: spi0_clk {
411 #clock-cells = <0>;
412 reg = <24>;
413 - atmel,clk-output-range = <0 133000000>;
414 + atmel,clk-output-range = <0 166000000>;
415 };
416
417 spi1_clk: spi1_clk {
418 #clock-cells = <0>;
419 reg = <25>;
420 - atmel,clk-output-range = <0 133000000>;
421 + atmel,clk-output-range = <0 166000000>;
422 };
423
424 tcb0_clk: tcb0_clk {
425 #clock-cells = <0>;
426 reg = <26>;
427 - atmel,clk-output-range = <0 133000000>;
428 + atmel,clk-output-range = <0 166000000>;
429 };
430
431 pwm_clk: pwm_clk {
432 @@ -1269,7 +1269,7 @@
433 adc_clk: adc_clk {
434 #clock-cells = <0>;
435 reg = <29>;
436 - atmel,clk-output-range = <0 66000000>;
437 + atmel,clk-output-range = <0 83000000>;
438 };
439
440 dma0_clk: dma0_clk {
441 @@ -1300,13 +1300,13 @@
442 ssc0_clk: ssc0_clk {
443 #clock-cells = <0>;
444 reg = <38>;
445 - atmel,clk-output-range = <0 66000000>;
446 + atmel,clk-output-range = <0 83000000>;
447 };
448
449 ssc1_clk: ssc1_clk {
450 #clock-cells = <0>;
451 reg = <39>;
452 - atmel,clk-output-range = <0 66000000>;
453 + atmel,clk-output-range = <0 83000000>;
454 };
455
456 sha_clk: sha_clk {
457 diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi
458 index cf06a018ed0f..2470dd3fff25 100644
459 --- a/arch/arm/boot/dts/sama5d3_can.dtsi
460 +++ b/arch/arm/boot/dts/sama5d3_can.dtsi
461 @@ -36,13 +36,13 @@
462 can0_clk: can0_clk {
463 #clock-cells = <0>;
464 reg = <40>;
465 - atmel,clk-output-range = <0 66000000>;
466 + atmel,clk-output-range = <0 83000000>;
467 };
468
469 can1_clk: can1_clk {
470 #clock-cells = <0>;
471 reg = <41>;
472 - atmel,clk-output-range = <0 66000000>;
473 + atmel,clk-output-range = <0 83000000>;
474 };
475 };
476 };
477 diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
478 index 1584035daf51..215802b8db30 100644
479 --- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi
480 +++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
481 @@ -22,6 +22,7 @@
482 tcb1_clk: tcb1_clk {
483 #clock-cells = <0>;
484 reg = <27>;
485 + atmel,clk-output-range = <0 166000000>;
486 };
487 };
488 };
489 diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi
490 index 4316bdbdc25d..cb62adbd28ed 100644
491 --- a/arch/arm/boot/dts/sama5d3_uart.dtsi
492 +++ b/arch/arm/boot/dts/sama5d3_uart.dtsi
493 @@ -41,13 +41,13 @@
494 uart0_clk: uart0_clk {
495 #clock-cells = <0>;
496 reg = <16>;
497 - atmel,clk-output-range = <0 66000000>;
498 + atmel,clk-output-range = <0 83000000>;
499 };
500
501 uart1_clk: uart1_clk {
502 #clock-cells = <0>;
503 reg = <17>;
504 - atmel,clk-output-range = <0 66000000>;
505 + atmel,clk-output-range = <0 83000000>;
506 };
507 };
508 };
509 diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
510 index d5af6aedc02c..52665f30d236 100644
511 --- a/arch/arm/mach-at91/pm.c
512 +++ b/arch/arm/mach-at91/pm.c
513 @@ -691,6 +691,12 @@ static void __init at91_pm_use_default_mode(int pm_mode)
514 soc_pm.data.suspend_mode = AT91_PM_ULP0;
515 }
516
517 +static const struct of_device_id atmel_shdwc_ids[] = {
518 + { .compatible = "atmel,sama5d2-shdwc" },
519 + { .compatible = "microchip,sam9x60-shdwc" },
520 + { /* sentinel. */ }
521 +};
522 +
523 static void __init at91_pm_modes_init(void)
524 {
525 struct device_node *np;
526 @@ -700,7 +706,7 @@ static void __init at91_pm_modes_init(void)
527 !at91_is_pm_mode_active(AT91_PM_ULP1))
528 return;
529
530 - np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-shdwc");
531 + np = of_find_matching_node(NULL, atmel_shdwc_ids);
532 if (!np) {
533 pr_warn("%s: failed to find shdwc!\n", __func__);
534 goto ulp1_default;
535 @@ -751,6 +757,7 @@ static const struct of_device_id atmel_pmc_ids[] __initconst = {
536 { .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
537 { .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
538 { .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
539 + { .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[1] },
540 { /* sentinel */ },
541 };
542
543 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
544 index b4be3baa83d4..6f19ba53fd1f 100644
545 --- a/arch/arm/mm/init.c
546 +++ b/arch/arm/mm/init.c
547 @@ -323,7 +323,7 @@ static inline void poison_init_mem(void *s, size_t count)
548 *p++ = 0xe7fddef0;
549 }
550
551 -static inline void
552 +static inline void __init
553 free_memmap(unsigned long start_pfn, unsigned long end_pfn)
554 {
555 struct page *start_pg, *end_pg;
556 diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
557 index bd4aab6092e0..e31813a4f972 100644
558 --- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
559 +++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
560 @@ -143,6 +143,7 @@
561 phy-mode = "sgmii";
562 status = "okay";
563 managed = "in-band-status";
564 + phys = <&comphy1 0>;
565 sfp = <&sfp_eth0>;
566 };
567
568 @@ -150,11 +151,14 @@
569 phy-mode = "sgmii";
570 status = "okay";
571 managed = "in-band-status";
572 + phys = <&comphy0 1>;
573 sfp = <&sfp_eth1>;
574 };
575
576 &usb3 {
577 status = "okay";
578 + phys = <&usb2_utmi_otg_phy>;
579 + phy-names = "usb2-utmi-otg-phy";
580 };
581
582 &uart0 {
583 diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
584 index bd881497b872..a211a046b2f2 100644
585 --- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
586 +++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
587 @@ -408,6 +408,8 @@
588 reg = <5>;
589 label = "cpu";
590 ethernet = <&cp1_eth2>;
591 + phy-mode = "2500base-x";
592 + managed = "in-band-status";
593 };
594 };
595
596 diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
597 index ffb64fc239ee..ccd535edbf4e 100644
598 --- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
599 +++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
600 @@ -985,7 +985,7 @@
601
602 tcsr_mutex_regs: syscon@1f40000 {
603 compatible = "syscon";
604 - reg = <0x01f40000 0x20000>;
605 + reg = <0x01f40000 0x40000>;
606 };
607
608 tlmm: pinctrl@3400000 {
609 diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
610 index b38f9d442fc0..e6d700f8c194 100644
611 --- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
612 +++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
613 @@ -636,7 +636,6 @@
614 /* audio_clkout0/1/2/3 */
615 #clock-cells = <1>;
616 clock-frequency = <12288000 11289600>;
617 - clkout-lr-synchronous;
618
619 status = "okay";
620
621 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
622 index 80f459ad0190..f400cb29b811 100644
623 --- a/arch/arm64/kernel/cpufeature.c
624 +++ b/arch/arm64/kernel/cpufeature.c
625 @@ -32,9 +32,7 @@ static unsigned long elf_hwcap __read_mostly;
626 #define COMPAT_ELF_HWCAP_DEFAULT \
627 (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
628 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
629 - COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
630 - COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
631 - COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
632 + COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
633 COMPAT_HWCAP_LPAE)
634 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
635 unsigned int compat_elf_hwcap2 __read_mostly;
636 @@ -1367,7 +1365,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
637 {
638 /* FP/SIMD is not implemented */
639 .capability = ARM64_HAS_NO_FPSIMD,
640 - .type = ARM64_CPUCAP_SYSTEM_FEATURE,
641 + .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
642 .min_field_value = 0,
643 .matches = has_no_fpsimd,
644 },
645 @@ -1595,6 +1593,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
646 .match_list = list, \
647 }
648
649 +#define HWCAP_CAP_MATCH(match, cap_type, cap) \
650 + { \
651 + __HWCAP_CAP(#cap, cap_type, cap) \
652 + .matches = match, \
653 + }
654 +
655 #ifdef CONFIG_ARM64_PTR_AUTH
656 static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
657 {
658 @@ -1668,8 +1672,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
659 {},
660 };
661
662 +#ifdef CONFIG_COMPAT
663 +static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
664 +{
665 + /*
666 + * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
667 + * in line with that of arm32 as in vfp_init(). We make sure that the
668 + * check is future proof, by making sure value is non-zero.
669 + */
670 + u32 mvfr1;
671 +
672 + WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
673 + if (scope == SCOPE_SYSTEM)
674 + mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
675 + else
676 + mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
677 +
678 + return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
679 + cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
680 + cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
681 +}
682 +#endif
683 +
684 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
685 #ifdef CONFIG_COMPAT
686 + HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
687 + HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
688 + /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
689 + HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
690 + HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
691 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
692 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
693 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
694 diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
695 index 37d3912cfe06..1765e5284994 100644
696 --- a/arch/arm64/kernel/fpsimd.c
697 +++ b/arch/arm64/kernel/fpsimd.c
698 @@ -269,6 +269,7 @@ static void sve_free(struct task_struct *task)
699 */
700 static void task_fpsimd_load(void)
701 {
702 + WARN_ON(!system_supports_fpsimd());
703 WARN_ON(!have_cpu_fpsimd_context());
704
705 if (system_supports_sve() && test_thread_flag(TIF_SVE))
706 @@ -289,6 +290,7 @@ static void fpsimd_save(void)
707 this_cpu_ptr(&fpsimd_last_state);
708 /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
709
710 + WARN_ON(!system_supports_fpsimd());
711 WARN_ON(!have_cpu_fpsimd_context());
712
713 if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
714 @@ -1092,6 +1094,7 @@ void fpsimd_bind_task_to_cpu(void)
715 struct fpsimd_last_state_struct *last =
716 this_cpu_ptr(&fpsimd_last_state);
717
718 + WARN_ON(!system_supports_fpsimd());
719 last->st = &current->thread.uw.fpsimd_state;
720 last->sve_state = current->thread.sve_state;
721 last->sve_vl = current->thread.sve_vl;
722 @@ -1114,6 +1117,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
723 struct fpsimd_last_state_struct *last =
724 this_cpu_ptr(&fpsimd_last_state);
725
726 + WARN_ON(!system_supports_fpsimd());
727 WARN_ON(!in_softirq() && !irqs_disabled());
728
729 last->st = st;
730 @@ -1128,8 +1132,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
731 */
732 void fpsimd_restore_current_state(void)
733 {
734 - if (!system_supports_fpsimd())
735 + /*
736 + * For the tasks that were created before we detected the absence of
737 + * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
738 + * e.g, init. This could be then inherited by the children processes.
739 + * If we later detect that the system doesn't support FP/SIMD,
740 + * we must clear the flag for all the tasks to indicate that the
741 + * FPSTATE is clean (as we can't have one) to avoid looping for ever in
742 + * do_notify_resume().
743 + */
744 + if (!system_supports_fpsimd()) {
745 + clear_thread_flag(TIF_FOREIGN_FPSTATE);
746 return;
747 + }
748
749 get_cpu_fpsimd_context();
750
751 @@ -1148,7 +1163,7 @@ void fpsimd_restore_current_state(void)
752 */
753 void fpsimd_update_current_state(struct user_fpsimd_state const *state)
754 {
755 - if (!system_supports_fpsimd())
756 + if (WARN_ON(!system_supports_fpsimd()))
757 return;
758
759 get_cpu_fpsimd_context();
760 @@ -1179,7 +1194,13 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
761 void fpsimd_flush_task_state(struct task_struct *t)
762 {
763 t->thread.fpsimd_cpu = NR_CPUS;
764 -
765 + /*
766 + * If we don't support fpsimd, bail out after we have
767 + * reset the fpsimd_cpu for this task and clear the
768 + * FPSTATE.
769 + */
770 + if (!system_supports_fpsimd())
771 + return;
772 barrier();
773 set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
774
775 @@ -1193,6 +1214,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
776 */
777 static void fpsimd_flush_cpu_state(void)
778 {
779 + WARN_ON(!system_supports_fpsimd());
780 __this_cpu_write(fpsimd_last_state.st, NULL);
781 set_thread_flag(TIF_FOREIGN_FPSTATE);
782 }
783 @@ -1203,6 +1225,8 @@ static void fpsimd_flush_cpu_state(void)
784 */
785 void fpsimd_save_and_flush_cpu_state(void)
786 {
787 + if (!system_supports_fpsimd())
788 + return;
789 WARN_ON(preemptible());
790 __get_cpu_fpsimd_context();
791 fpsimd_save();
792 diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
793 index 21176d02e21a..9168c4f1a37f 100644
794 --- a/arch/arm64/kernel/ptrace.c
795 +++ b/arch/arm64/kernel/ptrace.c
796 @@ -615,6 +615,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
797 return 0;
798 }
799
800 +static int fpr_active(struct task_struct *target, const struct user_regset *regset)
801 +{
802 + if (!system_supports_fpsimd())
803 + return -ENODEV;
804 + return regset->n;
805 +}
806 +
807 /*
808 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
809 */
810 @@ -637,6 +644,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
811 unsigned int pos, unsigned int count,
812 void *kbuf, void __user *ubuf)
813 {
814 + if (!system_supports_fpsimd())
815 + return -EINVAL;
816 +
817 if (target == current)
818 fpsimd_preserve_current_state();
819
820 @@ -676,6 +686,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
821 {
822 int ret;
823
824 + if (!system_supports_fpsimd())
825 + return -EINVAL;
826 +
827 ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
828 if (ret)
829 return ret;
830 @@ -1134,6 +1147,7 @@ static const struct user_regset aarch64_regsets[] = {
831 */
832 .size = sizeof(u32),
833 .align = sizeof(u32),
834 + .active = fpr_active,
835 .get = fpr_get,
836 .set = fpr_set
837 },
838 @@ -1348,6 +1362,9 @@ static int compat_vfp_get(struct task_struct *target,
839 compat_ulong_t fpscr;
840 int ret, vregs_end_pos;
841
842 + if (!system_supports_fpsimd())
843 + return -EINVAL;
844 +
845 uregs = &target->thread.uw.fpsimd_state;
846
847 if (target == current)
848 @@ -1381,6 +1398,9 @@ static int compat_vfp_set(struct task_struct *target,
849 compat_ulong_t fpscr;
850 int ret, vregs_end_pos;
851
852 + if (!system_supports_fpsimd())
853 + return -EINVAL;
854 +
855 uregs = &target->thread.uw.fpsimd_state;
856
857 vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
858 @@ -1438,6 +1458,7 @@ static const struct user_regset aarch32_regsets[] = {
859 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
860 .size = sizeof(compat_ulong_t),
861 .align = sizeof(compat_ulong_t),
862 + .active = fpr_active,
863 .get = compat_vfp_get,
864 .set = compat_vfp_set
865 },
866 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
867 index 799e84a40335..d76a3d39b269 100644
868 --- a/arch/arm64/kvm/hyp/switch.c
869 +++ b/arch/arm64/kvm/hyp/switch.c
870 @@ -28,7 +28,15 @@
871 /* Check whether the FP regs were dirtied while in the host-side run loop: */
872 static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
873 {
874 - if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
875 + /*
876 + * When the system doesn't support FP/SIMD, we cannot rely on
877 + * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
878 + * abort on the very first access to FP and thus we should never
879 + * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
880 + * trap the accesses.
881 + */
882 + if (!system_supports_fpsimd() ||
883 + vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
884 vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
885 KVM_ARM64_FP_HOST);
886
887 diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
888 index c59920920ddc..b915fe658979 100644
889 --- a/arch/powerpc/Kconfig.debug
890 +++ b/arch/powerpc/Kconfig.debug
891 @@ -371,7 +371,7 @@ config PPC_PTDUMP
892
893 config PPC_DEBUG_WX
894 bool "Warn on W+X mappings at boot"
895 - depends on PPC_PTDUMP
896 + depends on PPC_PTDUMP && STRICT_KERNEL_RWX
897 help
898 Generate a warning if any W+X mappings are found at boot.
899
900 diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
901 index 8ec5dfb65b2e..784cae9f5697 100644
902 --- a/arch/powerpc/mm/pgtable_32.c
903 +++ b/arch/powerpc/mm/pgtable_32.c
904 @@ -221,6 +221,7 @@ void mark_rodata_ro(void)
905
906 if (v_block_mapped((unsigned long)_sinittext)) {
907 mmu_mark_rodata_ro();
908 + ptdump_check_wx();
909 return;
910 }
911
912 diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
913 index 6ba081dd61c9..b4ce9d472dfe 100644
914 --- a/arch/powerpc/platforms/pseries/iommu.c
915 +++ b/arch/powerpc/platforms/pseries/iommu.c
916 @@ -36,7 +36,6 @@
917 #include <asm/udbg.h>
918 #include <asm/mmzone.h>
919 #include <asm/plpar_wrappers.h>
920 -#include <asm/svm.h>
921
922 #include "pseries.h"
923
924 @@ -133,10 +132,10 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
925 return be64_to_cpu(*tcep);
926 }
927
928 -static void tce_free_pSeriesLP(struct iommu_table*, long, long);
929 +static void tce_free_pSeriesLP(unsigned long liobn, long, long);
930 static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
931
932 -static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
933 +static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
934 long npages, unsigned long uaddr,
935 enum dma_data_direction direction,
936 unsigned long attrs)
937 @@ -147,25 +146,25 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
938 int ret = 0;
939 long tcenum_start = tcenum, npages_start = npages;
940
941 - rpn = __pa(uaddr) >> TCE_SHIFT;
942 + rpn = __pa(uaddr) >> tceshift;
943 proto_tce = TCE_PCI_READ;
944 if (direction != DMA_TO_DEVICE)
945 proto_tce |= TCE_PCI_WRITE;
946
947 while (npages--) {
948 - tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
949 - rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
950 + tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
951 + rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
952
953 if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
954 ret = (int)rc;
955 - tce_free_pSeriesLP(tbl, tcenum_start,
956 + tce_free_pSeriesLP(liobn, tcenum_start,
957 (npages_start - (npages + 1)));
958 break;
959 }
960
961 if (rc && printk_ratelimit()) {
962 printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
963 - printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
964 + printk("\tindex = 0x%llx\n", (u64)liobn);
965 printk("\ttcenum = 0x%llx\n", (u64)tcenum);
966 printk("\ttce val = 0x%llx\n", tce );
967 dump_stack();
968 @@ -194,7 +193,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
969 unsigned long flags;
970
971 if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
972 - return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
973 + return tce_build_pSeriesLP(tbl->it_index, tcenum,
974 + tbl->it_page_shift, npages, uaddr,
975 direction, attrs);
976 }
977
978 @@ -210,8 +210,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
979 /* If allocation fails, fall back to the loop implementation */
980 if (!tcep) {
981 local_irq_restore(flags);
982 - return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
983 - direction, attrs);
984 + return tce_build_pSeriesLP(tbl->it_index, tcenum,
985 + tbl->it_page_shift,
986 + npages, uaddr, direction, attrs);
987 }
988 __this_cpu_write(tce_page, tcep);
989 }
990 @@ -262,16 +263,16 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
991 return ret;
992 }
993
994 -static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
995 +static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
996 {
997 u64 rc;
998
999 while (npages--) {
1000 - rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
1001 + rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);
1002
1003 if (rc && printk_ratelimit()) {
1004 printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
1005 - printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
1006 + printk("\tindex = 0x%llx\n", (u64)liobn);
1007 printk("\ttcenum = 0x%llx\n", (u64)tcenum);
1008 dump_stack();
1009 }
1010 @@ -286,7 +287,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
1011 u64 rc;
1012
1013 if (!firmware_has_feature(FW_FEATURE_MULTITCE))
1014 - return tce_free_pSeriesLP(tbl, tcenum, npages);
1015 + return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
1016
1017 rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
1018
1019 @@ -401,6 +402,19 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
1020 u64 rc = 0;
1021 long l, limit;
1022
1023 + if (!firmware_has_feature(FW_FEATURE_MULTITCE)) {
1024 + unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
1025 + unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
1026 + be64_to_cpu(maprange->dma_base);
1027 + unsigned long tcenum = dmastart >> tceshift;
1028 + unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
1029 + void *uaddr = __va(start_pfn << PAGE_SHIFT);
1030 +
1031 + return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
1032 + tcenum, tceshift, npages, (unsigned long) uaddr,
1033 + DMA_BIDIRECTIONAL, 0);
1034 + }
1035 +
1036 local_irq_disable(); /* to protect tcep and the page behind it */
1037 tcep = __this_cpu_read(tce_page);
1038
1039 @@ -1320,15 +1334,7 @@ void iommu_init_early_pSeries(void)
1040 of_reconfig_notifier_register(&iommu_reconfig_nb);
1041 register_memory_notifier(&iommu_mem_nb);
1042
1043 - /*
1044 - * Secure guest memory is inacessible to devices so regular DMA isn't
1045 - * possible.
1046 - *
1047 - * In that case keep devices' dma_map_ops as NULL so that the generic
1048 - * DMA code path will use SWIOTLB to bounce buffers for DMA.
1049 - */
1050 - if (!is_secure_guest())
1051 - set_pci_dma_ops(&dma_iommu_ops);
1052 + set_pci_dma_ops(&dma_iommu_ops);
1053 }
1054
1055 static int __init disable_multitce(char *str)
1056 diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
1057 index ee07d0718bf1..66fd517c4816 100644
1058 --- a/arch/powerpc/platforms/pseries/papr_scm.c
1059 +++ b/arch/powerpc/platforms/pseries/papr_scm.c
1060 @@ -342,6 +342,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
1061 p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
1062 if (!p->bus) {
1063 dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
1064 + kfree(p->bus_desc.provider_name);
1065 return -ENXIO;
1066 }
1067
1068 @@ -498,6 +499,7 @@ static int papr_scm_remove(struct platform_device *pdev)
1069
1070 nvdimm_bus_unregister(p->bus);
1071 drc_pmem_unbind(p);
1072 + kfree(p->bus_desc.provider_name);
1073 kfree(p);
1074
1075 return 0;
1076 diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
1077 index 79e2287991db..f682b7babc09 100644
1078 --- a/arch/powerpc/platforms/pseries/vio.c
1079 +++ b/arch/powerpc/platforms/pseries/vio.c
1080 @@ -1176,6 +1176,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1081 if (tbl == NULL)
1082 return NULL;
1083
1084 + kref_init(&tbl->it_kref);
1085 +
1086 of_parse_dma_window(dev->dev.of_node, dma_window,
1087 &tbl->it_index, &offset, &size);
1088
1089 diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c
1090 index 25019d42ae93..ef2ad7253cd5 100644
1091 --- a/arch/x86/boot/compressed/acpi.c
1092 +++ b/arch/x86/boot/compressed/acpi.c
1093 @@ -393,7 +393,13 @@ int count_immovable_mem_regions(void)
1094 table = table_addr + sizeof(struct acpi_table_srat);
1095
1096 while (table + sizeof(struct acpi_subtable_header) < table_end) {
1097 +
1098 sub_table = (struct acpi_subtable_header *)table;
1099 + if (!sub_table->length) {
1100 + debug_putstr("Invalid zero length SRAT subtable.\n");
1101 + return 0;
1102 + }
1103 +
1104 if (sub_table->type == ACPI_SRAT_TYPE_MEMORY_AFFINITY) {
1105 struct acpi_srat_mem_affinity *ma;
1106
1107 diff --git a/crypto/testmgr.c b/crypto/testmgr.c
1108 index c39e39e55dc2..7473c5bc06b1 100644
1109 --- a/crypto/testmgr.c
1110 +++ b/crypto/testmgr.c
1111 @@ -2102,6 +2102,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
1112 * If the key or authentication tag size couldn't be set, no need to
1113 * continue to encrypt.
1114 */
1115 + vec->crypt_error = 0;
1116 if (vec->setkey_error || vec->setauthsize_error)
1117 goto done;
1118
1119 @@ -2245,10 +2246,12 @@ static int test_aead_vs_generic_impl(const char *driver,
1120 req, tsgls);
1121 if (err)
1122 goto out;
1123 - err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, cfg,
1124 - req, tsgls);
1125 - if (err)
1126 - goto out;
1127 + if (vec.crypt_error == 0) {
1128 + err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name,
1129 + cfg, req, tsgls);
1130 + if (err)
1131 + goto out;
1132 + }
1133 cond_resched();
1134 }
1135 err = 0;
1136 @@ -2678,6 +2681,15 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
1137 skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
1138 skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
1139 vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
1140 + if (vec->crypt_error != 0) {
1141 + /*
1142 + * The only acceptable error here is for an invalid length, so
1143 + * skcipher decryption should fail with the same error too.
1144 + * We'll test for this. But to keep the API usage well-defined,
1145 + * explicitly initialize the ciphertext buffer too.
1146 + */
1147 + memset((u8 *)vec->ctext, 0, vec->len);
1148 + }
1149 done:
1150 snprintf(name, max_namelen, "\"random: len=%u klen=%u\"",
1151 vec->len, vec->klen);
1152 diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
1153 index 19f57ccfbe1d..59f911e57719 100644
1154 --- a/drivers/base/regmap/regmap.c
1155 +++ b/drivers/base/regmap/regmap.c
1156 @@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1157
1158 WARN_ON(!map->bus);
1159
1160 - /* Check for unwritable registers before we start */
1161 - for (i = 0; i < val_len / map->format.val_bytes; i++)
1162 - if (!regmap_writeable(map,
1163 - reg + regmap_get_offset(map, i)))
1164 - return -EINVAL;
1165 + /* Check for unwritable or noinc registers in range
1166 + * before we start
1167 + */
1168 + if (!regmap_writeable_noinc(map, reg)) {
1169 + for (i = 0; i < val_len / map->format.val_bytes; i++) {
1170 + unsigned int element =
1171 + reg + regmap_get_offset(map, i);
1172 + if (!regmap_writeable(map, element) ||
1173 + regmap_writeable_noinc(map, element))
1174 + return -EINVAL;
1175 + }
1176 + }
1177
1178 if (!map->cache_bypass && map->format.parse_val) {
1179 unsigned int ival;
1180 diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
1181 index b3af61cc6fb9..d2760a021301 100644
1182 --- a/drivers/clk/meson/g12a.c
1183 +++ b/drivers/clk/meson/g12a.c
1184 @@ -4692,6 +4692,7 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
1185 &g12a_bt656,
1186 &g12a_usb1_to_ddr,
1187 &g12a_mmc_pclk,
1188 + &g12a_uart2,
1189 &g12a_vpu_intr,
1190 &g12a_gic,
1191 &g12a_sd_emmc_a_clk0,
1192 diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
1193 index d32626458e67..1f9c16395a3f 100644
1194 --- a/drivers/crypto/atmel-sha.c
1195 +++ b/drivers/crypto/atmel-sha.c
1196 @@ -1918,12 +1918,7 @@ static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
1197 {
1198 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1199
1200 - if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
1201 - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1202 - return -EINVAL;
1203 - }
1204 -
1205 - return 0;
1206 + return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
1207 }
1208
1209 static int atmel_sha_hmac_init(struct ahash_request *req)
1210 diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
1211 index 4b20606983a4..22ebe40f09f5 100644
1212 --- a/drivers/crypto/axis/artpec6_crypto.c
1213 +++ b/drivers/crypto/axis/artpec6_crypto.c
1214 @@ -1251,7 +1251,7 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
1215
1216 if (len != 16 && len != 24 && len != 32) {
1217 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1218 - return -1;
1219 + return -EINVAL;
1220 }
1221
1222 ctx->key_length = len;
1223 diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
1224 index 3443f6d6dd83..6863d7097674 100644
1225 --- a/drivers/crypto/caam/caamalg_qi2.c
1226 +++ b/drivers/crypto/caam/caamalg_qi2.c
1227 @@ -2481,7 +2481,7 @@ static struct caam_aead_alg driver_aeads[] = {
1228 .cra_name = "echainiv(authenc(hmac(sha256),"
1229 "cbc(des)))",
1230 .cra_driver_name = "echainiv-authenc-"
1231 - "hmac-sha256-cbc-desi-"
1232 + "hmac-sha256-cbc-des-"
1233 "caam-qi2",
1234 .cra_blocksize = DES_BLOCK_SIZE,
1235 },
1236 diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
1237 index a0ee404b736e..f1d149e32839 100644
1238 --- a/drivers/dma/dma-axi-dmac.c
1239 +++ b/drivers/dma/dma-axi-dmac.c
1240 @@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
1241 struct dma_device *dma_dev;
1242 struct axi_dmac *dmac;
1243 struct resource *res;
1244 + struct regmap *regmap;
1245 int ret;
1246
1247 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
1248 @@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev)
1249
1250 platform_set_drvdata(pdev, dmac);
1251
1252 - devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
1253 + regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
1254 + &axi_dmac_regmap_config);
1255 + if (IS_ERR(regmap)) {
1256 + ret = PTR_ERR(regmap);
1257 + goto err_free_irq;
1258 + }
1259
1260 return 0;
1261
1262 +err_free_irq:
1263 + free_irq(dmac->irq, dmac);
1264 err_unregister_of:
1265 of_dma_controller_free(pdev->dev.of_node);
1266 err_unregister_device:
1267 diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
1268 index 606fa6d86685..1753a9801b70 100644
1269 --- a/drivers/infiniband/core/addr.c
1270 +++ b/drivers/infiniband/core/addr.c
1271 @@ -139,7 +139,7 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
1272 if (ib_nl_is_good_ip_resp(nlh))
1273 ib_nl_process_good_ip_rsep(nlh);
1274
1275 - return skb->len;
1276 + return 0;
1277 }
1278
1279 static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
1280 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1281 index 50052e9a1731..9008937f8ed8 100644
1282 --- a/drivers/infiniband/core/cma.c
1283 +++ b/drivers/infiniband/core/cma.c
1284 @@ -3091,6 +3091,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1285 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1286 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1287
1288 + atomic_inc(&id_priv->refcount);
1289 cma_init_resolve_addr_work(work, id_priv);
1290 queue_work(cma_wq, &work->work);
1291 return 0;
1292 @@ -3117,6 +3118,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
1293 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
1294 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
1295
1296 + atomic_inc(&id_priv->refcount);
1297 cma_init_resolve_addr_work(work, id_priv);
1298 queue_work(cma_wq, &work->work);
1299 return 0;
1300 diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
1301 index 17fc2936c077..bddb5434fbed 100644
1302 --- a/drivers/infiniband/core/sa_query.c
1303 +++ b/drivers/infiniband/core/sa_query.c
1304 @@ -1068,7 +1068,7 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
1305 }
1306
1307 settimeout_out:
1308 - return skb->len;
1309 + return 0;
1310 }
1311
1312 static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
1313 @@ -1139,7 +1139,7 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
1314 }
1315
1316 resp_out:
1317 - return skb->len;
1318 + return 0;
1319 }
1320
1321 static void free_sm_ah(struct kref *kref)
1322 diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
1323 index 24244a2f68cc..0d42ba8c0b69 100644
1324 --- a/drivers/infiniband/core/umem.c
1325 +++ b/drivers/infiniband/core/umem.c
1326 @@ -166,10 +166,13 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
1327 * for any address.
1328 */
1329 mask |= (sg_dma_address(sg) + pgoff) ^ va;
1330 - if (i && i != (umem->nmap - 1))
1331 - /* restrict by length as well for interior SGEs */
1332 - mask |= sg_dma_len(sg);
1333 va += sg_dma_len(sg) - pgoff;
1334 + /* Except for the last entry, the ending iova alignment sets
1335 + * the maximum possible page size as the low bits of the iova
1336 + * must be zero when starting the next chunk.
1337 + */
1338 + if (i != (umem->nmap - 1))
1339 + mask |= va;
1340 pgoff = 0;
1341 }
1342 best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);
1343 diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
1344 index db98111b47f4..f2a2d1246c19 100644
1345 --- a/drivers/infiniband/core/uverbs_main.c
1346 +++ b/drivers/infiniband/core/uverbs_main.c
1347 @@ -220,7 +220,6 @@ void ib_uverbs_release_file(struct kref *ref)
1348 }
1349
1350 static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
1351 - struct ib_uverbs_file *uverbs_file,
1352 struct file *filp, char __user *buf,
1353 size_t count, loff_t *pos,
1354 size_t eventsz)
1355 @@ -238,19 +237,16 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
1356
1357 if (wait_event_interruptible(ev_queue->poll_wait,
1358 (!list_empty(&ev_queue->event_list) ||
1359 - /* The barriers built into wait_event_interruptible()
1360 - * and wake_up() guarentee this will see the null set
1361 - * without using RCU
1362 - */
1363 - !uverbs_file->device->ib_dev)))
1364 + ev_queue->is_closed)))
1365 return -ERESTARTSYS;
1366
1367 + spin_lock_irq(&ev_queue->lock);
1368 +
1369 /* If device was disassociated and no event exists set an error */
1370 - if (list_empty(&ev_queue->event_list) &&
1371 - !uverbs_file->device->ib_dev)
1372 + if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
1373 + spin_unlock_irq(&ev_queue->lock);
1374 return -EIO;
1375 -
1376 - spin_lock_irq(&ev_queue->lock);
1377 + }
1378 }
1379
1380 event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
1381 @@ -285,8 +281,7 @@ static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
1382 {
1383 struct ib_uverbs_async_event_file *file = filp->private_data;
1384
1385 - return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
1386 - buf, count, pos,
1387 + return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
1388 sizeof(struct ib_uverbs_async_event_desc));
1389 }
1390
1391 @@ -296,9 +291,8 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
1392 struct ib_uverbs_completion_event_file *comp_ev_file =
1393 filp->private_data;
1394
1395 - return ib_uverbs_event_read(&comp_ev_file->ev_queue,
1396 - comp_ev_file->uobj.ufile, filp,
1397 - buf, count, pos,
1398 + return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
1399 + pos,
1400 sizeof(struct ib_uverbs_comp_event_desc));
1401 }
1402
1403 @@ -321,7 +315,9 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
1404 static __poll_t ib_uverbs_async_event_poll(struct file *filp,
1405 struct poll_table_struct *wait)
1406 {
1407 - return ib_uverbs_event_poll(filp->private_data, filp, wait);
1408 + struct ib_uverbs_async_event_file *file = filp->private_data;
1409 +
1410 + return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
1411 }
1412
1413 static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
1414 @@ -335,9 +331,9 @@ static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
1415
1416 static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
1417 {
1418 - struct ib_uverbs_event_queue *ev_queue = filp->private_data;
1419 + struct ib_uverbs_async_event_file *file = filp->private_data;
1420
1421 - return fasync_helper(fd, filp, on, &ev_queue->async_queue);
1422 + return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
1423 }
1424
1425 static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
1426 diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
1427 index d44cf33df81a..238614370927 100644
1428 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c
1429 +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
1430 @@ -1225,6 +1225,8 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
1431 const struct in_ifaddr *ifa;
1432
1433 idev = in_dev_get(dev);
1434 + if (!idev)
1435 + continue;
1436 in_dev_for_each_ifa_rtnl(ifa, idev) {
1437 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
1438 "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
1439 diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
1440 index ecd6cadd529a..b591861934b3 100644
1441 --- a/drivers/infiniband/hw/mlx4/cm.c
1442 +++ b/drivers/infiniband/hw/mlx4/cm.c
1443 @@ -186,23 +186,6 @@ out:
1444 kfree(ent);
1445 }
1446
1447 -static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
1448 -{
1449 - struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
1450 - struct rb_root *sl_id_map = &sriov->sl_id_map;
1451 - struct id_map_entry *ent, *found_ent;
1452 -
1453 - spin_lock(&sriov->id_map_lock);
1454 - ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
1455 - if (!ent)
1456 - goto out;
1457 - found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
1458 - if (found_ent && found_ent == ent)
1459 - rb_erase(&found_ent->node, sl_id_map);
1460 -out:
1461 - spin_unlock(&sriov->id_map_lock);
1462 -}
1463 -
1464 static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
1465 {
1466 struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
1467 @@ -294,7 +277,7 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
1468 spin_lock(&sriov->id_map_lock);
1469 spin_lock_irqsave(&sriov->going_down_lock, flags);
1470 /*make sure that there is no schedule inside the scheduled work.*/
1471 - if (!sriov->is_going_down) {
1472 + if (!sriov->is_going_down && !id->scheduled_delete) {
1473 id->scheduled_delete = 1;
1474 schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
1475 }
1476 @@ -341,9 +324,6 @@ cont:
1477
1478 if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
1479 schedule_delayed(ibdev, id);
1480 - else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
1481 - id_map_find_del(ibdev, pv_cm_id);
1482 -
1483 return 0;
1484 }
1485
1486 @@ -382,12 +362,9 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
1487 *slave = id->slave_id;
1488 set_remote_comm_id(mad, id->sl_cm_id);
1489
1490 - if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
1491 + if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
1492 + mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
1493 schedule_delayed(ibdev, id);
1494 - else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
1495 - mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
1496 - id_map_find_del(ibdev, (int) pv_cm_id);
1497 - }
1498
1499 return 0;
1500 }
1501 diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
1502 index 907d99822bf0..369a203332a2 100644
1503 --- a/drivers/infiniband/hw/mlx4/main.c
1504 +++ b/drivers/infiniband/hw/mlx4/main.c
1505 @@ -246,6 +246,13 @@ static int mlx4_ib_update_gids(struct gid_entry *gids,
1506 return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
1507 }
1508
1509 +static void free_gid_entry(struct gid_entry *entry)
1510 +{
1511 + memset(&entry->gid, 0, sizeof(entry->gid));
1512 + kfree(entry->ctx);
1513 + entry->ctx = NULL;
1514 +}
1515 +
1516 static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
1517 {
1518 struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
1519 @@ -306,6 +313,8 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
1520 GFP_ATOMIC);
1521 if (!gids) {
1522 ret = -ENOMEM;
1523 + *context = NULL;
1524 + free_gid_entry(&port_gid_table->gids[free]);
1525 } else {
1526 for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
1527 memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
1528 @@ -317,6 +326,12 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
1529
1530 if (!ret && hw_update) {
1531 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
1532 + if (ret) {
1533 + spin_lock_bh(&iboe->lock);
1534 + *context = NULL;
1535 + free_gid_entry(&port_gid_table->gids[free]);
1536 + spin_unlock_bh(&iboe->lock);
1537 + }
1538 kfree(gids);
1539 }
1540
1541 @@ -346,10 +361,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
1542 if (!ctx->refcount) {
1543 unsigned int real_index = ctx->real_index;
1544
1545 - memset(&port_gid_table->gids[real_index].gid, 0,
1546 - sizeof(port_gid_table->gids[real_index].gid));
1547 - kfree(port_gid_table->gids[real_index].ctx);
1548 - port_gid_table->gids[real_index].ctx = NULL;
1549 + free_gid_entry(&port_gid_table->gids[real_index]);
1550 hw_update = 1;
1551 }
1552 }
1553 diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1554 index b5960351bec0..8708ed5477e9 100644
1555 --- a/drivers/infiniband/ulp/srp/ib_srp.c
1556 +++ b/drivers/infiniband/ulp/srp/ib_srp.c
1557 @@ -2536,7 +2536,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1558 if (lrsp->opcode == SRP_LOGIN_RSP) {
1559 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1560 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
1561 - ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
1562 + ch->use_imm_data = srp_use_imm_data &&
1563 + (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
1564 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1565 ch->use_imm_data);
1566 WARN_ON_ONCE(ch->max_it_iu_len >
1567 diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
1568 index ed90361b84dc..ee8d48d863e1 100644
1569 --- a/drivers/iommu/arm-smmu-v3.c
1570 +++ b/drivers/iommu/arm-smmu-v3.c
1571 @@ -856,6 +856,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
1572 cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
1573 break;
1574 case CMDQ_OP_TLBI_NH_VA:
1575 + cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
1576 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
1577 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
1578 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
1579 diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
1580 index be2a2a201603..33ddc5269e8d 100644
1581 --- a/drivers/md/bcache/journal.c
1582 +++ b/drivers/md/bcache/journal.c
1583 @@ -417,10 +417,14 @@ err:
1584
1585 /* Journalling */
1586
1587 +#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask))
1588 +
1589 static void btree_flush_write(struct cache_set *c)
1590 {
1591 struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
1592 - unsigned int i, n;
1593 + unsigned int i, nr, ref_nr;
1594 + atomic_t *fifo_front_p, *now_fifo_front_p;
1595 + size_t mask;
1596
1597 if (c->journal.btree_flushing)
1598 return;
1599 @@ -433,12 +437,50 @@ static void btree_flush_write(struct cache_set *c)
1600 c->journal.btree_flushing = true;
1601 spin_unlock(&c->journal.flush_write_lock);
1602
1603 + /* get the oldest journal entry and check its refcount */
1604 + spin_lock(&c->journal.lock);
1605 + fifo_front_p = &fifo_front(&c->journal.pin);
1606 + ref_nr = atomic_read(fifo_front_p);
1607 + if (ref_nr <= 0) {
1608 + /*
1609 + * do nothing if no btree node references
1610 + * the oldest journal entry
1611 + */
1612 + spin_unlock(&c->journal.lock);
1613 + goto out;
1614 + }
1615 + spin_unlock(&c->journal.lock);
1616 +
1617 + mask = c->journal.pin.mask;
1618 + nr = 0;
1619 atomic_long_inc(&c->flush_write);
1620 memset(btree_nodes, 0, sizeof(btree_nodes));
1621 - n = 0;
1622
1623 mutex_lock(&c->bucket_lock);
1624 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
1625 + /*
1626 + * It is safe to get now_fifo_front_p without holding
1627 + * c->journal.lock here, because we don't need to know
1628 +		 * the exact value, just check whether the
1629 +		 * front pointer of c->journal.pin has changed.
1630 + */
1631 + now_fifo_front_p = &fifo_front(&c->journal.pin);
1632 + /*
1633 + * If the oldest journal entry is reclaimed and front
1634 + * pointer of c->journal.pin changes, it is unnecessary
1635 + * to scan c->btree_cache anymore, just quit the loop and
1636 + * flush out what we have already.
1637 + */
1638 + if (now_fifo_front_p != fifo_front_p)
1639 + break;
1640 + /*
1641 + * quit this loop if all matching btree nodes are
1642 +		 * scanned and recorded in btree_nodes[] already.
1643 + */
1644 + ref_nr = atomic_read(fifo_front_p);
1645 + if (nr >= ref_nr)
1646 + break;
1647 +
1648 if (btree_node_journal_flush(b))
1649 pr_err("BUG: flush_write bit should not be set here!");
1650
1651 @@ -454,17 +496,44 @@ static void btree_flush_write(struct cache_set *c)
1652 continue;
1653 }
1654
1655 + /*
1656 + * Only select the btree node which exactly references
1657 + * the oldest journal entry.
1658 + *
1659 +		 * If the journal entry pointed to by fifo_front_p is
1660 + * reclaimed in parallel, don't worry:
1661 + * - the list_for_each_xxx loop will quit when checking
1662 + * next now_fifo_front_p.
1663 + * - If there are matched nodes recorded in btree_nodes[],
1664 + * they are clean now (this is why and how the oldest
1665 + * journal entry can be reclaimed). These selected nodes
1666 +		 * will be ignored and skipped in the following for-loop.
1667 + */
1668 + if (nr_to_fifo_front(btree_current_write(b)->journal,
1669 + fifo_front_p,
1670 + mask) != 0) {
1671 + mutex_unlock(&b->write_lock);
1672 + continue;
1673 + }
1674 +
1675 set_btree_node_journal_flush(b);
1676
1677 mutex_unlock(&b->write_lock);
1678
1679 - btree_nodes[n++] = b;
1680 - if (n == BTREE_FLUSH_NR)
1681 + btree_nodes[nr++] = b;
1682 + /*
1683 +		 * To avoid holding c->bucket_lock for too long,
1684 +		 * only scan for at most BTREE_FLUSH_NR matched btree
1685 +		 * nodes. If there are more btree nodes referencing
1686 +		 * the oldest journal entry, try to flush them next
1687 + * time when btree_flush_write() is called.
1688 + */
1689 + if (nr == BTREE_FLUSH_NR)
1690 break;
1691 }
1692 mutex_unlock(&c->bucket_lock);
1693
1694 - for (i = 0; i < n; i++) {
1695 + for (i = 0; i < nr; i++) {
1696 b = btree_nodes[i];
1697 if (!b) {
1698 pr_err("BUG: btree_nodes[%d] is NULL", i);
1699 @@ -497,6 +566,7 @@ static void btree_flush_write(struct cache_set *c)
1700 mutex_unlock(&b->write_lock);
1701 }
1702
1703 +out:
1704 spin_lock(&c->journal.flush_write_lock);
1705 c->journal.btree_flushing = false;
1706 spin_unlock(&c->journal.flush_write_lock);
1707 diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
1708 index 5042f9e94aee..fccb388ce179 100644
1709 --- a/drivers/media/i2c/adv748x/adv748x.h
1710 +++ b/drivers/media/i2c/adv748x/adv748x.h
1711 @@ -394,10 +394,10 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
1712
1713 #define io_read(s, r) adv748x_read(s, ADV748X_PAGE_IO, r)
1714 #define io_write(s, r, v) adv748x_write(s, ADV748X_PAGE_IO, r, v)
1715 -#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~m) | v)
1716 +#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~(m)) | (v))
1717
1718 #define hdmi_read(s, r) adv748x_read(s, ADV748X_PAGE_HDMI, r)
1719 -#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, r+1)) & m)
1720 +#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, (r)+1)) & (m))
1721 #define hdmi_write(s, r, v) adv748x_write(s, ADV748X_PAGE_HDMI, r, v)
1722
1723 #define repeater_read(s, r) adv748x_read(s, ADV748X_PAGE_REPEATER, r)
1724 @@ -405,11 +405,11 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
1725
1726 #define sdp_read(s, r) adv748x_read(s, ADV748X_PAGE_SDP, r)
1727 #define sdp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_SDP, r, v)
1728 -#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~m) | v)
1729 +#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~(m)) | (v))
1730
1731 #define cp_read(s, r) adv748x_read(s, ADV748X_PAGE_CP, r)
1732 #define cp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_CP, r, v)
1733 -#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~m) | v)
1734 +#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~(m)) | (v))
1735
1736 #define tx_read(t, r) adv748x_read(t->state, t->page, r)
1737 #define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v)
1738 diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
1739 index ae24d3ea68ea..43169f25da1f 100644
1740 --- a/drivers/mfd/Kconfig
1741 +++ b/drivers/mfd/Kconfig
1742 @@ -758,6 +758,7 @@ config MFD_MAX77650
1743 depends on OF || COMPILE_TEST
1744 select MFD_CORE
1745 select REGMAP_I2C
1746 + select REGMAP_IRQ
1747 help
1748 Say Y here to add support for Maxim Semiconductor MAX77650 and
1749 MAX77651 Power Management ICs. This is the core multifunction
1750 diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
1751 index 77bd32a683e1..9e81cd982dd3 100644
1752 --- a/drivers/mtd/nand/onenand/onenand_base.c
1753 +++ b/drivers/mtd/nand/onenand/onenand_base.c
1754 @@ -1248,44 +1248,44 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1755
1756 stats = mtd->ecc_stats;
1757
1758 - /* Read-while-load method */
1759 + /* Read-while-load method */
1760
1761 - /* Do first load to bufferRAM */
1762 - if (read < len) {
1763 - if (!onenand_check_bufferram(mtd, from)) {
1764 + /* Do first load to bufferRAM */
1765 + if (read < len) {
1766 + if (!onenand_check_bufferram(mtd, from)) {
1767 this->command(mtd, ONENAND_CMD_READ, from, writesize);
1768 - ret = this->wait(mtd, FL_READING);
1769 - onenand_update_bufferram(mtd, from, !ret);
1770 + ret = this->wait(mtd, FL_READING);
1771 + onenand_update_bufferram(mtd, from, !ret);
1772 if (mtd_is_eccerr(ret))
1773 ret = 0;
1774 - }
1775 - }
1776 + }
1777 + }
1778
1779 thislen = min_t(int, writesize, len - read);
1780 column = from & (writesize - 1);
1781 if (column + thislen > writesize)
1782 thislen = writesize - column;
1783
1784 - while (!ret) {
1785 - /* If there is more to load then start next load */
1786 - from += thislen;
1787 - if (read + thislen < len) {
1788 + while (!ret) {
1789 + /* If there is more to load then start next load */
1790 + from += thislen;
1791 + if (read + thislen < len) {
1792 this->command(mtd, ONENAND_CMD_READ, from, writesize);
1793 - /*
1794 - * Chip boundary handling in DDP
1795 - * Now we issued chip 1 read and pointed chip 1
1796 + /*
1797 + * Chip boundary handling in DDP
1798 + * Now we issued chip 1 read and pointed chip 1
1799 * bufferram so we have to point chip 0 bufferram.
1800 - */
1801 - if (ONENAND_IS_DDP(this) &&
1802 - unlikely(from == (this->chipsize >> 1))) {
1803 - this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
1804 - boundary = 1;
1805 - } else
1806 - boundary = 0;
1807 - ONENAND_SET_PREV_BUFFERRAM(this);
1808 - }
1809 - /* While load is going, read from last bufferRAM */
1810 - this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
1811 + */
1812 + if (ONENAND_IS_DDP(this) &&
1813 + unlikely(from == (this->chipsize >> 1))) {
1814 + this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
1815 + boundary = 1;
1816 + } else
1817 + boundary = 0;
1818 + ONENAND_SET_PREV_BUFFERRAM(this);
1819 + }
1820 + /* While load is going, read from last bufferRAM */
1821 + this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
1822
1823 /* Read oob area if needed */
1824 if (oobbuf) {
1825 @@ -1301,24 +1301,24 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1826 oobcolumn = 0;
1827 }
1828
1829 - /* See if we are done */
1830 - read += thislen;
1831 - if (read == len)
1832 - break;
1833 - /* Set up for next read from bufferRAM */
1834 - if (unlikely(boundary))
1835 - this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
1836 - ONENAND_SET_NEXT_BUFFERRAM(this);
1837 - buf += thislen;
1838 + /* See if we are done */
1839 + read += thislen;
1840 + if (read == len)
1841 + break;
1842 + /* Set up for next read from bufferRAM */
1843 + if (unlikely(boundary))
1844 + this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
1845 + ONENAND_SET_NEXT_BUFFERRAM(this);
1846 + buf += thislen;
1847 thislen = min_t(int, writesize, len - read);
1848 - column = 0;
1849 - cond_resched();
1850 - /* Now wait for load */
1851 - ret = this->wait(mtd, FL_READING);
1852 - onenand_update_bufferram(mtd, from, !ret);
1853 + column = 0;
1854 + cond_resched();
1855 + /* Now wait for load */
1856 + ret = this->wait(mtd, FL_READING);
1857 + onenand_update_bufferram(mtd, from, !ret);
1858 if (mtd_is_eccerr(ret))
1859 ret = 0;
1860 - }
1861 + }
1862
1863 /*
1864 * Return success, if no ECC failures, else -EBADMSG
1865 diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
1866 index e5ea6127ab5a..671a61845bd5 100644
1867 --- a/drivers/mtd/parsers/sharpslpart.c
1868 +++ b/drivers/mtd/parsers/sharpslpart.c
1869 @@ -165,10 +165,10 @@ static int sharpsl_nand_get_logical_num(u8 *oob)
1870
1871 static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
1872 {
1873 - unsigned int block_num, log_num, phymax;
1874 + unsigned int block_num, phymax;
1875 + int i, ret, log_num;
1876 loff_t block_adr;
1877 u8 *oob;
1878 - int i, ret;
1879
1880 oob = kzalloc(mtd->oobsize, GFP_KERNEL);
1881 if (!oob)
1882 diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
1883 index 347bb92e4130..0a727502d14c 100644
1884 --- a/drivers/net/wireless/ath/ath10k/pci.c
1885 +++ b/drivers/net/wireless/ath/ath10k/pci.c
1886 @@ -1604,11 +1604,22 @@ static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
1887 {
1888 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1889 u32 i;
1890 + int ret;
1891 +
1892 + mutex_lock(&ar->conf_mutex);
1893 + if (ar->state != ATH10K_STATE_ON) {
1894 + ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
1895 + ret = -EIO;
1896 + goto done;
1897 + }
1898
1899 for (i = 0; i < region->len; i += 4)
1900 *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
1901
1902 - return region->len;
1903 + ret = region->len;
1904 +done:
1905 + mutex_unlock(&ar->conf_mutex);
1906 + return ret;
1907 }
1908
1909 /* if an error happened returns < 0, otherwise the length */
1910 @@ -1704,7 +1715,11 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
1911 count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
1912 break;
1913 case ATH10K_MEM_REGION_TYPE_IOREG:
1914 - count = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1915 + ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1916 + if (ret < 0)
1917 + break;
1918 +
1919 + count = ret;
1920 break;
1921 default:
1922 ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
1923 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
1924 index 9f4b117db9d7..d47f76890cf9 100644
1925 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
1926 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
1927 @@ -8,6 +8,7 @@
1928 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
1929 * Copyright (C) 2018 Intel Corporation
1930 * Copyright (C) 2019 Intel Corporation
1931 + * Copyright (C) 2020 Intel Corporation
1932 *
1933 * This program is free software; you can redistribute it and/or modify
1934 * it under the terms of version 2 of the GNU General Public License as
1935 @@ -30,6 +31,7 @@
1936 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
1937 * Copyright (C) 2018 Intel Corporation
1938 * Copyright (C) 2019 Intel Corporation
1939 + * Copyright (C) 2020 Intel Corporation
1940 * All rights reserved.
1941 *
1942 * Redistribution and use in source and binary forms, with or without
1943 @@ -389,6 +391,8 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
1944 if (req != mvm->ftm_initiator.req)
1945 return;
1946
1947 + iwl_mvm_ftm_reset(mvm);
1948 +
1949 if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
1950 LOCATION_GROUP, 0),
1951 0, sizeof(cmd), &cmd))
1952 @@ -502,7 +506,6 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1953 lockdep_assert_held(&mvm->mutex);
1954
1955 if (!mvm->ftm_initiator.req) {
1956 - IWL_ERR(mvm, "Got FTM response but have no request?\n");
1957 return;
1958 }
1959
1960 diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
1961 index c9401c121a14..4e3de684928b 100644
1962 --- a/drivers/net/wireless/marvell/libertas/cfg.c
1963 +++ b/drivers/net/wireless/marvell/libertas/cfg.c
1964 @@ -1785,6 +1785,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
1965 rates_max = rates_eid[1];
1966 if (rates_max > MAX_RATES) {
1967 lbs_deb_join("invalid rates");
1968 + rcu_read_unlock();
1969 + ret = -EINVAL;
1970 goto out;
1971 }
1972 rates = cmd.bss.rates;
1973 diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
1974 index 593c594982cb..59f0651d148b 100644
1975 --- a/drivers/net/wireless/marvell/mwifiex/scan.c
1976 +++ b/drivers/net/wireless/marvell/mwifiex/scan.c
1977 @@ -2886,6 +2886,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
1978 vs_param_set->header.len =
1979 cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
1980 & 0x00FF) + 2);
1981 + if (le16_to_cpu(vs_param_set->header.len) >
1982 + MWIFIEX_MAX_VSIE_LEN) {
1983 + mwifiex_dbg(priv->adapter, ERROR,
1984 + "Invalid param length!\n");
1985 + break;
1986 + }
1987 +
1988 memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
1989 le16_to_cpu(vs_param_set->header.len));
1990 *buffer += le16_to_cpu(vs_param_set->header.len) +
1991 diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
1992 index 41f0231376c0..132f9e8ed68c 100644
1993 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c
1994 +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
1995 @@ -970,6 +970,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
1996 "WMM Parameter Set Count: %d\n",
1997 wmm_param_ie->qos_info_bitmap & mask);
1998
1999 + if (wmm_param_ie->vend_hdr.len + 2 >
2000 + sizeof(struct ieee_types_wmm_parameter))
2001 + break;
2002 +
2003 memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
2004 wmm_ie, wmm_param_ie,
2005 wmm_param_ie->vend_hdr.len + 2);
2006 diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
2007 index 090b632965e2..ac93f5a0398e 100644
2008 --- a/drivers/pci/controller/pci-tegra.c
2009 +++ b/drivers/pci/controller/pci-tegra.c
2010 @@ -2499,7 +2499,6 @@ static const struct tegra_pcie_soc tegra20_pcie = {
2011 .num_ports = 2,
2012 .ports = tegra20_pcie_ports,
2013 .msi_base_shift = 0,
2014 - .afi_pex2_ctrl = 0x128,
2015 .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2016 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2017 .pads_refclk_cfg0 = 0xfa5cfa5c,
2018 @@ -2528,6 +2527,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
2019 .num_ports = 3,
2020 .ports = tegra30_pcie_ports,
2021 .msi_base_shift = 8,
2022 + .afi_pex2_ctrl = 0x128,
2023 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2024 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2025 .pads_refclk_cfg0 = 0xfa5cfa5c,
2026 diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
2027 index b3f972e8cfed..deec9f9e0b61 100644
2028 --- a/drivers/pci/iov.c
2029 +++ b/drivers/pci/iov.c
2030 @@ -187,10 +187,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2031 sprintf(buf, "virtfn%u", id);
2032 rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
2033 if (rc)
2034 - goto failed2;
2035 + goto failed1;
2036 rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
2037 if (rc)
2038 - goto failed3;
2039 + goto failed2;
2040
2041 kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
2042
2043 @@ -198,11 +198,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2044
2045 return 0;
2046
2047 -failed3:
2048 - sysfs_remove_link(&dev->dev.kobj, buf);
2049 failed2:
2050 - pci_stop_and_remove_bus_device(virtfn);
2051 + sysfs_remove_link(&dev->dev.kobj, buf);
2052 failed1:
2053 + pci_stop_and_remove_bus_device(virtfn);
2054 pci_dev_put(dev);
2055 failed0:
2056 virtfn_remove_bus(dev->bus, bus);
2057 diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
2058 index b45bc47d04fe..271aecfbc3bf 100644
2059 --- a/drivers/pci/pcie/aer.c
2060 +++ b/drivers/pci/pcie/aer.c
2061 @@ -1387,6 +1387,7 @@ static int aer_probe(struct pcie_device *dev)
2062 return -ENOMEM;
2063
2064 rpc->rpd = port;
2065 + INIT_KFIFO(rpc->aer_fifo);
2066 set_service_data(dev, rpc);
2067
2068 status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
2069 diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
2070 index e7dbe21705ba..5356630e0e48 100644
2071 --- a/drivers/pci/setup-bus.c
2072 +++ b/drivers/pci/setup-bus.c
2073 @@ -1785,12 +1785,18 @@ again:
2074 /* Restore size and flags */
2075 list_for_each_entry(fail_res, &fail_head, list) {
2076 struct resource *res = fail_res->res;
2077 + int idx;
2078
2079 res->start = fail_res->start;
2080 res->end = fail_res->end;
2081 res->flags = fail_res->flags;
2082 - if (fail_res->dev->subordinate)
2083 - res->flags = 0;
2084 +
2085 + if (pci_is_bridge(fail_res->dev)) {
2086 + idx = res - &fail_res->dev->resource[0];
2087 + if (idx >= PCI_BRIDGE_RESOURCES &&
2088 + idx <= PCI_BRIDGE_RESOURCE_END)
2089 + res->flags = 0;
2090 + }
2091 }
2092 free_list(&fail_head);
2093
2094 @@ -2037,12 +2043,18 @@ again:
2095 /* Restore size and flags */
2096 list_for_each_entry(fail_res, &fail_head, list) {
2097 struct resource *res = fail_res->res;
2098 + int idx;
2099
2100 res->start = fail_res->start;
2101 res->end = fail_res->end;
2102 res->flags = fail_res->flags;
2103 - if (fail_res->dev->subordinate)
2104 - res->flags = 0;
2105 +
2106 + if (pci_is_bridge(fail_res->dev)) {
2107 + idx = res - &fail_res->dev->resource[0];
2108 + if (idx >= PCI_BRIDGE_RESOURCES &&
2109 + idx <= PCI_BRIDGE_RESOURCE_END)
2110 + res->flags = 0;
2111 + }
2112 }
2113 free_list(&fail_head);
2114
2115 diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
2116 index 465d6afd826e..cc43c855452f 100644
2117 --- a/drivers/pci/switch/switchtec.c
2118 +++ b/drivers/pci/switch/switchtec.c
2119 @@ -1276,7 +1276,7 @@ static int switchtec_init_isr(struct switchtec_dev *stdev)
2120 if (nvecs < 0)
2121 return nvecs;
2122
2123 - event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
2124 + event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
2125 if (event_irq < 0 || event_irq >= nvecs)
2126 return -EFAULT;
2127
2128 @@ -1349,7 +1349,7 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
2129 if (rc)
2130 return rc;
2131
2132 - rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
2133 + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2134 if (rc)
2135 return rc;
2136
2137 diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
2138 index 24866a5958ae..a9875038ed9b 100644
2139 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
2140 +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
2141 @@ -2305,7 +2305,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
2142 FN_ATAG0_A, 0, FN_REMOCON_B, 0,
2143 /* IP0_11_8 [4] */
2144 FN_SD1_DAT2_A, FN_MMC_D2, 0, FN_BS,
2145 - FN_ATADIR0_A, 0, FN_SDSELF_B, 0,
2146 + FN_ATADIR0_A, 0, FN_SDSELF_A, 0,
2147 FN_PWM4_B, 0, 0, 0,
2148 0, 0, 0, 0,
2149 /* IP0_7_5 [3] */
2150 @@ -2349,7 +2349,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
2151 FN_TS_SDAT0_A, 0, 0, 0,
2152 0, 0, 0, 0,
2153 /* IP1_10_8 [3] */
2154 - FN_SD1_CLK_B, FN_MMC_D6, 0, FN_A24,
2155 + FN_SD1_CD_A, FN_MMC_D6, 0, FN_A24,
2156 FN_DREQ1_A, 0, FN_HRX0_B, FN_TS_SPSYNC0_A,
2157 /* IP1_7_5 [3] */
2158 FN_A23, FN_HTX0_B, FN_TX2_B, FN_DACK2_A,
2159 diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
2160 index 697c77a4ea95..773d3bc38c8c 100644
2161 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
2162 +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
2163 @@ -5984,7 +5984,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
2164 { PIN_DU_DOTCLKIN1, 0, 2 }, /* DU_DOTCLKIN1 */
2165 } },
2166 { PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
2167 - { PIN_DU_DOTCLKIN3, 28, 2 }, /* DU_DOTCLKIN3 */
2168 + { PIN_DU_DOTCLKIN3, 24, 2 }, /* DU_DOTCLKIN3 */
2169 { PIN_FSCLKST, 20, 2 }, /* FSCLKST */
2170 { PIN_TMS, 4, 2 }, /* TMS */
2171 } },
2172 @@ -6240,8 +6240,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
2173 [31] = PIN_DU_DOTCLKIN1, /* DU_DOTCLKIN1 */
2174 } },
2175 { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
2176 - [ 0] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
2177 - [ 1] = SH_PFC_PIN_NONE,
2178 + [ 0] = SH_PFC_PIN_NONE,
2179 + [ 1] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
2180 [ 2] = PIN_FSCLKST, /* FSCLKST */
2181 [ 3] = PIN_EXTALR, /* EXTALR*/
2182 [ 4] = PIN_TRST_N, /* TRST# */
2183 diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
2184 index 292bace83f1e..6f436836fe50 100644
2185 --- a/drivers/platform/x86/intel_mid_powerbtn.c
2186 +++ b/drivers/platform/x86/intel_mid_powerbtn.c
2187 @@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev)
2188
2189 input_set_capability(input, EV_KEY, KEY_POWER);
2190
2191 - ddata = (struct mid_pb_ddata *)id->driver_data;
2192 + ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
2193 + sizeof(*ddata), GFP_KERNEL);
2194 if (!ddata)
2195 - return -ENODATA;
2196 + return -ENOMEM;
2197
2198 ddata->dev = &pdev->dev;
2199 ddata->irq = irq;
2200 diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
2201 index 033303708c8b..cb28bbdc9e17 100644
2202 --- a/drivers/rtc/rtc-cmos.c
2203 +++ b/drivers/rtc/rtc-cmos.c
2204 @@ -850,7 +850,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
2205 rtc_cmos_int_handler = cmos_interrupt;
2206
2207 retval = request_irq(rtc_irq, rtc_cmos_int_handler,
2208 - IRQF_SHARED, dev_name(&cmos_rtc.rtc->dev),
2209 + 0, dev_name(&cmos_rtc.rtc->dev),
2210 cmos_rtc.rtc);
2211 if (retval < 0) {
2212 dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
2213 diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
2214 index 443f6d05ce29..fb6d7967ec00 100644
2215 --- a/drivers/rtc/rtc-hym8563.c
2216 +++ b/drivers/rtc/rtc-hym8563.c
2217 @@ -97,7 +97,7 @@ static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
2218
2219 if (!hym8563->valid) {
2220 dev_warn(&client->dev, "no valid clock/calendar values available\n");
2221 - return -EPERM;
2222 + return -EINVAL;
2223 }
2224
2225 ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf);
2226 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2227 index 0d41a7dc1d6b..b0d6978d78bf 100644
2228 --- a/drivers/scsi/ufs/ufshcd.c
2229 +++ b/drivers/scsi/ufs/ufshcd.c
2230 @@ -6953,7 +6953,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
2231 ufshcd_init_icc_levels(hba);
2232
2233 /* Add required well known logical units to scsi mid layer */
2234 - if (ufshcd_scsi_add_wlus(hba))
2235 + ret = ufshcd_scsi_add_wlus(hba);
2236 + if (ret)
2237 goto out;
2238
2239 /* Initialize devfreq after UFS device is detected */
2240 diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
2241 index 5741ec3fa814..51850cc68b70 100644
2242 --- a/drivers/soc/qcom/rpmhpd.c
2243 +++ b/drivers/soc/qcom/rpmhpd.c
2244 @@ -93,6 +93,7 @@ static struct rpmhpd sdm845_mx = {
2245
2246 static struct rpmhpd sdm845_mx_ao = {
2247 .pd = { .name = "mx_ao", },
2248 + .active_only = true,
2249 .peer = &sdm845_mx,
2250 .res_name = "mx.lvl",
2251 };
2252 @@ -107,6 +108,7 @@ static struct rpmhpd sdm845_cx = {
2253
2254 static struct rpmhpd sdm845_cx_ao = {
2255 .pd = { .name = "cx_ao", },
2256 + .active_only = true,
2257 .peer = &sdm845_cx,
2258 .parent = &sdm845_mx_ao.pd,
2259 .res_name = "cx.lvl",
2260 diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
2261 index a494543d3ae1..eb47fe5ed280 100644
2262 --- a/drivers/watchdog/qcom-wdt.c
2263 +++ b/drivers/watchdog/qcom-wdt.c
2264 @@ -246,7 +246,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
2265 }
2266
2267 /* check if there is pretimeout support */
2268 - irq = platform_get_irq(pdev, 0);
2269 + irq = platform_get_irq_optional(pdev, 0);
2270 if (irq > 0) {
2271 ret = devm_request_irq(dev, irq, qcom_wdt_isr,
2272 IRQF_TRIGGER_RISING,
2273 diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
2274 index a3a329011a06..25188d6bbe15 100644
2275 --- a/drivers/watchdog/stm32_iwdg.c
2276 +++ b/drivers/watchdog/stm32_iwdg.c
2277 @@ -262,6 +262,24 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
2278 watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
2279 watchdog_init_timeout(wdd, 0, dev);
2280
2281 + /*
2282 + * In case of CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED is set
2283 +	 * In case CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED is set
2284 +	 * (meaning U-Boot/bootloaders leave the watchdog running),
2285 +	 * we should make a decision here to prevent any side
2286 +	 * effects before the user space daemon takes care of it.
2287 + * way to read values back from hardware, is to enforce watchdog
2288 + * being run with deterministic values.
2289 + */
2290 + if (IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) {
2291 + ret = stm32_iwdg_start(wdd);
2292 + if (ret)
2293 + return ret;
2294 +
2295 + /* Make sure the watchdog is serviced */
2296 + set_bit(WDOG_HW_RUNNING, &wdd->status);
2297 + }
2298 +
2299 ret = devm_watchdog_register_device(dev, wdd);
2300 if (ret)
2301 return ret;
2302 diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
2303 index 295a7a21b774..e7dd07f47825 100644
2304 --- a/fs/nfs/Kconfig
2305 +++ b/fs/nfs/Kconfig
2306 @@ -90,7 +90,7 @@ config NFS_V4
2307 config NFS_SWAP
2308 bool "Provide swap over NFS support"
2309 default n
2310 - depends on NFS_FS
2311 + depends on NFS_FS && SWAP
2312 select SUNRPC_SWAP
2313 help
2314 This option enables swapon to work on files located on NFS mounts.
2315 diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
2316 index 040a50fd9bf3..29f00da8a0b7 100644
2317 --- a/fs/nfs/direct.c
2318 +++ b/fs/nfs/direct.c
2319 @@ -245,10 +245,10 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
2320 data->ds_commit_index);
2321
2322 /* verifier not set so always fail */
2323 - if (verfp->committed < 0)
2324 + if (verfp->committed < 0 || data->res.verf->committed <= NFS_UNSTABLE)
2325 return 1;
2326
2327 - return nfs_direct_cmp_verf(verfp, &data->verf);
2328 + return nfs_direct_cmp_verf(verfp, data->res.verf);
2329 }
2330
2331 /**
2332 diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
2333 index 602767850b36..1f60ab2535ee 100644
2334 --- a/fs/nfs/nfs3xdr.c
2335 +++ b/fs/nfs/nfs3xdr.c
2336 @@ -2338,6 +2338,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
2337 void *data)
2338 {
2339 struct nfs_commitres *result = data;
2340 + struct nfs_writeverf *verf = result->verf;
2341 enum nfs_stat status;
2342 int error;
2343
2344 @@ -2350,7 +2351,9 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
2345 result->op_status = status;
2346 if (status != NFS3_OK)
2347 goto out_status;
2348 - error = decode_writeverf3(xdr, &result->verf->verifier);
2349 + error = decode_writeverf3(xdr, &verf->verifier);
2350 + if (!error)
2351 + verf->committed = NFS_FILE_SYNC;
2352 out:
2353 return error;
2354 out_status:
2355 diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
2356 index 16b2e5cc3e94..bb322d9de313 100644
2357 --- a/fs/nfs/nfs4_fs.h
2358 +++ b/fs/nfs/nfs4_fs.h
2359 @@ -439,9 +439,7 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *);
2360 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
2361 extern void nfs4_kill_renewd(struct nfs_client *);
2362 extern void nfs4_renew_state(struct work_struct *);
2363 -extern void nfs4_set_lease_period(struct nfs_client *clp,
2364 - unsigned long lease,
2365 - unsigned long lastrenewed);
2366 +extern void nfs4_set_lease_period(struct nfs_client *clp, unsigned long lease);
2367
2368
2369 /* nfs4state.c */
2370 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2371 index f26d714f9f28..423960d480f1 100644
2372 --- a/fs/nfs/nfs4proc.c
2373 +++ b/fs/nfs/nfs4proc.c
2374 @@ -3187,6 +3187,11 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
2375 exception.retry = 1;
2376 continue;
2377 }
2378 + if (status == -NFS4ERR_EXPIRED) {
2379 + nfs4_schedule_lease_recovery(server->nfs_client);
2380 + exception.retry = 1;
2381 + continue;
2382 + }
2383 if (status == -EAGAIN) {
2384 /* We must have found a delegation */
2385 exception.retry = 1;
2386 @@ -5019,16 +5024,13 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
2387 struct nfs4_exception exception = {
2388 .interruptible = true,
2389 };
2390 - unsigned long now = jiffies;
2391 int err;
2392
2393 do {
2394 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
2395 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
2396 if (err == 0) {
2397 - nfs4_set_lease_period(server->nfs_client,
2398 - fsinfo->lease_time * HZ,
2399 - now);
2400 + nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
2401 break;
2402 }
2403 err = nfs4_handle_exception(server, err, &exception);
2404 @@ -6084,6 +6086,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
2405 .callback_data = &setclientid,
2406 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
2407 };
2408 + unsigned long now = jiffies;
2409 int status;
2410
2411 /* nfs_client_id4 */
2412 @@ -6116,6 +6119,9 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
2413 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
2414 put_rpccred(setclientid.sc_cred);
2415 }
2416 +
2417 + if (status == 0)
2418 + do_renew_lease(clp, now);
2419 out:
2420 trace_nfs4_setclientid(clp, status);
2421 dprintk("NFS reply setclientid: %d\n", status);
2422 @@ -8199,6 +8205,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
2423 struct rpc_task *task;
2424 struct nfs41_exchange_id_args *argp;
2425 struct nfs41_exchange_id_res *resp;
2426 + unsigned long now = jiffies;
2427 int status;
2428
2429 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
2430 @@ -8219,6 +8226,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
2431 if (status != 0)
2432 goto out;
2433
2434 + do_renew_lease(clp, now);
2435 +
2436 clp->cl_clientid = resp->clientid;
2437 clp->cl_exchange_flags = resp->flags;
2438 clp->cl_seqid = resp->seqid;
2439 diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
2440 index 6ea431b067dd..ff876dda7f06 100644
2441 --- a/fs/nfs/nfs4renewd.c
2442 +++ b/fs/nfs/nfs4renewd.c
2443 @@ -138,15 +138,12 @@ nfs4_kill_renewd(struct nfs_client *clp)
2444 *
2445 * @clp: pointer to nfs_client
2446 * @lease: new value for lease period
2447 - * @lastrenewed: time at which lease was last renewed
2448 */
2449 void nfs4_set_lease_period(struct nfs_client *clp,
2450 - unsigned long lease,
2451 - unsigned long lastrenewed)
2452 + unsigned long lease)
2453 {
2454 spin_lock(&clp->cl_lock);
2455 clp->cl_lease_time = lease;
2456 - clp->cl_last_renewal = lastrenewed;
2457 spin_unlock(&clp->cl_lock);
2458
2459 /* Cap maximum reconnect timeout at 1/2 lease period */
2460 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2461 index 0c6d53dc3672..b53bcf40e2a7 100644
2462 --- a/fs/nfs/nfs4state.c
2463 +++ b/fs/nfs/nfs4state.c
2464 @@ -91,17 +91,15 @@ static int nfs4_setup_state_renewal(struct nfs_client *clp)
2465 {
2466 int status;
2467 struct nfs_fsinfo fsinfo;
2468 - unsigned long now;
2469
2470 if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
2471 nfs4_schedule_state_renewal(clp);
2472 return 0;
2473 }
2474
2475 - now = jiffies;
2476 status = nfs4_proc_get_lease_time(clp, &fsinfo);
2477 if (status == 0) {
2478 - nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
2479 + nfs4_set_lease_period(clp, fsinfo.lease_time * HZ);
2480 nfs4_schedule_state_renewal(clp);
2481 }
2482
2483 diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
2484 index b2f395fa7350..9398c0b6e0a3 100644
2485 --- a/fs/nfs/nfs4trace.h
2486 +++ b/fs/nfs/nfs4trace.h
2487 @@ -352,7 +352,7 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
2488 ),
2489
2490 TP_fast_assign(
2491 - __entry->error = error;
2492 + __entry->error = error < 0 ? -error : 0;
2493 __assign_str(dstaddr, clp->cl_hostname);
2494 ),
2495
2496 @@ -432,7 +432,8 @@ TRACE_EVENT(nfs4_sequence_done,
2497 __entry->target_highest_slotid =
2498 res->sr_target_highest_slotid;
2499 __entry->status_flags = res->sr_status_flags;
2500 - __entry->error = res->sr_status;
2501 + __entry->error = res->sr_status < 0 ?
2502 + -res->sr_status : 0;
2503 ),
2504 TP_printk(
2505 "error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
2506 @@ -566,7 +567,7 @@ TRACE_EVENT(nfs4_xdr_status,
2507 TP_PROTO(
2508 const struct xdr_stream *xdr,
2509 u32 op,
2510 - int error
2511 + u32 error
2512 ),
2513
2514 TP_ARGS(xdr, op, error),
2515 @@ -756,7 +757,7 @@ TRACE_EVENT(nfs4_close,
2516 __entry->fileid = NFS_FILEID(inode);
2517 __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
2518 __entry->fmode = (__force unsigned int)state->state;
2519 - __entry->error = error;
2520 + __entry->error = error < 0 ? -error : 0;
2521 __entry->stateid_seq =
2522 be32_to_cpu(args->stateid.seqid);
2523 __entry->stateid_hash =
2524 @@ -821,7 +822,7 @@ DECLARE_EVENT_CLASS(nfs4_lock_event,
2525 TP_fast_assign(
2526 const struct inode *inode = state->inode;
2527
2528 - __entry->error = error;
2529 + __entry->error = error < 0 ? -error : 0;
2530 __entry->cmd = cmd;
2531 __entry->type = request->fl_type;
2532 __entry->start = request->fl_start;
2533 @@ -893,7 +894,7 @@ TRACE_EVENT(nfs4_set_lock,
2534 TP_fast_assign(
2535 const struct inode *inode = state->inode;
2536
2537 - __entry->error = error;
2538 + __entry->error = error < 0 ? -error : 0;
2539 __entry->cmd = cmd;
2540 __entry->type = request->fl_type;
2541 __entry->start = request->fl_start;
2542 @@ -989,7 +990,7 @@ TRACE_EVENT(nfs4_delegreturn_exit,
2543 TP_fast_assign(
2544 __entry->dev = res->server->s_dev;
2545 __entry->fhandle = nfs_fhandle_hash(args->fhandle);
2546 - __entry->error = error;
2547 + __entry->error = error < 0 ? -error : 0;
2548 __entry->stateid_seq =
2549 be32_to_cpu(args->stateid->seqid);
2550 __entry->stateid_hash =
2551 @@ -1029,7 +1030,7 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
2552 TP_fast_assign(
2553 const struct inode *inode = state->inode;
2554
2555 - __entry->error = error;
2556 + __entry->error = error < 0 ? -error : 0;
2557 __entry->dev = inode->i_sb->s_dev;
2558 __entry->fileid = NFS_FILEID(inode);
2559 __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
2560 @@ -1131,7 +1132,7 @@ TRACE_EVENT(nfs4_lookupp,
2561 TP_fast_assign(
2562 __entry->dev = inode->i_sb->s_dev;
2563 __entry->ino = NFS_FILEID(inode);
2564 - __entry->error = error;
2565 + __entry->error = error < 0 ? -error : 0;
2566 ),
2567
2568 TP_printk(
2569 @@ -1167,7 +1168,7 @@ TRACE_EVENT(nfs4_rename,
2570 __entry->dev = olddir->i_sb->s_dev;
2571 __entry->olddir = NFS_FILEID(olddir);
2572 __entry->newdir = NFS_FILEID(newdir);
2573 - __entry->error = error;
2574 + __entry->error = error < 0 ? -error : 0;
2575 __assign_str(oldname, oldname->name);
2576 __assign_str(newname, newname->name);
2577 ),
2578 @@ -1258,7 +1259,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event,
2579 __entry->dev = inode->i_sb->s_dev;
2580 __entry->fileid = NFS_FILEID(inode);
2581 __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
2582 - __entry->error = error;
2583 + __entry->error = error < 0 ? -error : 0;
2584 __entry->stateid_seq =
2585 be32_to_cpu(stateid->seqid);
2586 __entry->stateid_hash =
2587 @@ -1314,7 +1315,7 @@ DECLARE_EVENT_CLASS(nfs4_getattr_event,
2588 __entry->valid = fattr->valid;
2589 __entry->fhandle = nfs_fhandle_hash(fhandle);
2590 __entry->fileid = (fattr->valid & NFS_ATTR_FATTR_FILEID) ? fattr->fileid : 0;
2591 - __entry->error = error;
2592 + __entry->error = error < 0 ? -error : 0;
2593 ),
2594
2595 TP_printk(
2596 @@ -1361,7 +1362,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
2597 ),
2598
2599 TP_fast_assign(
2600 - __entry->error = error;
2601 + __entry->error = error < 0 ? -error : 0;
2602 __entry->fhandle = nfs_fhandle_hash(fhandle);
2603 if (!IS_ERR_OR_NULL(inode)) {
2604 __entry->fileid = NFS_FILEID(inode);
2605 @@ -1418,7 +1419,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
2606 ),
2607
2608 TP_fast_assign(
2609 - __entry->error = error;
2610 + __entry->error = error < 0 ? -error : 0;
2611 __entry->fhandle = nfs_fhandle_hash(fhandle);
2612 if (!IS_ERR_OR_NULL(inode)) {
2613 __entry->fileid = NFS_FILEID(inode);
2614 @@ -1721,7 +1722,7 @@ TRACE_EVENT(nfs4_layoutget,
2615 __entry->iomode = args->iomode;
2616 __entry->offset = args->offset;
2617 __entry->count = args->length;
2618 - __entry->error = error;
2619 + __entry->error = error < 0 ? -error : 0;
2620 __entry->stateid_seq =
2621 be32_to_cpu(state->stateid.seqid);
2622 __entry->stateid_hash =
2623 diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
2624 index ab07db0f07cd..7c0ff1a3b591 100644
2625 --- a/fs/nfs/nfs4xdr.c
2626 +++ b/fs/nfs/nfs4xdr.c
2627 @@ -4316,11 +4316,14 @@ static int decode_write_verifier(struct xdr_stream *xdr, struct nfs_write_verifi
2628
2629 static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
2630 {
2631 + struct nfs_writeverf *verf = res->verf;
2632 int status;
2633
2634 status = decode_op_hdr(xdr, OP_COMMIT);
2635 if (!status)
2636 - status = decode_write_verifier(xdr, &res->verf->verifier);
2637 + status = decode_write_verifier(xdr, &verf->verifier);
2638 + if (!status)
2639 + verf->committed = NFS_FILE_SYNC;
2640 return status;
2641 }
2642
2643 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
2644 index bb80034a7661..443639cbb0cf 100644
2645 --- a/fs/nfs/pnfs.c
2646 +++ b/fs/nfs/pnfs.c
2647 @@ -1425,7 +1425,7 @@ retry:
2648 /* lo ref dropped in pnfs_roc_release() */
2649 layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
2650 /* If the creds don't match, we can't compound the layoutreturn */
2651 - if (!layoutreturn || cred != lo->plh_lc_cred)
2652 + if (!layoutreturn || cred_fscmp(cred, lo->plh_lc_cred) != 0)
2653 goto out_noroc;
2654
2655 roc = layoutreturn;
2656 diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
2657 index 82af4809b869..8b37e7f8e789 100644
2658 --- a/fs/nfs/pnfs_nfs.c
2659 +++ b/fs/nfs/pnfs_nfs.c
2660 @@ -31,12 +31,11 @@ EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
2661 /* Fake up some data that will cause nfs_commit_release to retry the writes. */
2662 void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
2663 {
2664 - struct nfs_page *first = nfs_list_entry(data->pages.next);
2665 + struct nfs_writeverf *verf = data->res.verf;
2666
2667 data->task.tk_status = 0;
2668 - memcpy(&data->verf.verifier, &first->wb_verf,
2669 - sizeof(data->verf.verifier));
2670 - data->verf.verifier.data[0]++; /* ensure verifier mismatch */
2671 + memset(&verf->verifier, 0, sizeof(verf->verifier));
2672 + verf->committed = NFS_UNSTABLE;
2673 }
2674 EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
2675
2676 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
2677 index 52cab65f91cf..913eb37c249b 100644
2678 --- a/fs/nfs/write.c
2679 +++ b/fs/nfs/write.c
2680 @@ -243,7 +243,15 @@ out:
2681 /* A writeback failed: mark the page as bad, and invalidate the page cache */
2682 static void nfs_set_pageerror(struct address_space *mapping)
2683 {
2684 + struct inode *inode = mapping->host;
2685 +
2686 nfs_zap_mapping(mapping->host, mapping);
2687 + /* Force file size revalidation */
2688 + spin_lock(&inode->i_lock);
2689 + NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED |
2690 + NFS_INO_REVAL_PAGECACHE |
2691 + NFS_INO_INVALID_SIZE;
2692 + spin_unlock(&inode->i_lock);
2693 }
2694
2695 static void nfs_mapping_set_error(struct page *page, int error)
2696 @@ -1829,6 +1837,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
2697
2698 static void nfs_commit_release_pages(struct nfs_commit_data *data)
2699 {
2700 + const struct nfs_writeverf *verf = data->res.verf;
2701 struct nfs_page *req;
2702 int status = data->task.tk_status;
2703 struct nfs_commit_info cinfo;
2704 @@ -1856,7 +1865,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
2705
2706 /* Okay, COMMIT succeeded, apparently. Check the verifier
2707 * returned by the server against all stored verfs. */
2708 - if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
2709 + if (verf->committed > NFS_UNSTABLE &&
2710 + !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) {
2711 /* We have a match */
2712 if (req->wb_page)
2713 nfs_inode_remove_request(req);
2714 diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
2715 index 44c52639db55..75c7b5ed53c5 100644
2716 --- a/include/rdma/ib_verbs.h
2717 +++ b/include/rdma/ib_verbs.h
2718 @@ -4252,6 +4252,9 @@ static inline int ib_check_mr_access(int flags)
2719 !(flags & IB_ACCESS_LOCAL_WRITE))
2720 return -EINVAL;
2721
2722 + if (flags & ~IB_ACCESS_SUPPORTED)
2723 + return -EINVAL;
2724 +
2725 return 0;
2726 }
2727
2728 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2729 index 8dacda4b0362..00743684a549 100644
2730 --- a/kernel/sched/core.c
2731 +++ b/kernel/sched/core.c
2732 @@ -7090,6 +7090,12 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
2733
2734 if (parent)
2735 sched_online_group(tg, parent);
2736 +
2737 +#ifdef CONFIG_UCLAMP_TASK_GROUP
2738 + /* Propagate the effective uclamp value for the new group */
2739 + cpu_util_update_eff(css);
2740 +#endif
2741 +
2742 return 0;
2743 }
2744
2745 diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
2746 index da5639a5bd3b..0147b26f585a 100644
2747 --- a/net/core/bpf_sk_storage.c
2748 +++ b/net/core/bpf_sk_storage.c
2749 @@ -643,9 +643,10 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
2750 return ERR_PTR(-ENOMEM);
2751 bpf_map_init_from_attr(&smap->map, attr);
2752
2753 + nbuckets = roundup_pow_of_two(num_possible_cpus());
2754 /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
2755 - smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
2756 - nbuckets = 1U << smap->bucket_log;
2757 + nbuckets = max_t(u32, 2, nbuckets);
2758 + smap->bucket_log = ilog2(nbuckets);
2759 cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
2760
2761 ret = bpf_map_charge_init(&smap->map.memory, cost);
2762 diff --git a/net/core/sock_map.c b/net/core/sock_map.c
2763 index 8998e356f423..085cef5857bb 100644
2764 --- a/net/core/sock_map.c
2765 +++ b/net/core/sock_map.c
2766 @@ -234,7 +234,6 @@ static void sock_map_free(struct bpf_map *map)
2767 int i;
2768
2769 synchronize_rcu();
2770 - rcu_read_lock();
2771 raw_spin_lock_bh(&stab->lock);
2772 for (i = 0; i < stab->map.max_entries; i++) {
2773 struct sock **psk = &stab->sks[i];
2774 @@ -243,13 +242,15 @@ static void sock_map_free(struct bpf_map *map)
2775 sk = xchg(psk, NULL);
2776 if (sk) {
2777 lock_sock(sk);
2778 + rcu_read_lock();
2779 sock_map_unref(sk, psk);
2780 + rcu_read_unlock();
2781 release_sock(sk);
2782 }
2783 }
2784 raw_spin_unlock_bh(&stab->lock);
2785 - rcu_read_unlock();
2786
2787 + /* wait for psock readers accessing its map link */
2788 synchronize_rcu();
2789
2790 bpf_map_area_free(stab->sks);
2791 @@ -416,14 +417,16 @@ static int sock_map_update_elem(struct bpf_map *map, void *key,
2792 ret = -EINVAL;
2793 goto out;
2794 }
2795 - if (!sock_map_sk_is_suitable(sk) ||
2796 - sk->sk_state != TCP_ESTABLISHED) {
2797 + if (!sock_map_sk_is_suitable(sk)) {
2798 ret = -EOPNOTSUPP;
2799 goto out;
2800 }
2801
2802 sock_map_sk_acquire(sk);
2803 - ret = sock_map_update_common(map, idx, sk, flags);
2804 + if (sk->sk_state != TCP_ESTABLISHED)
2805 + ret = -EOPNOTSUPP;
2806 + else
2807 + ret = sock_map_update_common(map, idx, sk, flags);
2808 sock_map_sk_release(sk);
2809 out:
2810 fput(sock->file);
2811 @@ -739,14 +742,16 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key,
2812 ret = -EINVAL;
2813 goto out;
2814 }
2815 - if (!sock_map_sk_is_suitable(sk) ||
2816 - sk->sk_state != TCP_ESTABLISHED) {
2817 + if (!sock_map_sk_is_suitable(sk)) {
2818 ret = -EOPNOTSUPP;
2819 goto out;
2820 }
2821
2822 sock_map_sk_acquire(sk);
2823 - ret = sock_hash_update_common(map, key, sk, flags);
2824 + if (sk->sk_state != TCP_ESTABLISHED)
2825 + ret = -EOPNOTSUPP;
2826 + else
2827 + ret = sock_hash_update_common(map, key, sk, flags);
2828 sock_map_sk_release(sk);
2829 out:
2830 fput(sock->file);
2831 @@ -859,19 +864,22 @@ static void sock_hash_free(struct bpf_map *map)
2832 int i;
2833
2834 synchronize_rcu();
2835 - rcu_read_lock();
2836 for (i = 0; i < htab->buckets_num; i++) {
2837 bucket = sock_hash_select_bucket(htab, i);
2838 raw_spin_lock_bh(&bucket->lock);
2839 hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
2840 hlist_del_rcu(&elem->node);
2841 lock_sock(elem->sk);
2842 + rcu_read_lock();
2843 sock_map_unref(elem->sk, elem);
2844 + rcu_read_unlock();
2845 release_sock(elem->sk);
2846 }
2847 raw_spin_unlock_bh(&bucket->lock);
2848 }
2849 - rcu_read_unlock();
2850 +
2851 + /* wait for psock readers accessing its map link */
2852 + synchronize_rcu();
2853
2854 bpf_map_area_free(htab->buckets);
2855 kfree(htab);
2856 diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
2857 index c443db7af8d4..463cefc1e5ae 100644
2858 --- a/net/vmw_vsock/hyperv_transport.c
2859 +++ b/net/vmw_vsock/hyperv_transport.c
2860 @@ -136,28 +136,15 @@ struct hvsock {
2861 ****************************************************************************
2862 * The only valid Service GUIDs, from the perspectives of both the host and *
2863 * Linux VM, that can be connected by the other end, must conform to this *
2864 - * format: <port>-facb-11e6-bd58-64006a7986d3, and the "port" must be in *
2865 - * this range [0, 0x7FFFFFFF]. *
2866 + * format: <port>-facb-11e6-bd58-64006a7986d3. *
2867 ****************************************************************************
2868 *
2869 * When we write apps on the host to connect(), the GUID ServiceID is used.
2870 * When we write apps in Linux VM to connect(), we only need to specify the
2871 * port and the driver will form the GUID and use that to request the host.
2872 *
2873 - * From the perspective of Linux VM:
2874 - * 1. the local ephemeral port (i.e. the local auto-bound port when we call
2875 - * connect() without explicit bind()) is generated by __vsock_bind_stream(),
2876 - * and the range is [1024, 0xFFFFFFFF).
2877 - * 2. the remote ephemeral port (i.e. the auto-generated remote port for
2878 - * a connect request initiated by the host's connect()) is generated by
2879 - * hvs_remote_addr_init() and the range is [0x80000000, 0xFFFFFFFF).
2880 */
2881
2882 -#define MAX_LISTEN_PORT ((u32)0x7FFFFFFF)
2883 -#define MAX_VM_LISTEN_PORT MAX_LISTEN_PORT
2884 -#define MAX_HOST_LISTEN_PORT MAX_LISTEN_PORT
2885 -#define MIN_HOST_EPHEMERAL_PORT (MAX_HOST_LISTEN_PORT + 1)
2886 -
2887 /* 00000000-facb-11e6-bd58-64006a7986d3 */
2888 static const guid_t srv_id_template =
2889 GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58,
2890 @@ -180,33 +167,6 @@ static void hvs_addr_init(struct sockaddr_vm *addr, const guid_t *svr_id)
2891 vsock_addr_init(addr, VMADDR_CID_ANY, port);
2892 }
2893
2894 -static void hvs_remote_addr_init(struct sockaddr_vm *remote,
2895 - struct sockaddr_vm *local)
2896 -{
2897 - static u32 host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
2898 - struct sock *sk;
2899 -
2900 - vsock_addr_init(remote, VMADDR_CID_ANY, VMADDR_PORT_ANY);
2901 -
2902 - while (1) {
2903 - /* Wrap around ? */
2904 - if (host_ephemeral_port < MIN_HOST_EPHEMERAL_PORT ||
2905 - host_ephemeral_port == VMADDR_PORT_ANY)
2906 - host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
2907 -
2908 - remote->svm_port = host_ephemeral_port++;
2909 -
2910 - sk = vsock_find_connected_socket(remote, local);
2911 - if (!sk) {
2912 - /* Found an available ephemeral port */
2913 - return;
2914 - }
2915 -
2916 - /* Release refcnt got in vsock_find_connected_socket */
2917 - sock_put(sk);
2918 - }
2919 -}
2920 -
2921 static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan)
2922 {
2923 set_channel_pending_send_size(chan,
2924 @@ -336,12 +296,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
2925 if_type = &chan->offermsg.offer.if_type;
2926 if_instance = &chan->offermsg.offer.if_instance;
2927 conn_from_host = chan->offermsg.offer.u.pipe.user_def[0];
2928 -
2929 - /* The host or the VM should only listen on a port in
2930 - * [0, MAX_LISTEN_PORT]
2931 - */
2932 - if (!is_valid_srv_id(if_type) ||
2933 - get_port_by_srv_id(if_type) > MAX_LISTEN_PORT)
2934 + if (!is_valid_srv_id(if_type))
2935 return;
2936
2937 hvs_addr_init(&addr, conn_from_host ? if_type : if_instance);
2938 @@ -365,6 +320,13 @@ static void hvs_open_connection(struct vmbus_channel *chan)
2939
2940 new->sk_state = TCP_SYN_SENT;
2941 vnew = vsock_sk(new);
2942 +
2943 + hvs_addr_init(&vnew->local_addr, if_type);
2944 +
2945 + /* Remote peer is always the host */
2946 + vsock_addr_init(&vnew->remote_addr,
2947 + VMADDR_CID_HOST, VMADDR_PORT_ANY);
2948 + vnew->remote_addr.svm_port = get_port_by_srv_id(if_instance);
2949 hvs_new = vnew->trans;
2950 hvs_new->chan = chan;
2951 } else {
2952 @@ -429,8 +391,6 @@ static void hvs_open_connection(struct vmbus_channel *chan)
2953 sk->sk_ack_backlog++;
2954
2955 hvs_addr_init(&vnew->local_addr, if_type);
2956 - hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr);
2957 -
2958 hvs_new->vm_srv_id = *if_type;
2959 hvs_new->host_srv_id = *if_instance;
2960
2961 @@ -753,16 +713,6 @@ static bool hvs_stream_is_active(struct vsock_sock *vsk)
2962
2963 static bool hvs_stream_allow(u32 cid, u32 port)
2964 {
2965 - /* The host's port range [MIN_HOST_EPHEMERAL_PORT, 0xFFFFFFFF) is
2966 - * reserved as ephemeral ports, which are used as the host's ports
2967 - * when the host initiates connections.
2968 - *
2969 - * Perform this check in the guest so an immediate error is produced
2970 - * instead of a timeout.
2971 - */
2972 - if (port > MAX_HOST_LISTEN_PORT)
2973 - return false;
2974 -
2975 if (cid == VMADDR_CID_HOST)
2976 return true;
2977
2978 diff --git a/security/selinux/avc.c b/security/selinux/avc.c
2979 index ecd3829996aa..23dc888ae305 100644
2980 --- a/security/selinux/avc.c
2981 +++ b/security/selinux/avc.c
2982 @@ -424,7 +424,7 @@ static inline int avc_xperms_audit(struct selinux_state *state,
2983 if (likely(!audited))
2984 return 0;
2985 return slow_avc_audit(state, ssid, tsid, tclass, requested,
2986 - audited, denied, result, ad, 0);
2987 + audited, denied, result, ad);
2988 }
2989
2990 static void avc_node_free(struct rcu_head *rhead)
2991 @@ -758,8 +758,7 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
2992 noinline int slow_avc_audit(struct selinux_state *state,
2993 u32 ssid, u32 tsid, u16 tclass,
2994 u32 requested, u32 audited, u32 denied, int result,
2995 - struct common_audit_data *a,
2996 - unsigned int flags)
2997 + struct common_audit_data *a)
2998 {
2999 struct common_audit_data stack_data;
3000 struct selinux_audit_data sad;
3001 @@ -772,17 +771,6 @@ noinline int slow_avc_audit(struct selinux_state *state,
3002 a->type = LSM_AUDIT_DATA_NONE;
3003 }
3004
3005 - /*
3006 - * When in a RCU walk do the audit on the RCU retry. This is because
3007 - * the collection of the dname in an inode audit message is not RCU
3008 - * safe. Note this may drop some audits when the situation changes
3009 - * during retry. However this is logically just as if the operation
3010 - * happened a little later.
3011 - */
3012 - if ((a->type == LSM_AUDIT_DATA_INODE) &&
3013 - (flags & MAY_NOT_BLOCK))
3014 - return -ECHILD;
3015 -
3016 sad.tclass = tclass;
3017 sad.requested = requested;
3018 sad.ssid = ssid;
3019 @@ -855,15 +843,14 @@ static int avc_update_node(struct selinux_avc *avc,
3020 /*
3021 * If we are in a non-blocking code path, e.g. VFS RCU walk,
3022 * then we must not add permissions to a cache entry
3023 - * because we cannot safely audit the denial. Otherwise,
3024 + * because we will not audit the denial. Otherwise,
3025 * during the subsequent blocking retry (e.g. VFS ref walk), we
3026 * will find the permissions already granted in the cache entry
3027 * and won't audit anything at all, leading to silent denials in
3028 * permissive mode that only appear when in enforcing mode.
3029 *
3030 - * See the corresponding handling in slow_avc_audit(), and the
3031 - * logic in selinux_inode_permission for the MAY_NOT_BLOCK flag,
3032 - * which is transliterated into AVC_NONBLOCKING.
3033 + * See the corresponding handling of MAY_NOT_BLOCK in avc_audit()
3034 + * and selinux_inode_permission().
3035 */
3036 if (flags & AVC_NONBLOCKING)
3037 return 0;
3038 @@ -1205,6 +1192,25 @@ int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
3039 return rc;
3040 }
3041
3042 +int avc_has_perm_flags(struct selinux_state *state,
3043 + u32 ssid, u32 tsid, u16 tclass, u32 requested,
3044 + struct common_audit_data *auditdata,
3045 + int flags)
3046 +{
3047 + struct av_decision avd;
3048 + int rc, rc2;
3049 +
3050 + rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
3051 + (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
3052 + &avd);
3053 +
3054 + rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
3055 + auditdata, flags);
3056 + if (rc2)
3057 + return rc2;
3058 + return rc;
3059 +}
3060 +
3061 u32 avc_policy_seqno(struct selinux_state *state)
3062 {
3063 return state->avc->avc_cache.latest_notif;
3064 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
3065 index 9625b99e677f..39410913a694 100644
3066 --- a/security/selinux/hooks.c
3067 +++ b/security/selinux/hooks.c
3068 @@ -2766,6 +2766,14 @@ static int selinux_mount(const char *dev_name,
3069 return path_has_perm(cred, path, FILE__MOUNTON);
3070 }
3071
3072 +static int selinux_move_mount(const struct path *from_path,
3073 + const struct path *to_path)
3074 +{
3075 + const struct cred *cred = current_cred();
3076 +
3077 + return path_has_perm(cred, to_path, FILE__MOUNTON);
3078 +}
3079 +
3080 static int selinux_umount(struct vfsmount *mnt, int flags)
3081 {
3082 const struct cred *cred = current_cred();
3083 @@ -3008,14 +3016,14 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
3084 if (IS_ERR(isec))
3085 return PTR_ERR(isec);
3086
3087 - return avc_has_perm(&selinux_state,
3088 - sid, isec->sid, isec->sclass, FILE__READ, &ad);
3089 + return avc_has_perm_flags(&selinux_state,
3090 + sid, isec->sid, isec->sclass, FILE__READ, &ad,
3091 + rcu ? MAY_NOT_BLOCK : 0);
3092 }
3093
3094 static noinline int audit_inode_permission(struct inode *inode,
3095 u32 perms, u32 audited, u32 denied,
3096 - int result,
3097 - unsigned flags)
3098 + int result)
3099 {
3100 struct common_audit_data ad;
3101 struct inode_security_struct *isec = selinux_inode(inode);
3102 @@ -3026,7 +3034,7 @@ static noinline int audit_inode_permission(struct inode *inode,
3103
3104 rc = slow_avc_audit(&selinux_state,
3105 current_sid(), isec->sid, isec->sclass, perms,
3106 - audited, denied, result, &ad, flags);
3107 + audited, denied, result, &ad);
3108 if (rc)
3109 return rc;
3110 return 0;
3111 @@ -3073,7 +3081,11 @@ static int selinux_inode_permission(struct inode *inode, int mask)
3112 if (likely(!audited))
3113 return rc;
3114
3115 - rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags);
3116 + /* fall back to ref-walk if we have to generate audit */
3117 + if (flags & MAY_NOT_BLOCK)
3118 + return -ECHILD;
3119 +
3120 + rc2 = audit_inode_permission(inode, perms, audited, denied, rc);
3121 if (rc2)
3122 return rc2;
3123 return rc;
3124 @@ -6834,6 +6846,8 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
3125 LSM_HOOK_INIT(sb_clone_mnt_opts, selinux_sb_clone_mnt_opts),
3126 LSM_HOOK_INIT(sb_add_mnt_opt, selinux_add_mnt_opt),
3127
3128 + LSM_HOOK_INIT(move_mount, selinux_move_mount),
3129 +
3130 LSM_HOOK_INIT(dentry_init_security, selinux_dentry_init_security),
3131 LSM_HOOK_INIT(dentry_create_files_as, selinux_dentry_create_files_as),
3132
3133 diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
3134 index 7be0e1e90e8b..cf4cc3ef959b 100644
3135 --- a/security/selinux/include/avc.h
3136 +++ b/security/selinux/include/avc.h
3137 @@ -100,8 +100,7 @@ static inline u32 avc_audit_required(u32 requested,
3138 int slow_avc_audit(struct selinux_state *state,
3139 u32 ssid, u32 tsid, u16 tclass,
3140 u32 requested, u32 audited, u32 denied, int result,
3141 - struct common_audit_data *a,
3142 - unsigned flags);
3143 + struct common_audit_data *a);
3144
3145 /**
3146 * avc_audit - Audit the granting or denial of permissions.
3147 @@ -135,9 +134,12 @@ static inline int avc_audit(struct selinux_state *state,
3148 audited = avc_audit_required(requested, avd, result, 0, &denied);
3149 if (likely(!audited))
3150 return 0;
3151 + /* fall back to ref-walk if we have to generate audit */
3152 + if (flags & MAY_NOT_BLOCK)
3153 + return -ECHILD;
3154 return slow_avc_audit(state, ssid, tsid, tclass,
3155 requested, audited, denied, result,
3156 - a, flags);
3157 + a);
3158 }
3159
3160 #define AVC_STRICT 1 /* Ignore permissive mode. */
3161 @@ -153,6 +155,11 @@ int avc_has_perm(struct selinux_state *state,
3162 u32 ssid, u32 tsid,
3163 u16 tclass, u32 requested,
3164 struct common_audit_data *auditdata);
3165 +int avc_has_perm_flags(struct selinux_state *state,
3166 + u32 ssid, u32 tsid,
3167 + u16 tclass, u32 requested,
3168 + struct common_audit_data *auditdata,
3169 + int flags);
3170
3171 int avc_has_extended_perms(struct selinux_state *state,
3172 u32 ssid, u32 tsid, u16 tclass, u32 requested,
3173 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
3174 index d07026a846b9..8712a91e0e3e 100644
3175 --- a/sound/soc/soc-pcm.c
3176 +++ b/sound/soc/soc-pcm.c
3177 @@ -2297,42 +2297,81 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
3178 }
3179 EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
3180
3181 +static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream,
3182 + int cmd, bool fe_first)
3183 +{
3184 + struct snd_soc_pcm_runtime *fe = substream->private_data;
3185 + int ret;
3186 +
3187 + /* call trigger on the frontend before the backend. */
3188 + if (fe_first) {
3189 + dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
3190 + fe->dai_link->name, cmd);
3191 +
3192 + ret = soc_pcm_trigger(substream, cmd);
3193 + if (ret < 0)
3194 + return ret;
3195 +
3196 + ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
3197 + return ret;
3198 + }
3199 +
3200 + /* call trigger on the frontend after the backend. */
3201 + ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
3202 + if (ret < 0)
3203 + return ret;
3204 +
3205 + dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
3206 + fe->dai_link->name, cmd);
3207 +
3208 + ret = soc_pcm_trigger(substream, cmd);
3209 +
3210 + return ret;
3211 +}
3212 +
3213 static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
3214 {
3215 struct snd_soc_pcm_runtime *fe = substream->private_data;
3216 - int stream = substream->stream, ret;
3217 + int stream = substream->stream;
3218 + int ret = 0;
3219 enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
3220
3221 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
3222
3223 switch (trigger) {
3224 case SND_SOC_DPCM_TRIGGER_PRE:
3225 - /* call trigger on the frontend before the backend. */
3226 -
3227 - dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
3228 - fe->dai_link->name, cmd);
3229 -
3230 - ret = soc_pcm_trigger(substream, cmd);
3231 - if (ret < 0) {
3232 - dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
3233 - goto out;
3234 + switch (cmd) {
3235 + case SNDRV_PCM_TRIGGER_START:
3236 + case SNDRV_PCM_TRIGGER_RESUME:
3237 + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
3238 + ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
3239 + break;
3240 + case SNDRV_PCM_TRIGGER_STOP:
3241 + case SNDRV_PCM_TRIGGER_SUSPEND:
3242 + case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
3243 + ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
3244 + break;
3245 + default:
3246 + ret = -EINVAL;
3247 + break;
3248 }
3249 -
3250 - ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
3251 break;
3252 case SND_SOC_DPCM_TRIGGER_POST:
3253 - /* call trigger on the frontend after the backend. */
3254 -
3255 - ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
3256 - if (ret < 0) {
3257 - dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
3258 - goto out;
3259 + switch (cmd) {
3260 + case SNDRV_PCM_TRIGGER_START:
3261 + case SNDRV_PCM_TRIGGER_RESUME:
3262 + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
3263 + ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
3264 + break;
3265 + case SNDRV_PCM_TRIGGER_STOP:
3266 + case SNDRV_PCM_TRIGGER_SUSPEND:
3267 + case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
3268 + ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
3269 + break;
3270 + default:
3271 + ret = -EINVAL;
3272 + break;
3273 }
3274 -
3275 - dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
3276 - fe->dai_link->name, cmd);
3277 -
3278 - ret = soc_pcm_trigger(substream, cmd);
3279 break;
3280 case SND_SOC_DPCM_TRIGGER_BESPOKE:
3281 /* bespoke trigger() - handles both FE and BEs */
3282 @@ -2341,10 +2380,6 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
3283 fe->dai_link->name, cmd);
3284
3285 ret = soc_pcm_bespoke_trigger(substream, cmd);
3286 - if (ret < 0) {
3287 - dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
3288 - goto out;
3289 - }
3290 break;
3291 default:
3292 dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
3293 @@ -2353,6 +2388,12 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
3294 goto out;
3295 }
3296
3297 + if (ret < 0) {
3298 + dev_err(fe->dev, "ASoC: trigger FE cmd: %d failed: %d\n",
3299 + cmd, ret);
3300 + goto out;
3301 + }
3302 +
3303 switch (cmd) {
3304 case SNDRV_PCM_TRIGGER_START:
3305 case SNDRV_PCM_TRIGGER_RESUME:
3306 diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
3307 index ea0bcd58bcb9..2e388421c32f 100644
3308 --- a/tools/bpf/bpftool/prog.c
3309 +++ b/tools/bpf/bpftool/prog.c
3310 @@ -500,7 +500,7 @@ static int do_dump(int argc, char **argv)
3311 buf = (unsigned char *)(info->jited_prog_insns);
3312 member_len = info->jited_prog_len;
3313 } else { /* DUMP_XLATED */
3314 - if (info->xlated_prog_len == 0) {
3315 + if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
3316 p_err("error retrieving insn dump: kernel.kptr_restrict set?");
3317 goto err_free;
3318 }
3319 diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
3320 index 0111d246d1ca..54a2857c2510 100644
3321 --- a/tools/power/acpi/Makefile.config
3322 +++ b/tools/power/acpi/Makefile.config
3323 @@ -15,7 +15,7 @@ include $(srctree)/../../scripts/Makefile.include
3324
3325 OUTPUT=$(srctree)/
3326 ifeq ("$(origin O)", "command line")
3327 - OUTPUT := $(O)/power/acpi/
3328 + OUTPUT := $(O)/tools/power/acpi/
3329 endif
3330 #$(info Determined 'OUTPUT' to be $(OUTPUT))
3331
3332 diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
3333 new file mode 100644
3334 index 000000000000..07f5b462c2ef
3335 --- /dev/null
3336 +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
3337 @@ -0,0 +1,74 @@
3338 +// SPDX-License-Identifier: GPL-2.0
3339 +// Copyright (c) 2020 Cloudflare
3340 +
3341 +#include "test_progs.h"
3342 +
3343 +static int connected_socket_v4(void)
3344 +{
3345 + struct sockaddr_in addr = {
3346 + .sin_family = AF_INET,
3347 + .sin_port = htons(80),
3348 + .sin_addr = { inet_addr("127.0.0.1") },
3349 + };
3350 + socklen_t len = sizeof(addr);
3351 + int s, repair, err;
3352 +
3353 + s = socket(AF_INET, SOCK_STREAM, 0);
3354 + if (CHECK_FAIL(s == -1))
3355 + goto error;
3356 +
3357 + repair = TCP_REPAIR_ON;
3358 + err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
3359 + if (CHECK_FAIL(err))
3360 + goto error;
3361 +
3362 + err = connect(s, (struct sockaddr *)&addr, len);
3363 + if (CHECK_FAIL(err))
3364 + goto error;
3365 +
3366 + repair = TCP_REPAIR_OFF_NO_WP;
3367 + err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
3368 + if (CHECK_FAIL(err))
3369 + goto error;
3370 +
3371 + return s;
3372 +error:
3373 + perror(__func__);
3374 + close(s);
3375 + return -1;
3376 +}
3377 +
3378 +/* Create a map, populate it with one socket, and free the map. */
3379 +static void test_sockmap_create_update_free(enum bpf_map_type map_type)
3380 +{
3381 + const int zero = 0;
3382 + int s, map, err;
3383 +
3384 + s = connected_socket_v4();
3385 + if (CHECK_FAIL(s == -1))
3386 + return;
3387 +
3388 + map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
3389 + if (CHECK_FAIL(map == -1)) {
3390 + perror("bpf_create_map");
3391 + goto out;
3392 + }
3393 +
3394 + err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
3395 + if (CHECK_FAIL(err)) {
3396 + perror("bpf_map_update");
3397 + goto out;
3398 + }
3399 +
3400 +out:
3401 + close(map);
3402 + close(s);
3403 +}
3404 +
3405 +void test_sockmap_basic(void)
3406 +{
3407 + if (test__start_subtest("sockmap create_update_free"))
3408 + test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
3409 + if (test__start_subtest("sockhash create_update_free"))
3410 + test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
3411 +}
3412 diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
3413 index 631d397ac81b..0a356aa91aa1 100644
3414 --- a/virt/kvm/arm/aarch32.c
3415 +++ b/virt/kvm/arm/aarch32.c
3416 @@ -15,6 +15,10 @@
3417 #include <asm/kvm_emulate.h>
3418 #include <asm/kvm_hyp.h>
3419
3420 +#define DFSR_FSC_EXTABT_LPAE 0x10
3421 +#define DFSR_FSC_EXTABT_nLPAE 0x08
3422 +#define DFSR_LPAE BIT(9)
3423 +
3424 /*
3425 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
3426 */
3427 @@ -181,10 +185,12 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
3428
3429 /* Give the guest an IMPLEMENTATION DEFINED exception */
3430 is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
3431 - if (is_lpae)
3432 - *fsr = 1 << 9 | 0x34;
3433 - else
3434 - *fsr = 0x14;
3435 + if (is_lpae) {
3436 + *fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
3437 + } else {
3438 + /* no need to shuffle FS[4] into DFSR[10] as its 0 */
3439 + *fsr = DFSR_FSC_EXTABT_nLPAE;
3440 + }
3441 }
3442
3443 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
3444 diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
3445 index e2bb5bd60227..6b222100608f 100644
3446 --- a/virt/kvm/arm/arch_timer.c
3447 +++ b/virt/kvm/arm/arch_timer.c
3448 @@ -805,6 +805,7 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
3449 switch (treg) {
3450 case TIMER_REG_TVAL:
3451 val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
3452 + val &= lower_32_bits(val);
3453 break;
3454
3455 case TIMER_REG_CTL:
3456 @@ -850,7 +851,7 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
3457 {
3458 switch (treg) {
3459 case TIMER_REG_TVAL:
3460 - timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
3461 + timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
3462 break;
3463
3464 case TIMER_REG_CTL:
3465 diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
3466 index f23c9cd5684f..ce7fa37987e1 100644
3467 --- a/virt/kvm/arm/mmu.c
3468 +++ b/virt/kvm/arm/mmu.c
3469 @@ -2147,7 +2147,8 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
3470 if (!kvm->arch.pgd)
3471 return 0;
3472 trace_kvm_test_age_hva(hva);
3473 - return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
3474 + return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
3475 + kvm_test_age_hva_handler, NULL);
3476 }
3477
3478 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
3479 diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
3480 index 8731dfeced8b..4c08fd009768 100644
3481 --- a/virt/kvm/arm/pmu.c
3482 +++ b/virt/kvm/arm/pmu.c
3483 @@ -480,25 +480,45 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
3484 */
3485 void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
3486 {
3487 + struct kvm_pmu *pmu = &vcpu->arch.pmu;
3488 int i;
3489 - u64 type, enable, reg;
3490
3491 - if (val == 0)
3492 + if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
3493 return;
3494
3495 - enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
3496 + /* Weed out disabled counters */
3497 + val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
3498 +
3499 for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
3500 + u64 type, reg;
3501 +
3502 if (!(val & BIT(i)))
3503 continue;
3504 - type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
3505 - & ARMV8_PMU_EVTYPE_EVENT;
3506 - if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
3507 - && (enable & BIT(i))) {
3508 - reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
3509 +
3510 + /* PMSWINC only applies to ... SW_INC! */
3511 + type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
3512 + type &= ARMV8_PMU_EVTYPE_EVENT;
3513 + if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
3514 + continue;
3515 +
3516 + /* increment this even SW_INC counter */
3517 + reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
3518 + reg = lower_32_bits(reg);
3519 + __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
3520 +
3521 + if (reg) /* no overflow on the low part */
3522 + continue;
3523 +
3524 + if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
3525 + /* increment the high counter */
3526 + reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
3527 reg = lower_32_bits(reg);
3528 - __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
3529 - if (!reg)
3530 - __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
3531 + __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
3532 + if (!reg) /* mark overflow on the high counter */
3533 + __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
3534 + } else {
3535 + /* mark overflow on low counter */
3536 + __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
3537 }
3538 }
3539 }
3540 diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
3541 index 2be6b66b3856..f8ad7096555d 100644
3542 --- a/virt/kvm/arm/vgic/vgic-its.c
3543 +++ b/virt/kvm/arm/vgic/vgic-its.c
3544 @@ -2472,7 +2472,8 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
3545 target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
3546 coll_id = val & KVM_ITS_CTE_ICID_MASK;
3547
3548 - if (target_addr >= atomic_read(&kvm->online_vcpus))
3549 + if (target_addr != COLLECTION_NOT_MAPPED &&
3550 + target_addr >= atomic_read(&kvm->online_vcpus))
3551 return -EINVAL;
3552
3553 collection = find_collection(its, coll_id);