Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0172-4.19.73-all-fixes.patch



Revision 3469
Tue Oct 29 10:31:29 2019 UTC by niro
File size: 301222 bytes
-linux-4.19.73
1 diff --git a/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt
2 new file mode 100644
3 index 000000000000..a30d63db3c8f
4 --- /dev/null
5 +++ b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt
6 @@ -0,0 +1,9 @@
7 +Armadeus ST0700 Adapt. A Santek ST0700I5Y-RBSLW 7.0" WVGA (800x480) TFT with
8 +an adapter board.
9 +
10 +Required properties:
11 +- compatible: "armadeus,st0700-adapt"
12 +- power-supply: see panel-common.txt
13 +
14 +Optional properties:
15 +- backlight: see panel-common.txt
16 diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
17 index 6c49db7f8ad2..e1fe02f3e3e9 100644
18 --- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
19 +++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
20 @@ -11,11 +11,13 @@ New driver handles the following
21
22 Required properties:
23 - compatible: Must be "samsung,exynos-adc-v1"
24 - for exynos4412/5250 and s5pv210 controllers.
25 + for Exynos5250 controllers.
26 Must be "samsung,exynos-adc-v2" for
27 future controllers.
28 Must be "samsung,exynos3250-adc" for
29 controllers compatible with ADC of Exynos3250.
30 + Must be "samsung,exynos4212-adc" for
31 + controllers compatible with ADC of Exynos4212 and Exynos4412.
32 Must be "samsung,exynos7-adc" for
33 the ADC in Exynos7 and compatibles
34 Must be "samsung,s3c2410-adc" for
35 @@ -28,6 +30,8 @@ Required properties:
36 the ADC in s3c2443 and compatibles
37 Must be "samsung,s3c6410-adc" for
38 the ADC in s3c6410 and compatibles
39 + Must be "samsung,s5pv210-adc" for
40 + the ADC in s5pv210 and compatibles
41 - reg: List of ADC register address range
42 - The base address and range of ADC register
43 - The base address and range of ADC_PHY register (every
44 diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
45 index f5a0923b34ca..c269dbe384fe 100644
46 --- a/Documentation/devicetree/bindings/mmc/mmc.txt
47 +++ b/Documentation/devicetree/bindings/mmc/mmc.txt
48 @@ -62,6 +62,10 @@ Optional properties:
49 be referred to mmc-pwrseq-simple.txt. But now it's reused as a tunable delay
50 waiting for I/O signalling and card power supply to be stable, regardless of
51 whether pwrseq-simple is used. Default to 10ms if no available.
52 +- supports-cqe : The presence of this property indicates that the corresponding
53 + MMC host controller supports HW command queue feature.
54 +- disable-cqe-dcmd: This property indicates that the MMC controller's command
55 + queue engine (CQE) does not support direct commands (DCMDs).
56
57 *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
58 polarity properties, we have to fix the meaning of the "normal" and "inverted"
59 diff --git a/Makefile b/Makefile
60 index ef80b1dfb753..9748fa3704bc 100644
61 --- a/Makefile
62 +++ b/Makefile
63 @@ -1,7 +1,7 @@
64 # SPDX-License-Identifier: GPL-2.0
65 VERSION = 4
66 PATCHLEVEL = 19
67 -SUBLEVEL = 72
68 +SUBLEVEL = 73
69 EXTRAVERSION =
70 NAME = "People's Front"
71
72 diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
73 index 5c6663321e87..215f515442e0 100644
74 --- a/arch/arc/kernel/troubleshoot.c
75 +++ b/arch/arc/kernel/troubleshoot.c
76 @@ -179,6 +179,12 @@ void show_regs(struct pt_regs *regs)
77 struct task_struct *tsk = current;
78 struct callee_regs *cregs;
79
80 + /*
81 + * generic code calls us with preemption disabled, but some calls
82 + * here could sleep, so re-enable to avoid lockdep splat
83 + */
84 + preempt_enable();
85 +
86 print_task_path_n_nm(tsk);
87 show_regs_print_info(KERN_INFO);
88
89 @@ -221,6 +227,8 @@ void show_regs(struct pt_regs *regs)
90 cregs = (struct callee_regs *)current->thread.callee_reg;
91 if (cregs)
92 show_callee_regs(cregs);
93 +
94 + preempt_disable();
95 }
96
97 void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
98 diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
99 index db6913094be3..4e8143de32e7 100644
100 --- a/arch/arc/mm/fault.c
101 +++ b/arch/arc/mm/fault.c
102 @@ -66,14 +66,12 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
103 struct vm_area_struct *vma = NULL;
104 struct task_struct *tsk = current;
105 struct mm_struct *mm = tsk->mm;
106 - siginfo_t info;
107 + int si_code = SEGV_MAPERR;
108 int ret;
109 vm_fault_t fault;
110 int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
111 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
112
113 - clear_siginfo(&info);
114 -
115 /*
116 * We fault-in kernel-space virtual memory on-demand. The
117 * 'reference' page table is init_mm.pgd.
118 @@ -83,16 +81,14 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
119 * only copy the information from the master page table,
120 * nothing more.
121 */
122 - if (address >= VMALLOC_START) {
123 + if (address >= VMALLOC_START && !user_mode(regs)) {
124 ret = handle_kernel_vaddr_fault(address);
125 if (unlikely(ret))
126 - goto bad_area_nosemaphore;
127 + goto no_context;
128 else
129 return;
130 }
131
132 - info.si_code = SEGV_MAPERR;
133 -
134 /*
135 * If we're in an interrupt or have no user
136 * context, we must not take the fault..
137 @@ -119,7 +115,7 @@ retry:
138 * we can handle it..
139 */
140 good_area:
141 - info.si_code = SEGV_ACCERR;
142 + si_code = SEGV_ACCERR;
143
144 /* Handle protection violation, execute on heap or stack */
145
146 @@ -143,12 +139,17 @@ good_area:
147 */
148 fault = handle_mm_fault(vma, address, flags);
149
150 - /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
151 if (unlikely(fatal_signal_pending(current))) {
152 - if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
153 - up_read(&mm->mmap_sem);
154 - if (user_mode(regs))
155 +
156 + /*
157 + * if fault retry, mmap_sem already relinquished by core mm
158 + * so OK to return to user mode (with signal handled first)
159 + */
160 + if (fault & VM_FAULT_RETRY) {
161 + if (!user_mode(regs))
162 + goto no_context;
163 return;
164 + }
165 }
166
167 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
168 @@ -195,15 +196,10 @@ good_area:
169 bad_area:
170 up_read(&mm->mmap_sem);
171
172 -bad_area_nosemaphore:
173 /* User mode accesses just cause a SIGSEGV */
174 if (user_mode(regs)) {
175 tsk->thread.fault_address = address;
176 - info.si_signo = SIGSEGV;
177 - info.si_errno = 0;
178 - /* info.si_code has been set above */
179 - info.si_addr = (void __user *)address;
180 - force_sig_info(SIGSEGV, &info, tsk);
181 + force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
182 return;
183 }
184
185 @@ -238,9 +234,5 @@ do_sigbus:
186 goto no_context;
187
188 tsk->thread.fault_address = address;
189 - info.si_signo = SIGBUS;
190 - info.si_errno = 0;
191 - info.si_code = BUS_ADRERR;
192 - info.si_addr = (void __user *)address;
193 - force_sig_info(SIGBUS, &info, tsk);
194 + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
195 }
196 diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
197 index 502a361d1fe9..15d6157b661d 100644
198 --- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
199 +++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
200 @@ -65,7 +65,7 @@
201 gpio-miso = <&gpio1 8 GPIO_ACTIVE_HIGH>;
202 gpio-mosi = <&gpio1 7 GPIO_ACTIVE_HIGH>;
203 /* Collides with pflash CE1, not so cool */
204 - cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
205 + cs-gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
206 num-chipselects = <1>;
207
208 panel: display@0 {
209 diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
210 index 78db67337ed4..54d056b01bb5 100644
211 --- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
212 +++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
213 @@ -386,10 +386,10 @@
214 #address-cells = <3>;
215 #size-cells = <2>;
216
217 - ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000
218 - 0x82000000 0 0x48000000 0x48000000 0 0x10000000>;
219 + ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>,
220 + <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>;
221
222 - interrupts = <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>;
223 + interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
224 interrupt-names = "msi";
225 #interrupt-cells = <1>;
226 interrupt-map-mask = <0 0 0 0x7>;
227 diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
228 index 3c42bf9fa061..708931b47090 100644
229 --- a/arch/arm/mach-davinci/devices-da8xx.c
230 +++ b/arch/arm/mach-davinci/devices-da8xx.c
231 @@ -704,6 +704,46 @@ static struct resource da8xx_gpio_resources[] = {
232 },
233 { /* interrupt */
234 .start = IRQ_DA8XX_GPIO0,
235 + .end = IRQ_DA8XX_GPIO0,
236 + .flags = IORESOURCE_IRQ,
237 + },
238 + {
239 + .start = IRQ_DA8XX_GPIO1,
240 + .end = IRQ_DA8XX_GPIO1,
241 + .flags = IORESOURCE_IRQ,
242 + },
243 + {
244 + .start = IRQ_DA8XX_GPIO2,
245 + .end = IRQ_DA8XX_GPIO2,
246 + .flags = IORESOURCE_IRQ,
247 + },
248 + {
249 + .start = IRQ_DA8XX_GPIO3,
250 + .end = IRQ_DA8XX_GPIO3,
251 + .flags = IORESOURCE_IRQ,
252 + },
253 + {
254 + .start = IRQ_DA8XX_GPIO4,
255 + .end = IRQ_DA8XX_GPIO4,
256 + .flags = IORESOURCE_IRQ,
257 + },
258 + {
259 + .start = IRQ_DA8XX_GPIO5,
260 + .end = IRQ_DA8XX_GPIO5,
261 + .flags = IORESOURCE_IRQ,
262 + },
263 + {
264 + .start = IRQ_DA8XX_GPIO6,
265 + .end = IRQ_DA8XX_GPIO6,
266 + .flags = IORESOURCE_IRQ,
267 + },
268 + {
269 + .start = IRQ_DA8XX_GPIO7,
270 + .end = IRQ_DA8XX_GPIO7,
271 + .flags = IORESOURCE_IRQ,
272 + },
273 + {
274 + .start = IRQ_DA8XX_GPIO8,
275 .end = IRQ_DA8XX_GPIO8,
276 .flags = IORESOURCE_IRQ,
277 },
278 diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
279 index 9f7d38d12c88..2b0f5d97ab7c 100644
280 --- a/arch/arm/mach-davinci/dm355.c
281 +++ b/arch/arm/mach-davinci/dm355.c
282 @@ -548,6 +548,36 @@ static struct resource dm355_gpio_resources[] = {
283 },
284 { /* interrupt */
285 .start = IRQ_DM355_GPIOBNK0,
286 + .end = IRQ_DM355_GPIOBNK0,
287 + .flags = IORESOURCE_IRQ,
288 + },
289 + {
290 + .start = IRQ_DM355_GPIOBNK1,
291 + .end = IRQ_DM355_GPIOBNK1,
292 + .flags = IORESOURCE_IRQ,
293 + },
294 + {
295 + .start = IRQ_DM355_GPIOBNK2,
296 + .end = IRQ_DM355_GPIOBNK2,
297 + .flags = IORESOURCE_IRQ,
298 + },
299 + {
300 + .start = IRQ_DM355_GPIOBNK3,
301 + .end = IRQ_DM355_GPIOBNK3,
302 + .flags = IORESOURCE_IRQ,
303 + },
304 + {
305 + .start = IRQ_DM355_GPIOBNK4,
306 + .end = IRQ_DM355_GPIOBNK4,
307 + .flags = IORESOURCE_IRQ,
308 + },
309 + {
310 + .start = IRQ_DM355_GPIOBNK5,
311 + .end = IRQ_DM355_GPIOBNK5,
312 + .flags = IORESOURCE_IRQ,
313 + },
314 + {
315 + .start = IRQ_DM355_GPIOBNK6,
316 .end = IRQ_DM355_GPIOBNK6,
317 .flags = IORESOURCE_IRQ,
318 },
319 diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
320 index abcf2a5ed89b..42665914166a 100644
321 --- a/arch/arm/mach-davinci/dm365.c
322 +++ b/arch/arm/mach-davinci/dm365.c
323 @@ -267,6 +267,41 @@ static struct resource dm365_gpio_resources[] = {
324 },
325 { /* interrupt */
326 .start = IRQ_DM365_GPIO0,
327 + .end = IRQ_DM365_GPIO0,
328 + .flags = IORESOURCE_IRQ,
329 + },
330 + {
331 + .start = IRQ_DM365_GPIO1,
332 + .end = IRQ_DM365_GPIO1,
333 + .flags = IORESOURCE_IRQ,
334 + },
335 + {
336 + .start = IRQ_DM365_GPIO2,
337 + .end = IRQ_DM365_GPIO2,
338 + .flags = IORESOURCE_IRQ,
339 + },
340 + {
341 + .start = IRQ_DM365_GPIO3,
342 + .end = IRQ_DM365_GPIO3,
343 + .flags = IORESOURCE_IRQ,
344 + },
345 + {
346 + .start = IRQ_DM365_GPIO4,
347 + .end = IRQ_DM365_GPIO4,
348 + .flags = IORESOURCE_IRQ,
349 + },
350 + {
351 + .start = IRQ_DM365_GPIO5,
352 + .end = IRQ_DM365_GPIO5,
353 + .flags = IORESOURCE_IRQ,
354 + },
355 + {
356 + .start = IRQ_DM365_GPIO6,
357 + .end = IRQ_DM365_GPIO6,
358 + .flags = IORESOURCE_IRQ,
359 + },
360 + {
361 + .start = IRQ_DM365_GPIO7,
362 .end = IRQ_DM365_GPIO7,
363 .flags = IORESOURCE_IRQ,
364 },
365 diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
366 index 0720da7809a6..de1ec6dc01e9 100644
367 --- a/arch/arm/mach-davinci/dm644x.c
368 +++ b/arch/arm/mach-davinci/dm644x.c
369 @@ -492,6 +492,26 @@ static struct resource dm644_gpio_resources[] = {
370 },
371 { /* interrupt */
372 .start = IRQ_GPIOBNK0,
373 + .end = IRQ_GPIOBNK0,
374 + .flags = IORESOURCE_IRQ,
375 + },
376 + {
377 + .start = IRQ_GPIOBNK1,
378 + .end = IRQ_GPIOBNK1,
379 + .flags = IORESOURCE_IRQ,
380 + },
381 + {
382 + .start = IRQ_GPIOBNK2,
383 + .end = IRQ_GPIOBNK2,
384 + .flags = IORESOURCE_IRQ,
385 + },
386 + {
387 + .start = IRQ_GPIOBNK3,
388 + .end = IRQ_GPIOBNK3,
389 + .flags = IORESOURCE_IRQ,
390 + },
391 + {
392 + .start = IRQ_GPIOBNK4,
393 .end = IRQ_GPIOBNK4,
394 .flags = IORESOURCE_IRQ,
395 },
396 diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
397 index 6bd2ed069d0d..d9b93e2806d2 100644
398 --- a/arch/arm/mach-davinci/dm646x.c
399 +++ b/arch/arm/mach-davinci/dm646x.c
400 @@ -442,6 +442,16 @@ static struct resource dm646x_gpio_resources[] = {
401 },
402 { /* interrupt */
403 .start = IRQ_DM646X_GPIOBNK0,
404 + .end = IRQ_DM646X_GPIOBNK0,
405 + .flags = IORESOURCE_IRQ,
406 + },
407 + {
408 + .start = IRQ_DM646X_GPIOBNK1,
409 + .end = IRQ_DM646X_GPIOBNK1,
410 + .flags = IORESOURCE_IRQ,
411 + },
412 + {
413 + .start = IRQ_DM646X_GPIOBNK2,
414 .end = IRQ_DM646X_GPIOBNK2,
415 .flags = IORESOURCE_IRQ,
416 },
417 diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
418 index 5089aa64088f..9a1ea8a46405 100644
419 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
420 +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
421 @@ -140,6 +140,7 @@
422 tx-fifo-depth = <16384>;
423 rx-fifo-depth = <16384>;
424 snps,multicast-filter-bins = <256>;
425 + altr,sysmgr-syscon = <&sysmgr 0x44 0>;
426 status = "disabled";
427 };
428
429 @@ -156,6 +157,7 @@
430 tx-fifo-depth = <16384>;
431 rx-fifo-depth = <16384>;
432 snps,multicast-filter-bins = <256>;
433 + altr,sysmgr-syscon = <&sysmgr 0x48 0>;
434 status = "disabled";
435 };
436
437 @@ -172,6 +174,7 @@
438 tx-fifo-depth = <16384>;
439 rx-fifo-depth = <16384>;
440 snps,multicast-filter-bins = <256>;
441 + altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
442 status = "disabled";
443 };
444
445 diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
446 index c142169a58fc..e9147e35b739 100644
447 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
448 +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
449 @@ -40,6 +40,7 @@
450 pinctrl-0 = <&usb30_host_drv>;
451 regulator-name = "vcc_host_5v";
452 regulator-always-on;
453 + regulator-boot-on;
454 vin-supply = <&vcc_sys>;
455 };
456
457 @@ -50,6 +51,7 @@
458 pinctrl-0 = <&usb20_host_drv>;
459 regulator-name = "vcc_host1_5v";
460 regulator-always-on;
461 + regulator-boot-on;
462 vin-supply = <&vcc_sys>;
463 };
464
465 diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
466 index 83a9aa3cf689..dd18d8174504 100644
467 --- a/arch/powerpc/include/asm/kvm_book3s.h
468 +++ b/arch/powerpc/include/asm/kvm_book3s.h
469 @@ -301,12 +301,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
470
471 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
472 {
473 - vcpu->arch.cr = val;
474 + vcpu->arch.regs.ccr = val;
475 }
476
477 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
478 {
479 - return vcpu->arch.cr;
480 + return vcpu->arch.regs.ccr;
481 }
482
483 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
484 diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
485 index dc435a5af7d6..14fa07c73f44 100644
486 --- a/arch/powerpc/include/asm/kvm_book3s_64.h
487 +++ b/arch/powerpc/include/asm/kvm_book3s_64.h
488 @@ -482,7 +482,7 @@ static inline u64 sanitize_msr(u64 msr)
489 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
490 static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
491 {
492 - vcpu->arch.cr = vcpu->arch.cr_tm;
493 + vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
494 vcpu->arch.regs.xer = vcpu->arch.xer_tm;
495 vcpu->arch.regs.link = vcpu->arch.lr_tm;
496 vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
497 @@ -499,7 +499,7 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
498
499 static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
500 {
501 - vcpu->arch.cr_tm = vcpu->arch.cr;
502 + vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
503 vcpu->arch.xer_tm = vcpu->arch.regs.xer;
504 vcpu->arch.lr_tm = vcpu->arch.regs.link;
505 vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
506 diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
507 index d513e3ed1c65..f0cef625f17c 100644
508 --- a/arch/powerpc/include/asm/kvm_booke.h
509 +++ b/arch/powerpc/include/asm/kvm_booke.h
510 @@ -46,12 +46,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
511
512 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
513 {
514 - vcpu->arch.cr = val;
515 + vcpu->arch.regs.ccr = val;
516 }
517
518 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
519 {
520 - return vcpu->arch.cr;
521 + return vcpu->arch.regs.ccr;
522 }
523
524 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
525 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
526 index 2b6049e83970..2f95e38f0549 100644
527 --- a/arch/powerpc/include/asm/kvm_host.h
528 +++ b/arch/powerpc/include/asm/kvm_host.h
529 @@ -538,8 +538,6 @@ struct kvm_vcpu_arch {
530 ulong tar;
531 #endif
532
533 - u32 cr;
534 -
535 #ifdef CONFIG_PPC_BOOK3S
536 ulong hflags;
537 ulong guest_owned_ext;
538 diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
539 index b694d6af1150..ae953958c0f3 100644
540 --- a/arch/powerpc/include/asm/mmu_context.h
541 +++ b/arch/powerpc/include/asm/mmu_context.h
542 @@ -217,12 +217,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
543 #endif
544 }
545
546 -static inline int arch_dup_mmap(struct mm_struct *oldmm,
547 - struct mm_struct *mm)
548 -{
549 - return 0;
550 -}
551 -
552 #ifndef CONFIG_PPC_BOOK3S_64
553 static inline void arch_exit_mmap(struct mm_struct *mm)
554 {
555 @@ -247,6 +241,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
556 #ifdef CONFIG_PPC_MEM_KEYS
557 bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
558 bool execute, bool foreign);
559 +void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
560 #else /* CONFIG_PPC_MEM_KEYS */
561 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
562 bool write, bool execute, bool foreign)
563 @@ -259,6 +254,7 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
564 #define thread_pkey_regs_save(thread)
565 #define thread_pkey_regs_restore(new_thread, old_thread)
566 #define thread_pkey_regs_init(thread)
567 +#define arch_dup_pkeys(oldmm, mm)
568
569 static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
570 {
571 @@ -267,5 +263,12 @@ static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
572
573 #endif /* CONFIG_PPC_MEM_KEYS */
574
575 +static inline int arch_dup_mmap(struct mm_struct *oldmm,
576 + struct mm_struct *mm)
577 +{
578 + arch_dup_pkeys(oldmm, mm);
579 + return 0;
580 +}
581 +
582 #endif /* __KERNEL__ */
583 #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
584 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
585 index e5b314ed054e..640a4d818772 100644
586 --- a/arch/powerpc/include/asm/reg.h
587 +++ b/arch/powerpc/include/asm/reg.h
588 @@ -118,11 +118,16 @@
589 #define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */
590 #define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
591 #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
592 -#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
593 #define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
594 #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
595 #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
596
597 +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
598 +#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
599 +#else
600 +#define MSR_TM_ACTIVE(x) 0
601 +#endif
602 +
603 #if defined(CONFIG_PPC_BOOK3S_64)
604 #define MSR_64BIT MSR_SF
605
606 diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
607 index 89cf15566c4e..7c3738d890e8 100644
608 --- a/arch/powerpc/kernel/asm-offsets.c
609 +++ b/arch/powerpc/kernel/asm-offsets.c
610 @@ -438,7 +438,7 @@ int main(void)
611 #ifdef CONFIG_PPC_BOOK3S
612 OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
613 #endif
614 - OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
615 + OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
616 OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
617 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
618 OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
619 @@ -695,7 +695,7 @@ int main(void)
620 #endif /* CONFIG_PPC_BOOK3S_64 */
621
622 #else /* CONFIG_PPC_BOOK3S */
623 - OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
624 + OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
625 OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
626 OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
627 OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
628 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
629 index 9168a247e24f..3fb564f3e887 100644
630 --- a/arch/powerpc/kernel/head_64.S
631 +++ b/arch/powerpc/kernel/head_64.S
632 @@ -906,6 +906,7 @@ p_toc: .8byte __toc_start + 0x8000 - 0b
633 /*
634 * This is where the main kernel code starts.
635 */
636 +__REF
637 start_here_multiplatform:
638 /* set up the TOC */
639 bl relative_toc
640 @@ -981,6 +982,7 @@ start_here_multiplatform:
641 RFI
642 b . /* prevent speculative execution */
643
644 + .previous
645 /* This is where all platforms converge execution */
646
647 start_here_common:
648 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
649 index d29f2dca725b..909c9407e392 100644
650 --- a/arch/powerpc/kernel/process.c
651 +++ b/arch/powerpc/kernel/process.c
652 @@ -102,27 +102,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
653 }
654 }
655
656 -static inline bool msr_tm_active(unsigned long msr)
657 -{
658 - return MSR_TM_ACTIVE(msr);
659 -}
660 -
661 -static bool tm_active_with_fp(struct task_struct *tsk)
662 -{
663 - return msr_tm_active(tsk->thread.regs->msr) &&
664 - (tsk->thread.ckpt_regs.msr & MSR_FP);
665 -}
666 -
667 -static bool tm_active_with_altivec(struct task_struct *tsk)
668 -{
669 - return msr_tm_active(tsk->thread.regs->msr) &&
670 - (tsk->thread.ckpt_regs.msr & MSR_VEC);
671 -}
672 #else
673 -static inline bool msr_tm_active(unsigned long msr) { return false; }
674 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
675 -static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
676 -static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
677 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
678
679 bool strict_msr_control;
680 @@ -247,7 +228,8 @@ void enable_kernel_fp(void)
681 * giveup as this would save to the 'live' structure not the
682 * checkpointed structure.
683 */
684 - if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
685 + if (!MSR_TM_ACTIVE(cpumsr) &&
686 + MSR_TM_ACTIVE(current->thread.regs->msr))
687 return;
688 __giveup_fpu(current);
689 }
690 @@ -256,7 +238,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
691
692 static int restore_fp(struct task_struct *tsk)
693 {
694 - if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
695 + if (tsk->thread.load_fp) {
696 load_fp_state(&current->thread.fp_state);
697 current->thread.load_fp++;
698 return 1;
699 @@ -311,7 +293,8 @@ void enable_kernel_altivec(void)
700 * giveup as this would save to the 'live' structure not the
701 * checkpointed structure.
702 */
703 - if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
704 + if (!MSR_TM_ACTIVE(cpumsr) &&
705 + MSR_TM_ACTIVE(current->thread.regs->msr))
706 return;
707 __giveup_altivec(current);
708 }
709 @@ -337,8 +320,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
710
711 static int restore_altivec(struct task_struct *tsk)
712 {
713 - if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
714 - (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
715 + if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
716 load_vr_state(&tsk->thread.vr_state);
717 tsk->thread.used_vr = 1;
718 tsk->thread.load_vec++;
719 @@ -397,7 +379,8 @@ void enable_kernel_vsx(void)
720 * giveup as this would save to the 'live' structure not the
721 * checkpointed structure.
722 */
723 - if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
724 + if (!MSR_TM_ACTIVE(cpumsr) &&
725 + MSR_TM_ACTIVE(current->thread.regs->msr))
726 return;
727 __giveup_vsx(current);
728 }
729 @@ -499,13 +482,14 @@ void giveup_all(struct task_struct *tsk)
730 if (!tsk->thread.regs)
731 return;
732
733 + check_if_tm_restore_required(tsk);
734 +
735 usermsr = tsk->thread.regs->msr;
736
737 if ((usermsr & msr_all_available) == 0)
738 return;
739
740 msr_check_and_set(msr_all_available);
741 - check_if_tm_restore_required(tsk);
742
743 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
744
745 @@ -530,7 +514,7 @@ void restore_math(struct pt_regs *regs)
746 {
747 unsigned long msr;
748
749 - if (!msr_tm_active(regs->msr) &&
750 + if (!MSR_TM_ACTIVE(regs->msr) &&
751 !current->thread.load_fp && !loadvec(current->thread))
752 return;
753
754 diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
755 index 68e14afecac8..a488c105b923 100644
756 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
757 +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
758 @@ -744,12 +744,15 @@ void kvmppc_rmap_reset(struct kvm *kvm)
759 srcu_idx = srcu_read_lock(&kvm->srcu);
760 slots = kvm_memslots(kvm);
761 kvm_for_each_memslot(memslot, slots) {
762 + /* Mutual exclusion with kvm_unmap_hva_range etc. */
763 + spin_lock(&kvm->mmu_lock);
764 /*
765 * This assumes it is acceptable to lose reference and
766 * change bits across a reset.
767 */
768 memset(memslot->arch.rmap, 0,
769 memslot->npages * sizeof(*memslot->arch.rmap));
770 + spin_unlock(&kvm->mmu_lock);
771 }
772 srcu_read_unlock(&kvm->srcu, srcu_idx);
773 }
774 diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
775 index 36b11c5a0dbb..2654df220d05 100644
776 --- a/arch/powerpc/kvm/book3s_emulate.c
777 +++ b/arch/powerpc/kvm/book3s_emulate.c
778 @@ -110,7 +110,7 @@ static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
779 vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
780 vcpu->arch.tar_tm = vcpu->arch.tar;
781 vcpu->arch.lr_tm = vcpu->arch.regs.link;
782 - vcpu->arch.cr_tm = vcpu->arch.cr;
783 + vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
784 vcpu->arch.xer_tm = vcpu->arch.regs.xer;
785 vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
786 }
787 @@ -129,7 +129,7 @@ static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
788 vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
789 vcpu->arch.tar = vcpu->arch.tar_tm;
790 vcpu->arch.regs.link = vcpu->arch.lr_tm;
791 - vcpu->arch.cr = vcpu->arch.cr_tm;
792 + vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
793 vcpu->arch.regs.xer = vcpu->arch.xer_tm;
794 vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
795 }
796 @@ -141,7 +141,7 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
797 uint64_t texasr;
798
799 /* CR0 = 0 | MSR[TS] | 0 */
800 - vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
801 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
802 (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
803 << CR0_SHIFT);
804
805 @@ -220,7 +220,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
806 tm_abort(ra_val);
807
808 /* CR0 = 0 | MSR[TS] | 0 */
809 - vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
810 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
811 (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
812 << CR0_SHIFT);
813
814 @@ -494,8 +494,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
815
816 if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
817 preempt_disable();
818 - vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
819 - (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));
820 + vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
821 + (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));
822
823 vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
824 (((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
825 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
826 index 083dcedba11c..05b32cc12e41 100644
827 --- a/arch/powerpc/kvm/book3s_hv.c
828 +++ b/arch/powerpc/kvm/book3s_hv.c
829 @@ -410,8 +410,8 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
830 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
831 pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
832 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
833 - pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
834 - vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
835 + pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n",
836 + vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
837 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
838 pr_err("fault dar = %.16lx dsisr = %.8x\n",
839 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
840 @@ -3813,12 +3813,15 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
841 /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
842 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
843 {
844 + kvmppc_rmap_reset(kvm);
845 + kvm->arch.process_table = 0;
846 + /* Mutual exclusion with kvm_unmap_hva_range etc. */
847 + spin_lock(&kvm->mmu_lock);
848 + kvm->arch.radix = 0;
849 + spin_unlock(&kvm->mmu_lock);
850 kvmppc_free_radix(kvm);
851 kvmppc_update_lpcr(kvm, LPCR_VPM1,
852 LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
853 - kvmppc_rmap_reset(kvm);
854 - kvm->arch.radix = 0;
855 - kvm->arch.process_table = 0;
856 return 0;
857 }
858
859 @@ -3831,10 +3834,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
860 if (err)
861 return err;
862
863 + kvmppc_rmap_reset(kvm);
864 + /* Mutual exclusion with kvm_unmap_hva_range etc. */
865 + spin_lock(&kvm->mmu_lock);
866 + kvm->arch.radix = 1;
867 + spin_unlock(&kvm->mmu_lock);
868 kvmppc_free_hpt(&kvm->arch.hpt);
869 kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
870 LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
871 - kvm->arch.radix = 1;
872 return 0;
873 }
874
875 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
876 index 1d14046124a0..68c7591f2b5f 100644
877 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
878 +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
879 @@ -56,6 +56,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
880 #define STACK_SLOT_DAWR (SFS-56)
881 #define STACK_SLOT_DAWRX (SFS-64)
882 #define STACK_SLOT_HFSCR (SFS-72)
883 +#define STACK_SLOT_AMR (SFS-80)
884 +#define STACK_SLOT_UAMOR (SFS-88)
885
886 /*
887 * Call kvmppc_hv_entry in real mode.
888 @@ -760,11 +762,9 @@ BEGIN_FTR_SECTION
889 mfspr r5, SPRN_TIDR
890 mfspr r6, SPRN_PSSCR
891 mfspr r7, SPRN_PID
892 - mfspr r8, SPRN_IAMR
893 std r5, STACK_SLOT_TID(r1)
894 std r6, STACK_SLOT_PSSCR(r1)
895 std r7, STACK_SLOT_PID(r1)
896 - std r8, STACK_SLOT_IAMR(r1)
897 mfspr r5, SPRN_HFSCR
898 std r5, STACK_SLOT_HFSCR(r1)
899 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
900 @@ -772,11 +772,18 @@ BEGIN_FTR_SECTION
901 mfspr r5, SPRN_CIABR
902 mfspr r6, SPRN_DAWR
903 mfspr r7, SPRN_DAWRX
904 + mfspr r8, SPRN_IAMR
905 std r5, STACK_SLOT_CIABR(r1)
906 std r6, STACK_SLOT_DAWR(r1)
907 std r7, STACK_SLOT_DAWRX(r1)
908 + std r8, STACK_SLOT_IAMR(r1)
909 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
910
911 + mfspr r5, SPRN_AMR
912 + std r5, STACK_SLOT_AMR(r1)
913 + mfspr r6, SPRN_UAMOR
914 + std r6, STACK_SLOT_UAMOR(r1)
915 +
916 BEGIN_FTR_SECTION
917 /* Set partition DABR */
918 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
919 @@ -1202,7 +1209,7 @@ BEGIN_FTR_SECTION
920 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
921
922 ld r5, VCPU_LR(r4)
923 - lwz r6, VCPU_CR(r4)
924 + ld r6, VCPU_CR(r4)
925 mtlr r5
926 mtcr r6
927
928 @@ -1313,7 +1320,7 @@ kvmppc_interrupt_hv:
929 std r3, VCPU_GPR(R12)(r9)
930 /* CR is in the high half of r12 */
931 srdi r4, r12, 32
932 - stw r4, VCPU_CR(r9)
933 + std r4, VCPU_CR(r9)
934 BEGIN_FTR_SECTION
935 ld r3, HSTATE_CFAR(r13)
936 std r3, VCPU_CFAR(r9)
937 @@ -1713,22 +1720,25 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
938 mtspr SPRN_PSPB, r0
939 mtspr SPRN_WORT, r0
940 BEGIN_FTR_SECTION
941 - mtspr SPRN_IAMR, r0
942 mtspr SPRN_TCSCR, r0
943 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
944 li r0, 1
945 sldi r0, r0, 31
946 mtspr SPRN_MMCRS, r0
947 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
948 -8:
949
950 - /* Save and reset AMR and UAMOR before turning on the MMU */
951 + /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
952 + ld r8, STACK_SLOT_IAMR(r1)
953 + mtspr SPRN_IAMR, r8
954 +
955 +8: /* Power7 jumps back in here */
956 mfspr r5,SPRN_AMR
957 mfspr r6,SPRN_UAMOR
958 std r5,VCPU_AMR(r9)
959 std r6,VCPU_UAMOR(r9)
960 - li r6,0
961 - mtspr SPRN_AMR,r6
962 + ld r5,STACK_SLOT_AMR(r1)
963 + ld r6,STACK_SLOT_UAMOR(r1)
964 + mtspr SPRN_AMR, r5
965 mtspr SPRN_UAMOR, r6
966
967 /* Switch DSCR back to host value */
968 @@ -1897,11 +1907,9 @@ BEGIN_FTR_SECTION
969 ld r5, STACK_SLOT_TID(r1)
970 ld r6, STACK_SLOT_PSSCR(r1)
971 ld r7, STACK_SLOT_PID(r1)
972 - ld r8, STACK_SLOT_IAMR(r1)
973 mtspr SPRN_TIDR, r5
974 mtspr SPRN_PSSCR, r6
975 mtspr SPRN_PID, r7
976 - mtspr SPRN_IAMR, r8
977 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
978
979 #ifdef CONFIG_PPC_RADIX_MMU
980 diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
981 index 008285058f9b..31cd0f327c8a 100644
982 --- a/arch/powerpc/kvm/book3s_hv_tm.c
983 +++ b/arch/powerpc/kvm/book3s_hv_tm.c
984 @@ -130,8 +130,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
985 return RESUME_GUEST;
986 }
987 /* Set CR0 to indicate previous transactional state */
988 - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
989 - (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
990 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
991 + (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
992 /* L=1 => tresume, L=0 => tsuspend */
993 if (instr & (1 << 21)) {
994 if (MSR_TM_SUSPENDED(msr))
995 @@ -174,8 +174,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
996 copy_from_checkpoint(vcpu);
997
998 /* Set CR0 to indicate previous transactional state */
999 - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
1000 - (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
1001 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
1002 + (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
1003 vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
1004 return RESUME_GUEST;
1005
1006 @@ -204,8 +204,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
1007 copy_to_checkpoint(vcpu);
1008
1009 /* Set CR0 to indicate previous transactional state */
1010 - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
1011 - (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
1012 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
1013 + (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
1014 vcpu->arch.shregs.msr = msr | MSR_TS_S;
1015 return RESUME_GUEST;
1016 }
1017 diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
1018 index b2c7c6fca4f9..3cf5863bc06e 100644
1019 --- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
1020 +++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
1021 @@ -89,7 +89,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
1022 if (instr & (1 << 21))
1023 vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
1024 /* Set CR0 to 0b0010 */
1025 - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000;
1026 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
1027 + 0x20000000;
1028 return 1;
1029 }
1030
1031 @@ -105,5 +106,5 @@ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
1032 vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */
1033 vcpu->arch.regs.nip = vcpu->arch.tfhar;
1034 copy_from_checkpoint(vcpu);
1035 - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
1036 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000;
1037 }
1038 diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
1039 index 614ebb4261f7..de9702219dee 100644
1040 --- a/arch/powerpc/kvm/book3s_pr.c
1041 +++ b/arch/powerpc/kvm/book3s_pr.c
1042 @@ -167,7 +167,7 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
1043 svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
1044 svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
1045 svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
1046 - svcpu->cr = vcpu->arch.cr;
1047 + svcpu->cr = vcpu->arch.regs.ccr;
1048 svcpu->xer = vcpu->arch.regs.xer;
1049 svcpu->ctr = vcpu->arch.regs.ctr;
1050 svcpu->lr = vcpu->arch.regs.link;
1051 @@ -249,7 +249,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
1052 vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
1053 vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
1054 vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
1055 - vcpu->arch.cr = svcpu->cr;
1056 + vcpu->arch.regs.ccr = svcpu->cr;
1057 vcpu->arch.regs.xer = svcpu->xer;
1058 vcpu->arch.regs.ctr = svcpu->ctr;
1059 vcpu->arch.regs.link = svcpu->lr;
1060 diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
1061 index 612b7f6a887f..4e5081e58409 100644
1062 --- a/arch/powerpc/kvm/bookehv_interrupts.S
1063 +++ b/arch/powerpc/kvm/bookehv_interrupts.S
1064 @@ -186,7 +186,7 @@ END_BTB_FLUSH_SECTION
1065 */
1066 PPC_LL r4, PACACURRENT(r13)
1067 PPC_LL r4, (THREAD + THREAD_KVM_VCPU)(r4)
1068 - stw r10, VCPU_CR(r4)
1069 + PPC_STL r10, VCPU_CR(r4)
1070 PPC_STL r11, VCPU_GPR(R4)(r4)
1071 PPC_STL r5, VCPU_GPR(R5)(r4)
1072 PPC_STL r6, VCPU_GPR(R6)(r4)
1073 @@ -296,7 +296,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
1074 PPC_STL r4, VCPU_GPR(R4)(r11)
1075 PPC_LL r4, THREAD_NORMSAVE(0)(r10)
1076 PPC_STL r5, VCPU_GPR(R5)(r11)
1077 - stw r13, VCPU_CR(r11)
1078 + PPC_STL r13, VCPU_CR(r11)
1079 mfspr r5, \srr0
1080 PPC_STL r3, VCPU_GPR(R10)(r11)
1081 PPC_LL r3, THREAD_NORMSAVE(2)(r10)
1082 @@ -323,7 +323,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
1083 PPC_STL r4, VCPU_GPR(R4)(r11)
1084 PPC_LL r4, GPR9(r8)
1085 PPC_STL r5, VCPU_GPR(R5)(r11)
1086 - stw r9, VCPU_CR(r11)
1087 + PPC_STL r9, VCPU_CR(r11)
1088 mfspr r5, \srr0
1089 PPC_STL r3, VCPU_GPR(R8)(r11)
1090 PPC_LL r3, GPR10(r8)
1091 @@ -647,7 +647,7 @@ lightweight_exit:
1092 PPC_LL r3, VCPU_LR(r4)
1093 PPC_LL r5, VCPU_XER(r4)
1094 PPC_LL r6, VCPU_CTR(r4)
1095 - lwz r7, VCPU_CR(r4)
1096 + PPC_LL r7, VCPU_CR(r4)
1097 PPC_LL r8, VCPU_PC(r4)
1098 PPC_LD(r9, VCPU_SHARED_MSR, r11)
1099 PPC_LL r0, VCPU_GPR(R0)(r4)
1100 diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
1101 index 75dce1ef3bc8..f91b1309a0a8 100644
1102 --- a/arch/powerpc/kvm/emulate_loadstore.c
1103 +++ b/arch/powerpc/kvm/emulate_loadstore.c
1104 @@ -117,7 +117,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
1105
1106 emulated = EMULATE_FAIL;
1107 vcpu->arch.regs.msr = vcpu->arch.shared->msr;
1108 - vcpu->arch.regs.ccr = vcpu->arch.cr;
1109 if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
1110 int type = op.type & INSTR_TYPE_MASK;
1111 int size = GETSIZE(op.type);
1112 diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
1113 index f23a89d8e4ce..29fd8940867e 100644
1114 --- a/arch/powerpc/mm/hash_utils_64.c
1115 +++ b/arch/powerpc/mm/hash_utils_64.c
1116 @@ -1859,11 +1859,20 @@ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
1117 *
1118 * For guests on platforms before POWER9, we clamp the it limit to 1G
1119 * to avoid some funky things such as RTAS bugs etc...
1120 + *
1121 + * On POWER9 we limit to 1TB in case the host erroneously told us that
1122 + * the RMA was >1TB. Effective address bits 0:23 are treated as zero
1123 + * (meaning the access is aliased to zero i.e. addr = addr % 1TB)
1124 + * for virtual real mode addressing and so it doesn't make sense to
1125 + * have an area larger than 1TB as it can't be addressed.
1126 */
1127 if (!early_cpu_has_feature(CPU_FTR_HVMODE)) {
1128 ppc64_rma_size = first_memblock_size;
1129 if (!early_cpu_has_feature(CPU_FTR_ARCH_300))
1130 ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000);
1131 + else
1132 + ppc64_rma_size = min_t(u64, ppc64_rma_size,
1133 + 1UL << SID_SHIFT_1T);
1134
1135 /* Finally limit subsequent allocations */
1136 memblock_set_current_limit(ppc64_rma_size);
1137 diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
1138 index b271b283c785..25a8dd9cd71d 100644
1139 --- a/arch/powerpc/mm/pkeys.c
1140 +++ b/arch/powerpc/mm/pkeys.c
1141 @@ -414,3 +414,13 @@ bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
1142
1143 return pkey_access_permitted(vma_pkey(vma), write, execute);
1144 }
1145 +
1146 +void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
1147 +{
1148 + if (static_branch_likely(&pkey_disabled))
1149 + return;
1150 +
1151 + /* Duplicate the oldmm pkey state in mm: */
1152 + mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
1153 + mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
1154 +}
1155 diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
1156 index c433f6d3dd64..a840b7d074f7 100644
1157 --- a/arch/riscv/kernel/ftrace.c
1158 +++ b/arch/riscv/kernel/ftrace.c
1159 @@ -132,7 +132,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
1160 {
1161 unsigned long return_hooker = (unsigned long)&return_to_handler;
1162 unsigned long old;
1163 - int err;
1164
1165 if (unlikely(atomic_read(&current->tracing_graph_pause)))
1166 return;
1167 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
1168 index 3245b95ad2d9..0d3f5cf3ff3e 100644
1169 --- a/arch/x86/include/asm/kvm_host.h
1170 +++ b/arch/x86/include/asm/kvm_host.h
1171 @@ -117,7 +117,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
1172 }
1173
1174 #define KVM_PERMILLE_MMU_PAGES 20
1175 -#define KVM_MIN_ALLOC_MMU_PAGES 64
1176 +#define KVM_MIN_ALLOC_MMU_PAGES 64UL
1177 #define KVM_MMU_HASH_SHIFT 12
1178 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
1179 #define KVM_MIN_FREE_MMU_PAGES 5
1180 @@ -784,6 +784,9 @@ struct kvm_hv {
1181 u64 hv_reenlightenment_control;
1182 u64 hv_tsc_emulation_control;
1183 u64 hv_tsc_emulation_status;
1184 +
1185 + /* How many vCPUs have VP index != vCPU index */
1186 + atomic_t num_mismatched_vp_indexes;
1187 };
1188
1189 enum kvm_irqchip_mode {
1190 @@ -793,9 +796,9 @@ enum kvm_irqchip_mode {
1191 };
1192
1193 struct kvm_arch {
1194 - unsigned int n_used_mmu_pages;
1195 - unsigned int n_requested_mmu_pages;
1196 - unsigned int n_max_mmu_pages;
1197 + unsigned long n_used_mmu_pages;
1198 + unsigned long n_requested_mmu_pages;
1199 + unsigned long n_max_mmu_pages;
1200 unsigned int indirect_shadow_pages;
1201 unsigned long mmu_valid_gen;
1202 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
1203 @@ -1198,8 +1201,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1204 gfn_t gfn_offset, unsigned long mask);
1205 void kvm_mmu_zap_all(struct kvm *kvm);
1206 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
1207 -unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
1208 -void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
1209 +unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
1210 +void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
1211
1212 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
1213 bool pdptrs_changed(struct kvm_vcpu *vcpu);
1214 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
1215 index 50d309662d78..5790671857e5 100644
1216 --- a/arch/x86/kernel/ftrace.c
1217 +++ b/arch/x86/kernel/ftrace.c
1218 @@ -53,7 +53,7 @@ int ftrace_arch_code_modify_post_process(void)
1219 union ftrace_code_union {
1220 char code[MCOUNT_INSN_SIZE];
1221 struct {
1222 - unsigned char e8;
1223 + unsigned char op;
1224 int offset;
1225 } __attribute__((packed));
1226 };
1227 @@ -63,20 +63,23 @@ static int ftrace_calc_offset(long ip, long addr)
1228 return (int)(addr - ip);
1229 }
1230
1231 -static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
1232 +static unsigned char *
1233 +ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr)
1234 {
1235 static union ftrace_code_union calc;
1236
1237 - calc.e8 = 0xe8;
1238 + calc.op = op;
1239 calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
1240
1241 - /*
1242 - * No locking needed, this must be called via kstop_machine
1243 - * which in essence is like running on a uniprocessor machine.
1244 - */
1245 return calc.code;
1246 }
1247
1248 +static unsigned char *
1249 +ftrace_call_replace(unsigned long ip, unsigned long addr)
1250 +{
1251 + return ftrace_text_replace(0xe8, ip, addr);
1252 +}
1253 +
1254 static inline int
1255 within(unsigned long addr, unsigned long start, unsigned long end)
1256 {
1257 @@ -686,22 +689,6 @@ int __init ftrace_dyn_arch_init(void)
1258 return 0;
1259 }
1260
1261 -#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
1262 -static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
1263 -{
1264 - static union ftrace_code_union calc;
1265 -
1266 - /* Jmp not a call (ignore the .e8) */
1267 - calc.e8 = 0xe9;
1268 - calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
1269 -
1270 - /*
1271 - * ftrace external locks synchronize the access to the static variable.
1272 - */
1273 - return calc.code;
1274 -}
1275 -#endif
1276 -
1277 /* Currently only x86_64 supports dynamic trampolines */
1278 #ifdef CONFIG_X86_64
1279
1280 @@ -923,8 +910,8 @@ static void *addr_from_call(void *ptr)
1281 return NULL;
1282
1283 /* Make sure this is a call */
1284 - if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
1285 - pr_warn("Expected e8, got %x\n", calc.e8);
1286 + if (WARN_ON_ONCE(calc.op != 0xe8)) {
1287 + pr_warn("Expected e8, got %x\n", calc.op);
1288 return NULL;
1289 }
1290
1291 @@ -995,6 +982,11 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
1292 #ifdef CONFIG_DYNAMIC_FTRACE
1293 extern void ftrace_graph_call(void);
1294
1295 +static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
1296 +{
1297 + return ftrace_text_replace(0xe9, ip, addr);
1298 +}
1299 +
1300 static int ftrace_mod_jmp(unsigned long ip, void *func)
1301 {
1302 unsigned char *new;
1303 diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
1304 index 013fe3d21dbb..2ec202cb9dfd 100644
1305 --- a/arch/x86/kernel/kvmclock.c
1306 +++ b/arch/x86/kernel/kvmclock.c
1307 @@ -117,12 +117,8 @@ static u64 kvm_sched_clock_read(void)
1308
1309 static inline void kvm_sched_clock_init(bool stable)
1310 {
1311 - if (!stable) {
1312 - pv_time_ops.sched_clock = kvm_clock_read;
1313 + if (!stable)
1314 clear_sched_clock_stable();
1315 - return;
1316 - }
1317 -
1318 kvm_sched_clock_offset = kvm_clock_read();
1319 pv_time_ops.sched_clock = kvm_sched_clock_read;
1320
1321 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
1322 index b4866badb235..90ecc108bc8a 100644
1323 --- a/arch/x86/kernel/setup.c
1324 +++ b/arch/x86/kernel/setup.c
1325 @@ -1251,7 +1251,7 @@ void __init setup_arch(char **cmdline_p)
1326 x86_init.hyper.guest_late_init();
1327
1328 e820__reserve_resources();
1329 - e820__register_nosave_regions(max_low_pfn);
1330 + e820__register_nosave_regions(max_pfn);
1331
1332 x86_init.resources.reserve_resources();
1333
1334 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
1335 index 4a688ef9e448..429728b35bca 100644
1336 --- a/arch/x86/kvm/emulate.c
1337 +++ b/arch/x86/kvm/emulate.c
1338 @@ -2331,12 +2331,16 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
1339
1340 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
1341 {
1342 +#ifdef CONFIG_X86_64
1343 u32 eax, ebx, ecx, edx;
1344
1345 eax = 0x80000001;
1346 ecx = 0;
1347 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
1348 return edx & bit(X86_FEATURE_LM);
1349 +#else
1350 + return false;
1351 +#endif
1352 }
1353
1354 #define GET_SMSTATE(type, smbase, offset) \
1355 @@ -2381,6 +2385,7 @@ static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
1356 return X86EMUL_CONTINUE;
1357 }
1358
1359 +#ifdef CONFIG_X86_64
1360 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
1361 {
1362 struct desc_struct desc;
1363 @@ -2399,6 +2404,7 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
1364 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
1365 return X86EMUL_CONTINUE;
1366 }
1367 +#endif
1368
1369 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
1370 u64 cr0, u64 cr3, u64 cr4)
1371 @@ -2499,6 +2505,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
1372 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
1373 }
1374
1375 +#ifdef CONFIG_X86_64
1376 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
1377 {
1378 struct desc_struct desc;
1379 @@ -2560,6 +2567,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
1380
1381 return X86EMUL_CONTINUE;
1382 }
1383 +#endif
1384
1385 static int em_rsm(struct x86_emulate_ctxt *ctxt)
1386 {
1387 @@ -2616,9 +2624,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
1388 if (ctxt->ops->pre_leave_smm(ctxt, smbase))
1389 return X86EMUL_UNHANDLEABLE;
1390
1391 +#ifdef CONFIG_X86_64
1392 if (emulator_has_longmode(ctxt))
1393 ret = rsm_load_state_64(ctxt, smbase + 0x8000);
1394 else
1395 +#endif
1396 ret = rsm_load_state_32(ctxt, smbase + 0x8000);
1397
1398 if (ret != X86EMUL_CONTINUE) {
1399 diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
1400 index 229d99605165..5842c5f587fe 100644
1401 --- a/arch/x86/kvm/hyperv.c
1402 +++ b/arch/x86/kvm/hyperv.c
1403 @@ -132,8 +132,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
1404 struct kvm_vcpu *vcpu = NULL;
1405 int i;
1406
1407 - if (vpidx < KVM_MAX_VCPUS)
1408 - vcpu = kvm_get_vcpu(kvm, vpidx);
1409 + if (vpidx >= KVM_MAX_VCPUS)
1410 + return NULL;
1411 +
1412 + vcpu = kvm_get_vcpu(kvm, vpidx);
1413 if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
1414 return vcpu;
1415 kvm_for_each_vcpu(i, vcpu, kvm)
1416 @@ -689,6 +691,24 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
1417 stimer_cleanup(&hv_vcpu->stimer[i]);
1418 }
1419
1420 +bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
1421 +{
1422 + if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
1423 + return false;
1424 + return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
1425 +}
1426 +EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
1427 +
1428 +bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
1429 + struct hv_vp_assist_page *assist_page)
1430 +{
1431 + if (!kvm_hv_assist_page_enabled(vcpu))
1432 + return false;
1433 + return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
1434 + assist_page, sizeof(*assist_page));
1435 +}
1436 +EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
1437 +
1438 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
1439 {
1440 struct hv_message *msg = &stimer->msg;
1441 @@ -1040,21 +1060,41 @@ static u64 current_task_runtime_100ns(void)
1442
1443 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1444 {
1445 - struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
1446 + struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
1447
1448 switch (msr) {
1449 - case HV_X64_MSR_VP_INDEX:
1450 - if (!host)
1451 + case HV_X64_MSR_VP_INDEX: {
1452 + struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
1453 + int vcpu_idx = kvm_vcpu_get_idx(vcpu);
1454 + u32 new_vp_index = (u32)data;
1455 +
1456 + if (!host || new_vp_index >= KVM_MAX_VCPUS)
1457 return 1;
1458 - hv->vp_index = (u32)data;
1459 +
1460 + if (new_vp_index == hv_vcpu->vp_index)
1461 + return 0;
1462 +
1463 + /*
1464 + * The VP index is initialized to vcpu_index by
1465 + * kvm_hv_vcpu_postcreate so they initially match. Now the
1466 + * VP index is changing, adjust num_mismatched_vp_indexes if
1467 + * it now matches or no longer matches vcpu_idx.
1468 + */
1469 + if (hv_vcpu->vp_index == vcpu_idx)
1470 + atomic_inc(&hv->num_mismatched_vp_indexes);
1471 + else if (new_vp_index == vcpu_idx)
1472 + atomic_dec(&hv->num_mismatched_vp_indexes);
1473 +
1474 + hv_vcpu->vp_index = new_vp_index;
1475 break;
1476 + }
1477 case HV_X64_MSR_VP_ASSIST_PAGE: {
1478 u64 gfn;
1479 unsigned long addr;
1480
1481 if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
1482 - hv->hv_vapic = data;
1483 - if (kvm_lapic_enable_pv_eoi(vcpu, 0))
1484 + hv_vcpu->hv_vapic = data;
1485 + if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
1486 return 1;
1487 break;
1488 }
1489 @@ -1064,10 +1104,11 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1490 return 1;
1491 if (__clear_user((void __user *)addr, PAGE_SIZE))
1492 return 1;
1493 - hv->hv_vapic = data;
1494 + hv_vcpu->hv_vapic = data;
1495 kvm_vcpu_mark_page_dirty(vcpu, gfn);
1496 if (kvm_lapic_enable_pv_eoi(vcpu,
1497 - gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
1498 + gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
1499 + sizeof(struct hv_vp_assist_page)))
1500 return 1;
1501 break;
1502 }
1503 @@ -1080,7 +1121,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1504 case HV_X64_MSR_VP_RUNTIME:
1505 if (!host)
1506 return 1;
1507 - hv->runtime_offset = data - current_task_runtime_100ns();
1508 + hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
1509 break;
1510 case HV_X64_MSR_SCONTROL:
1511 case HV_X64_MSR_SVERSION:
1512 @@ -1172,11 +1213,11 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1513 bool host)
1514 {
1515 u64 data = 0;
1516 - struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
1517 + struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
1518
1519 switch (msr) {
1520 case HV_X64_MSR_VP_INDEX:
1521 - data = hv->vp_index;
1522 + data = hv_vcpu->vp_index;
1523 break;
1524 case HV_X64_MSR_EOI:
1525 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1526 @@ -1185,10 +1226,10 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1527 case HV_X64_MSR_TPR:
1528 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1529 case HV_X64_MSR_VP_ASSIST_PAGE:
1530 - data = hv->hv_vapic;
1531 + data = hv_vcpu->hv_vapic;
1532 break;
1533 case HV_X64_MSR_VP_RUNTIME:
1534 - data = current_task_runtime_100ns() + hv->runtime_offset;
1535 + data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1536 break;
1537 case HV_X64_MSR_SCONTROL:
1538 case HV_X64_MSR_SVERSION:
1539 diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
1540 index d6aa969e20f1..0e66c12ed2c3 100644
1541 --- a/arch/x86/kvm/hyperv.h
1542 +++ b/arch/x86/kvm/hyperv.h
1543 @@ -62,6 +62,10 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
1544 void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
1545 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
1546
1547 +bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
1548 +bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
1549 + struct hv_vp_assist_page *assist_page);
1550 +
1551 static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
1552 int timer_index)
1553 {
1554 diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
1555 index faa264822cee..007bc654f928 100644
1556 --- a/arch/x86/kvm/irq.c
1557 +++ b/arch/x86/kvm/irq.c
1558 @@ -172,3 +172,10 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
1559 __kvm_migrate_apic_timer(vcpu);
1560 __kvm_migrate_pit_timer(vcpu);
1561 }
1562 +
1563 +bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
1564 +{
1565 + bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;
1566 +
1567 + return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm);
1568 +}
1569 diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
1570 index d5005cc26521..fd210cdd4983 100644
1571 --- a/arch/x86/kvm/irq.h
1572 +++ b/arch/x86/kvm/irq.h
1573 @@ -114,6 +114,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
1574 return mode != KVM_IRQCHIP_NONE;
1575 }
1576
1577 +bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
1578 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
1579 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
1580 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
1581 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
1582 index 5f5bc5976804..262e49301cae 100644
1583 --- a/arch/x86/kvm/lapic.c
1584 +++ b/arch/x86/kvm/lapic.c
1585 @@ -2633,17 +2633,25 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
1586 return 0;
1587 }
1588
1589 -int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
1590 +int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
1591 {
1592 u64 addr = data & ~KVM_MSR_ENABLED;
1593 + struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
1594 + unsigned long new_len;
1595 +
1596 if (!IS_ALIGNED(addr, 4))
1597 return 1;
1598
1599 vcpu->arch.pv_eoi.msr_val = data;
1600 if (!pv_eoi_enabled(vcpu))
1601 return 0;
1602 - return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
1603 - addr, sizeof(u8));
1604 +
1605 + if (addr == ghc->gpa && len <= ghc->len)
1606 + new_len = ghc->len;
1607 + else
1608 + new_len = len;
1609 +
1610 + return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
1611 }
1612
1613 void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
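The reworked kvm_lapic_enable_pv_eoi() now takes the mapped length, so the same helper serves both the one-byte KVM PV-EOI MSR (sizeof(u8), in the x86.c hunk further down) and the much larger Hyper-V VP assist page (sizeof(struct hv_vp_assist_page), in the hyperv.c hunk above). The cache is only re-initialised with a larger length; re-registering the same address with a smaller or equal length keeps the existing mapping. A minimal user-space sketch of just that length selection, with the cache struct reduced to the two fields used here:

#include <stdint.h>
#include <stdio.h>

struct ghc { uint64_t gpa; unsigned long len; };

/* Mirrors the new_len choice in kvm_lapic_enable_pv_eoi(): keep the cached
 * length for a repeat registration of the same address, otherwise take the
 * caller's length. */
static unsigned long pick_len(const struct ghc *ghc, uint64_t addr,
                              unsigned long len)
{
        return (addr == ghc->gpa && len <= ghc->len) ? ghc->len : len;
}

int main(void)
{
        struct ghc c = { .gpa = 0x1000, .len = 64 };

        printf("%lu\n", pick_len(&c, 0x1000, 1));   /* 64: cache kept */
        printf("%lu\n", pick_len(&c, 0x1000, 128)); /* 128: grown */
        printf("%lu\n", pick_len(&c, 0x2000, 1));   /* 1: new address */
        return 0;
}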
1614 diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
1615 index ed0ed39abd36..ff6ef9c3d760 100644
1616 --- a/arch/x86/kvm/lapic.h
1617 +++ b/arch/x86/kvm/lapic.h
1618 @@ -120,7 +120,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
1619 return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
1620 }
1621
1622 -int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
1623 +int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len);
1624 void kvm_lapic_init(void);
1625 void kvm_lapic_exit(void);
1626
1627 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1628 index cdc0c460950f..88940261fb53 100644
1629 --- a/arch/x86/kvm/mmu.c
1630 +++ b/arch/x86/kvm/mmu.c
1631 @@ -1954,7 +1954,7 @@ static int is_empty_shadow_page(u64 *spt)
1632 * aggregate version in order to make the slab shrinker
1633 * faster
1634 */
1635 -static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
1636 +static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
1637 {
1638 kvm->arch.n_used_mmu_pages += nr;
1639 percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1640 @@ -2704,7 +2704,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
1641 * Changing the number of mmu pages allocated to the vm
1642 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock
1643 */
1644 -void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
1645 +void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
1646 {
1647 LIST_HEAD(invalid_list);
1648
1649 @@ -5926,10 +5926,10 @@ out:
1650 /*
1651 * Calculate mmu pages needed for kvm.
1652 */
1653 -unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1654 +unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1655 {
1656 - unsigned int nr_mmu_pages;
1657 - unsigned int nr_pages = 0;
1658 + unsigned long nr_mmu_pages;
1659 + unsigned long nr_pages = 0;
1660 struct kvm_memslots *slots;
1661 struct kvm_memory_slot *memslot;
1662 int i;
1663 @@ -5942,8 +5942,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1664 }
1665
1666 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
1667 - nr_mmu_pages = max(nr_mmu_pages,
1668 - (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
1669 + nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
1670
1671 return nr_mmu_pages;
1672 }
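The widening from unsigned int to unsigned long matters most in kvm_mmu_calculate_mmu_pages(): the intermediate nr_pages * KVM_PERMILLE_MMU_PAGES (the permille factor is 20 in this tree) exceeds 32 bits once a guest has more than 2^32 / 20 ≈ 214 million 4-KiB pages, i.e. roughly 800 GiB of memory. A small demonstration of the wraparound, assuming that factor of 20:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* ~1 TiB guest: 268435456 pages of 4 KiB. */
        uint32_t nr_pages32 = 268435456u;
        uint64_t nr_pages64 = 268435456ull;

        /* nr_pages * KVM_PERMILLE_MMU_PAGES / 1000, in both widths. */
        printf("u32: %u\n", nr_pages32 * 20u / 1000u);                 /* 1073741: wrapped */
        printf("u64: %llu\n", (unsigned long long)(nr_pages64 * 20 / 1000)); /* 5368709 */
        return 0;
}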
1673 diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
1674 index 1fab69c0b2f3..65892288bf51 100644
1675 --- a/arch/x86/kvm/mmu.h
1676 +++ b/arch/x86/kvm/mmu.h
1677 @@ -69,7 +69,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
1678 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
1679 u64 fault_address, char *insn, int insn_len);
1680
1681 -static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
1682 +static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
1683 {
1684 if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
1685 return kvm->arch.n_max_mmu_pages -
1686 diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
1687 index e9ea2d45ae66..9f72cc427158 100644
1688 --- a/arch/x86/kvm/mtrr.c
1689 +++ b/arch/x86/kvm/mtrr.c
1690 @@ -48,11 +48,6 @@ static bool msr_mtrr_valid(unsigned msr)
1691 return false;
1692 }
1693
1694 -static bool valid_pat_type(unsigned t)
1695 -{
1696 - return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
1697 -}
1698 -
1699 static bool valid_mtrr_type(unsigned t)
1700 {
1701 return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
1702 @@ -67,10 +62,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1703 return false;
1704
1705 if (msr == MSR_IA32_CR_PAT) {
1706 - for (i = 0; i < 8; i++)
1707 - if (!valid_pat_type((data >> (i * 8)) & 0xff))
1708 - return false;
1709 - return true;
1710 + return kvm_pat_valid(data);
1711 } else if (msr == MSR_MTRRdefType) {
1712 if (data & ~0xcff)
1713 return false;
1714 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1715 index 0f33f00aa4df..ac2cc2ed7a85 100644
1716 --- a/arch/x86/kvm/svm.c
1717 +++ b/arch/x86/kvm/svm.c
1718 @@ -5622,6 +5622,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1719 svm->vmcb->save.cr2 = vcpu->arch.cr2;
1720
1721 clgi();
1722 + kvm_load_guest_xcr0(vcpu);
1723
1724 /*
1725 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
1726 @@ -5769,6 +5770,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1727 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
1728 kvm_before_interrupt(&svm->vcpu);
1729
1730 + kvm_put_guest_xcr0(vcpu);
1731 stgi();
1732
1733 /* Any pending NMI will happen here */
1734 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1735 index 2e310ea62d60..2938b4bcc968 100644
1736 --- a/arch/x86/kvm/vmx.c
1737 +++ b/arch/x86/kvm/vmx.c
1738 @@ -4135,7 +4135,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1739 return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
1740 &msr_info->data);
1741 case MSR_IA32_XSS:
1742 - if (!vmx_xsaves_supported())
1743 + if (!vmx_xsaves_supported() ||
1744 + (!msr_info->host_initiated &&
1745 + !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
1746 + guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
1747 return 1;
1748 msr_info->data = vcpu->arch.ia32_xss;
1749 break;
1750 @@ -4265,9 +4268,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1751 MSR_TYPE_W);
1752 break;
1753 case MSR_IA32_CR_PAT:
1754 + if (!kvm_pat_valid(data))
1755 + return 1;
1756 +
1757 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
1758 - if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
1759 - return 1;
1760 vmcs_write64(GUEST_IA32_PAT, data);
1761 vcpu->arch.pat = data;
1762 break;
1763 @@ -4301,7 +4305,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1764 return 1;
1765 return vmx_set_vmx_msr(vcpu, msr_index, data);
1766 case MSR_IA32_XSS:
1767 - if (!vmx_xsaves_supported())
1768 + if (!vmx_xsaves_supported() ||
1769 + (!msr_info->host_initiated &&
1770 + !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
1771 + guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
1772 return 1;
1773 /*
1774 * The only supported bit as of Skylake is bit 8, but
1775 @@ -10437,28 +10444,21 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
1776
1777 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
1778 {
1779 - u32 exit_intr_info = 0;
1780 - u16 basic_exit_reason = (u16)vmx->exit_reason;
1781 -
1782 - if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
1783 - || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
1784 + if (vmx->exit_reason != EXIT_REASON_EXCEPTION_NMI)
1785 return;
1786
1787 - if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
1788 - exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1789 - vmx->exit_intr_info = exit_intr_info;
1790 + vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1791
1792 /* if exit due to PF check for async PF */
1793 - if (is_page_fault(exit_intr_info))
1794 + if (is_page_fault(vmx->exit_intr_info))
1795 vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
1796
1797 /* Handle machine checks before interrupts are enabled */
1798 - if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
1799 - is_machine_check(exit_intr_info))
1800 + if (is_machine_check(vmx->exit_intr_info))
1801 kvm_machine_check();
1802
1803 /* We need to handle NMIs before interrupts are enabled */
1804 - if (is_nmi(exit_intr_info)) {
1805 + if (is_nmi(vmx->exit_intr_info)) {
1806 kvm_before_interrupt(&vmx->vcpu);
1807 asm("int $2");
1808 kvm_after_interrupt(&vmx->vcpu);
1809 @@ -10756,6 +10756,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
1810 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
1811 vmx_set_interrupt_shadow(vcpu, 0);
1812
1813 + kvm_load_guest_xcr0(vcpu);
1814 +
1815 if (static_cpu_has(X86_FEATURE_PKU) &&
1816 kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
1817 vcpu->arch.pkru != vmx->host_pkru)
1818 @@ -10808,7 +10810,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
1819 "mov %%" _ASM_AX", %%cr2 \n\t"
1820 "3: \n\t"
1821 /* Check if vmlaunch or vmresume is needed */
1822 - "cmpl $0, %c[launched](%0) \n\t"
1823 + "cmpb $0, %c[launched](%0) \n\t"
1824 /* Load guest registers. Don't clobber flags. */
1825 "mov %c[rax](%0), %%" _ASM_AX " \n\t"
1826 "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
1827 @@ -10971,10 +10973,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
1828 __write_pkru(vmx->host_pkru);
1829 }
1830
1831 + kvm_put_guest_xcr0(vcpu);
1832 +
1833 vmx->nested.nested_run_pending = 0;
1834 vmx->idt_vectoring_info = 0;
1835
1836 vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
1837 + if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
1838 + kvm_machine_check();
1839 +
1840 if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
1841 return;
1842
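With the machine-check handling moved out of vmx_complete_atomic_exit() (which now only deals with EXIT_REASON_EXCEPTION_NMI) and into vmx_vcpu_run() right after VM_EXIT_REASON is read, a machine check raised during VM entry is serviced before anything else inspects the exit. The (u16) cast matters because a failed VM entry sets bit 31 (VMX_EXIT_REASONS_FAILED_VMENTRY) on top of the basic exit reason, 41 being the SDM's basic reason for a VM-entry failure due to a machine check. A quick demonstration of the masking:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* failed-VM-entry flag (bit 31) + MCE-during-entry basic reason */
        uint32_t exit_reason = (1u << 31) | 41;

        printf("raw=0x%08x basic=%u\n", exit_reason, (uint16_t)exit_reason);
        /* prints: raw=0x80000029 basic=41 */
        return 0;
}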
1843 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1844 index c27ce6059090..cbc39751f36b 100644
1845 --- a/arch/x86/kvm/x86.c
1846 +++ b/arch/x86/kvm/x86.c
1847 @@ -713,7 +713,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
1848 }
1849 EXPORT_SYMBOL_GPL(kvm_lmsw);
1850
1851 -static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
1852 +void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
1853 {
1854 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
1855 !vcpu->guest_xcr0_loaded) {
1856 @@ -723,8 +723,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
1857 vcpu->guest_xcr0_loaded = 1;
1858 }
1859 }
1860 +EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
1861
1862 -static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
1863 +void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
1864 {
1865 if (vcpu->guest_xcr0_loaded) {
1866 if (vcpu->arch.xcr0 != host_xcr0)
1867 @@ -732,6 +733,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
1868 vcpu->guest_xcr0_loaded = 0;
1869 }
1870 }
1871 +EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
1872
1873 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
1874 {
1875 @@ -2494,7 +2496,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1876
1877 break;
1878 case MSR_KVM_PV_EOI_EN:
1879 - if (kvm_lapic_enable_pv_eoi(vcpu, data))
1880 + if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
1881 return 1;
1882 break;
1883
1884 @@ -4116,7 +4118,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
1885 }
1886
1887 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
1888 - u32 kvm_nr_mmu_pages)
1889 + unsigned long kvm_nr_mmu_pages)
1890 {
1891 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
1892 return -EINVAL;
1893 @@ -4130,7 +4132,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
1894 return 0;
1895 }
1896
1897 -static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
1898 +static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
1899 {
1900 return kvm->arch.n_max_mmu_pages;
1901 }
1902 @@ -7225,9 +7227,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
1903 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
1904 }
1905
1906 +#ifdef CONFIG_X86_64
1907 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
1908 {
1909 -#ifdef CONFIG_X86_64
1910 struct desc_ptr dt;
1911 struct kvm_segment seg;
1912 unsigned long val;
1913 @@ -7277,10 +7279,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
1914
1915 for (i = 0; i < 6; i++)
1916 enter_smm_save_seg_64(vcpu, buf, i);
1917 -#else
1918 - WARN_ON_ONCE(1);
1919 -#endif
1920 }
1921 +#endif
1922
1923 static void enter_smm(struct kvm_vcpu *vcpu)
1924 {
1925 @@ -7291,9 +7291,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
1926
1927 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
1928 memset(buf, 0, 512);
1929 +#ifdef CONFIG_X86_64
1930 if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
1931 enter_smm_save_state_64(vcpu, buf);
1932 else
1933 +#endif
1934 enter_smm_save_state_32(vcpu, buf);
1935
1936 /*
1937 @@ -7351,8 +7353,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
1938 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
1939 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
1940
1941 +#ifdef CONFIG_X86_64
1942 if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
1943 kvm_x86_ops->set_efer(vcpu, 0);
1944 +#endif
1945
1946 kvm_update_cpuid(vcpu);
1947 kvm_mmu_reset_context(vcpu);
1948 @@ -7649,8 +7653,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1949 goto cancel_injection;
1950 }
1951
1952 - kvm_load_guest_xcr0(vcpu);
1953 -
1954 if (req_immediate_exit) {
1955 kvm_make_request(KVM_REQ_EVENT, vcpu);
1956 kvm_x86_ops->request_immediate_exit(vcpu);
1957 @@ -7703,8 +7705,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1958 vcpu->mode = OUTSIDE_GUEST_MODE;
1959 smp_wmb();
1960
1961 - kvm_put_guest_xcr0(vcpu);
1962 -
1963 kvm_before_interrupt(vcpu);
1964 kvm_x86_ops->handle_external_intr(vcpu);
1965 kvm_after_interrupt(vcpu);
1966 diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
1967 index 1826ed9dd1c8..3a91ea760f07 100644
1968 --- a/arch/x86/kvm/x86.h
1969 +++ b/arch/x86/kvm/x86.h
1970 @@ -345,4 +345,16 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
1971 __this_cpu_write(current_vcpu, NULL);
1972 }
1973
1974 +
1975 +static inline bool kvm_pat_valid(u64 data)
1976 +{
1977 + if (data & 0xF8F8F8F8F8F8F8F8ull)
1978 + return false;
1979 + /* 0, 1, 4, 5, 6, 7 are valid values. */
1980 + return (data | ((data & 0x0202020202020202ull) << 1)) == data;
1981 +}
1982 +
1983 +void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
1984 +void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
1985 +
1986 #endif
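The new kvm_pat_valid() replaces mtrr.c's per-byte loop with two branch-free checks across all eight PAT entries at once: the 0xF8... mask rejects any byte holding a value of 8 or above, and the OR trick rejects bytes whose bit 1 is set while bit 2 is clear, i.e. the reserved memory types 2 and 3. A user-space sketch checking it against the valid_pat_type() loop the patch removes, over every single-byte value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool kvm_pat_valid(uint64_t data)
{
        if (data & 0xF8F8F8F8F8F8F8F8ull)
                return false;
        /* bit1 set with bit2 clear (types 2 and 3) breaks the equality */
        return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}

static bool valid_pat_type(unsigned int t) /* the loop body mtrr.c dropped */
{
        return t < 8 && ((1u << t) & 0xf3); /* 0, 1, 4, 5, 6, 7 */
}

int main(void)
{
        for (unsigned int t = 0; t < 256; t++) {
                if (kvm_pat_valid((uint64_t)t) != valid_pat_type(t)) {
                        printf("mismatch at %u\n", t);
                        return 1;
                }
        }
        printf("all 256 single-byte values agree\n");
        return 0;
}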
1987 diff --git a/block/blk-core.c b/block/blk-core.c
1988 index 4a3e1f417880..af635f878f96 100644
1989 --- a/block/blk-core.c
1990 +++ b/block/blk-core.c
1991 @@ -816,7 +816,8 @@ void blk_cleanup_queue(struct request_queue *q)
1992 blk_exit_queue(q);
1993
1994 if (q->mq_ops)
1995 - blk_mq_free_queue(q);
1996 + blk_mq_exit_queue(q);
1997 +
1998 percpu_ref_exit(&q->q_usage_counter);
1999
2000 spin_lock_irq(lock);
2001 diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
2002 index f4f7c73fb828..0529e94a20f7 100644
2003 --- a/block/blk-iolatency.c
2004 +++ b/block/blk-iolatency.c
2005 @@ -560,15 +560,12 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
2006 u64 now = ktime_to_ns(ktime_get());
2007 bool issue_as_root = bio_issue_as_root_blkg(bio);
2008 bool enabled = false;
2009 + int inflight = 0;
2010
2011 blkg = bio->bi_blkg;
2012 if (!blkg)
2013 return;
2014
2015 - /* We didn't actually submit this bio, don't account it. */
2016 - if (bio->bi_status == BLK_STS_AGAIN)
2017 - return;
2018 -
2019 iolat = blkg_to_lat(bio->bi_blkg);
2020 if (!iolat)
2021 return;
2022 @@ -585,41 +582,24 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
2023 }
2024 rqw = &iolat->rq_wait;
2025
2026 - atomic_dec(&rqw->inflight);
2027 - if (iolat->min_lat_nsec == 0)
2028 - goto next;
2029 - iolatency_record_time(iolat, &bio->bi_issue, now,
2030 - issue_as_root);
2031 - window_start = atomic64_read(&iolat->window_start);
2032 - if (now > window_start &&
2033 - (now - window_start) >= iolat->cur_win_nsec) {
2034 - if (atomic64_cmpxchg(&iolat->window_start,
2035 - window_start, now) == window_start)
2036 - iolatency_check_latencies(iolat, now);
2037 + inflight = atomic_dec_return(&rqw->inflight);
2038 + WARN_ON_ONCE(inflight < 0);
2039 + /*
2040 + * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
2041 + * submitted, so do not account for it.
2042 + */
2043 + if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
2044 + iolatency_record_time(iolat, &bio->bi_issue, now,
2045 + issue_as_root);
2046 + window_start = atomic64_read(&iolat->window_start);
2047 + if (now > window_start &&
2048 + (now - window_start) >= iolat->cur_win_nsec) {
2049 + if (atomic64_cmpxchg(&iolat->window_start,
2050 + window_start, now) == window_start)
2051 + iolatency_check_latencies(iolat, now);
2052 + }
2053 }
2054 -next:
2055 - wake_up(&rqw->wait);
2056 - blkg = blkg->parent;
2057 - }
2058 -}
2059 -
2060 -static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
2061 -{
2062 - struct blkcg_gq *blkg;
2063 -
2064 - blkg = bio->bi_blkg;
2065 - while (blkg && blkg->parent) {
2066 - struct rq_wait *rqw;
2067 - struct iolatency_grp *iolat;
2068 -
2069 - iolat = blkg_to_lat(blkg);
2070 - if (!iolat)
2071 - goto next;
2072 -
2073 - rqw = &iolat->rq_wait;
2074 - atomic_dec(&rqw->inflight);
2075 wake_up(&rqw->wait);
2076 -next:
2077 blkg = blkg->parent;
2078 }
2079 }
2080 @@ -635,7 +615,6 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
2081
2082 static struct rq_qos_ops blkcg_iolatency_ops = {
2083 .throttle = blkcg_iolatency_throttle,
2084 - .cleanup = blkcg_iolatency_cleanup,
2085 .done_bio = blkcg_iolatency_done_bio,
2086 .exit = blkcg_iolatency_exit,
2087 };
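The restructured done_bio path makes a single atomic_dec_return() the only place rqw->inflight is dropped, whether or not the bio ends up being accounted; previously the early BLK_STS_AGAIN return in done_bio and the separate .cleanup hook split that responsibility between two paths, and the added WARN_ON_ONCE(inflight < 0) is there to police any decrement that still happens twice. A tiny sketch of that counter discipline in plain C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int inflight = 1;

/* One owner for the decrement; recording latency is a separate decision. */
static void done_bio(int status_again, int min_lat)
{
        int v = atomic_fetch_sub(&inflight, 1) - 1; /* atomic_dec_return() */

        if (v < 0)
                fprintf(stderr, "inflight went negative!\n"); /* WARN_ON_ONCE case */
        if (min_lat && !status_again)
                printf("record latency (inflight now %d)\n", v);
}

int main(void)
{
        done_bio(/*status_again=*/1, /*min_lat=*/1); /* not accounted, still decremented */
        return 0;
}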
2088 diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
2089 index aafb44224c89..0b7297a43ccd 100644
2090 --- a/block/blk-mq-sysfs.c
2091 +++ b/block/blk-mq-sysfs.c
2092 @@ -10,6 +10,7 @@
2093 #include <linux/smp.h>
2094
2095 #include <linux/blk-mq.h>
2096 +#include "blk.h"
2097 #include "blk-mq.h"
2098 #include "blk-mq-tag.h"
2099
2100 @@ -21,6 +22,11 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
2101 {
2102 struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
2103 kobj);
2104 +
2105 + if (hctx->flags & BLK_MQ_F_BLOCKING)
2106 + cleanup_srcu_struct(hctx->srcu);
2107 + blk_free_flush_queue(hctx->fq);
2108 + sbitmap_free(&hctx->ctx_map);
2109 free_cpumask_var(hctx->cpumask);
2110 kfree(hctx->ctxs);
2111 kfree(hctx);
2112 diff --git a/block/blk-mq.c b/block/blk-mq.c
2113 index 70d839b9c3b0..455fda99255a 100644
2114 --- a/block/blk-mq.c
2115 +++ b/block/blk-mq.c
2116 @@ -2157,12 +2157,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
2117 if (set->ops->exit_hctx)
2118 set->ops->exit_hctx(hctx, hctx_idx);
2119
2120 - if (hctx->flags & BLK_MQ_F_BLOCKING)
2121 - cleanup_srcu_struct(hctx->srcu);
2122 -
2123 blk_mq_remove_cpuhp(hctx);
2124 - blk_free_flush_queue(hctx->fq);
2125 - sbitmap_free(&hctx->ctx_map);
2126 }
2127
2128 static void blk_mq_exit_hw_queues(struct request_queue *q,
2129 @@ -2662,7 +2657,8 @@ err_exit:
2130 }
2131 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2132
2133 -void blk_mq_free_queue(struct request_queue *q)
2134 +/* tags can _not_ be used after returning from blk_mq_exit_queue */
2135 +void blk_mq_exit_queue(struct request_queue *q)
2136 {
2137 struct blk_mq_tag_set *set = q->tag_set;
2138
2139 diff --git a/block/blk-mq.h b/block/blk-mq.h
2140 index 9497b47e2526..5ad9251627f8 100644
2141 --- a/block/blk-mq.h
2142 +++ b/block/blk-mq.h
2143 @@ -31,7 +31,7 @@ struct blk_mq_ctx {
2144 } ____cacheline_aligned_in_smp;
2145
2146 void blk_mq_freeze_queue(struct request_queue *q);
2147 -void blk_mq_free_queue(struct request_queue *q);
2148 +void blk_mq_exit_queue(struct request_queue *q);
2149 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
2150 void blk_mq_wake_waiters(struct request_queue *q);
2151 bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
2152 diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
2153 index be5d1abd3e8e..8390c5b54c3b 100644
2154 --- a/drivers/char/tpm/st33zp24/i2c.c
2155 +++ b/drivers/char/tpm/st33zp24/i2c.c
2156 @@ -33,7 +33,7 @@
2157
2158 struct st33zp24_i2c_phy {
2159 struct i2c_client *client;
2160 - u8 buf[TPM_BUFSIZE + 1];
2161 + u8 buf[ST33ZP24_BUFSIZE + 1];
2162 int io_lpcpd;
2163 };
2164
2165 diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
2166 index d7909ab287a8..ff019a1e3c68 100644
2167 --- a/drivers/char/tpm/st33zp24/spi.c
2168 +++ b/drivers/char/tpm/st33zp24/spi.c
2169 @@ -63,7 +63,7 @@
2170 * some latency byte before the answer is available (max 15).
2171 * We have 2048 + 1024 + 15.
2172 */
2173 -#define ST33ZP24_SPI_BUFFER_SIZE (TPM_BUFSIZE + (TPM_BUFSIZE / 2) +\
2174 +#define ST33ZP24_SPI_BUFFER_SIZE (ST33ZP24_BUFSIZE + (ST33ZP24_BUFSIZE / 2) +\
2175 MAX_SPI_LATENCY)
2176
2177
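Taking MAX_SPI_LATENCY as the 15 latency bytes the comment above mentions, the renamed macro still evaluates to exactly what the comment spells out: ST33ZP24_SPI_BUFFER_SIZE = 2048 + 2048/2 + 15 = 3087 bytes. The rename from the generic TPM_BUFSIZE to the driver-scoped ST33ZP24_BUFSIZE changes no sizing, only the identifier.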
2178 diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
2179 index 6f4a4198af6a..20da0a84988d 100644
2180 --- a/drivers/char/tpm/st33zp24/st33zp24.h
2181 +++ b/drivers/char/tpm/st33zp24/st33zp24.h
2182 @@ -18,8 +18,8 @@
2183 #ifndef __LOCAL_ST33ZP24_H__
2184 #define __LOCAL_ST33ZP24_H__
2185
2186 -#define TPM_WRITE_DIRECTION 0x80
2187 -#define TPM_BUFSIZE 2048
2188 +#define TPM_WRITE_DIRECTION 0x80
2189 +#define ST33ZP24_BUFSIZE 2048
2190
2191 struct st33zp24_dev {
2192 struct tpm_chip *chip;
2193 diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
2194 index 977fd42daa1b..3b4e9672ff6c 100644
2195 --- a/drivers/char/tpm/tpm_i2c_infineon.c
2196 +++ b/drivers/char/tpm/tpm_i2c_infineon.c
2197 @@ -26,8 +26,7 @@
2198 #include <linux/wait.h>
2199 #include "tpm.h"
2200
2201 -/* max. buffer size supported by our TPM */
2202 -#define TPM_BUFSIZE 1260
2203 +#define TPM_I2C_INFINEON_BUFSIZE 1260
2204
2205 /* max. number of iterations after I2C NAK */
2206 #define MAX_COUNT 3
2207 @@ -63,11 +62,13 @@ enum i2c_chip_type {
2208 UNKNOWN,
2209 };
2210
2211 -/* Structure to store I2C TPM specific stuff */
2212 struct tpm_inf_dev {
2213 struct i2c_client *client;
2214 int locality;
2215 - u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */
2216 + /* In addition to the data itself, the buffer must fit the 7-bit I2C
2217 + * address and the direction bit.
2218 + */
2219 + u8 buf[TPM_I2C_INFINEON_BUFSIZE + 1];
2220 struct tpm_chip *chip;
2221 enum i2c_chip_type chip_type;
2222 unsigned int adapterlimit;
2223 @@ -219,7 +220,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
2224 .buf = tpm_dev.buf
2225 };
2226
2227 - if (len > TPM_BUFSIZE)
2228 + if (len > TPM_I2C_INFINEON_BUFSIZE)
2229 return -EINVAL;
2230
2231 if (!tpm_dev.client->adapter->algo->master_xfer)
2232 @@ -527,8 +528,8 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
2233 u8 retries = 0;
2234 u8 sts = TPM_STS_GO;
2235
2236 - if (len > TPM_BUFSIZE)
2237 - return -E2BIG; /* command is too long for our tpm, sorry */
2238 + if (len > TPM_I2C_INFINEON_BUFSIZE)
2239 + return -E2BIG;
2240
2241 if (request_locality(chip, 0) < 0)
2242 return -EBUSY;
2243 diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
2244 index b8defdfdf2dc..280308009784 100644
2245 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c
2246 +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
2247 @@ -35,14 +35,12 @@
2248 #include "tpm.h"
2249
2250 /* I2C interface offsets */
2251 -#define TPM_STS 0x00
2252 -#define TPM_BURST_COUNT 0x01
2253 -#define TPM_DATA_FIFO_W 0x20
2254 -#define TPM_DATA_FIFO_R 0x40
2255 -#define TPM_VID_DID_RID 0x60
2256 -/* TPM command header size */
2257 -#define TPM_HEADER_SIZE 10
2258 -#define TPM_RETRY 5
2259 +#define TPM_STS 0x00
2260 +#define TPM_BURST_COUNT 0x01
2261 +#define TPM_DATA_FIFO_W 0x20
2262 +#define TPM_DATA_FIFO_R 0x40
2263 +#define TPM_VID_DID_RID 0x60
2264 +#define TPM_I2C_RETRIES 5
2265 /*
2266 * I2C bus device maximum buffer size w/o counting I2C address or command
2267 * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data
2268 @@ -292,7 +290,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
2269 dev_err(dev, "%s() count < header size\n", __func__);
2270 return -EIO;
2271 }
2272 - for (retries = 0; retries < TPM_RETRY; retries++) {
2273 + for (retries = 0; retries < TPM_I2C_RETRIES; retries++) {
2274 if (retries > 0) {
2275 /* if this is not the first trial, set responseRetry */
2276 i2c_nuvoton_write_status(client,
2277 diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
2278 index 0934d3724495..4080d4e78e8e 100644
2279 --- a/drivers/clk/clk-s2mps11.c
2280 +++ b/drivers/clk/clk-s2mps11.c
2281 @@ -255,7 +255,7 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
2282 * This requires an of_device_id table. At the same time this will not
2283 * change the actual *device* matching, so do not add .of_match_table.
2284 */
2285 -static const struct of_device_id s2mps11_dt_match[] = {
2286 +static const struct of_device_id s2mps11_dt_match[] __used = {
2287 {
2288 .compatible = "samsung,s2mps11-clk",
2289 .data = (void *)S2MPS11X,
2290 diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
2291 index 92d04ce2dee6..53cdc0ec40f3 100644
2292 --- a/drivers/clk/tegra/clk-audio-sync.c
2293 +++ b/drivers/clk/tegra/clk-audio-sync.c
2294 @@ -55,7 +55,7 @@ const struct clk_ops tegra_clk_sync_source_ops = {
2295 };
2296
2297 struct clk *tegra_clk_register_sync_source(const char *name,
2298 - unsigned long rate, unsigned long max_rate)
2299 + unsigned long max_rate)
2300 {
2301 struct tegra_clk_sync_source *sync;
2302 struct clk_init_data init;
2303 @@ -67,7 +67,6 @@ struct clk *tegra_clk_register_sync_source(const char *name,
2304 return ERR_PTR(-ENOMEM);
2305 }
2306
2307 - sync->rate = rate;
2308 sync->max_rate = max_rate;
2309
2310 init.ops = &tegra_clk_sync_source_ops;
2311 diff --git a/drivers/clk/tegra/clk-tegra-audio.c b/drivers/clk/tegra/clk-tegra-audio.c
2312 index b37cae7af26d..02dd6487d855 100644
2313 --- a/drivers/clk/tegra/clk-tegra-audio.c
2314 +++ b/drivers/clk/tegra/clk-tegra-audio.c
2315 @@ -49,8 +49,6 @@ struct tegra_sync_source_initdata {
2316 #define SYNC(_name) \
2317 {\
2318 .name = #_name,\
2319 - .rate = 24000000,\
2320 - .max_rate = 24000000,\
2321 .clk_id = tegra_clk_ ## _name,\
2322 }
2323
2324 @@ -176,7 +174,7 @@ static void __init tegra_audio_sync_clk_init(void __iomem *clk_base,
2325 void __init tegra_audio_clk_init(void __iomem *clk_base,
2326 void __iomem *pmc_base, struct tegra_clk *tegra_clks,
2327 struct tegra_audio_clk_info *audio_info,
2328 - unsigned int num_plls)
2329 + unsigned int num_plls, unsigned long sync_max_rate)
2330 {
2331 struct clk *clk;
2332 struct clk **dt_clk;
2333 @@ -221,8 +219,7 @@ void __init tegra_audio_clk_init(void __iomem *clk_base,
2334 if (!dt_clk)
2335 continue;
2336
2337 - clk = tegra_clk_register_sync_source(data->name,
2338 - data->rate, data->max_rate);
2339 + clk = tegra_clk_register_sync_source(data->name, sync_max_rate);
2340 *dt_clk = clk;
2341 }
2342
2343 diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
2344 index 1824f014202b..625d11091330 100644
2345 --- a/drivers/clk/tegra/clk-tegra114.c
2346 +++ b/drivers/clk/tegra/clk-tegra114.c
2347 @@ -1190,6 +1190,13 @@ static struct tegra_clk_init_table init_table[] __initdata = {
2348 { TEGRA114_CLK_XUSB_FALCON_SRC, TEGRA114_CLK_PLL_P, 204000000, 0 },
2349 { TEGRA114_CLK_XUSB_HOST_SRC, TEGRA114_CLK_PLL_P, 102000000, 0 },
2350 { TEGRA114_CLK_VDE, TEGRA114_CLK_CLK_MAX, 600000000, 0 },
2351 + { TEGRA114_CLK_SPDIF_IN_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2352 + { TEGRA114_CLK_I2S0_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2353 + { TEGRA114_CLK_I2S1_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2354 + { TEGRA114_CLK_I2S2_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2355 + { TEGRA114_CLK_I2S3_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2356 + { TEGRA114_CLK_I2S4_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2357 + { TEGRA114_CLK_VIMCLK_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2358 /* must be the last entry */
2359 { TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0 },
2360 };
2361 @@ -1362,7 +1369,7 @@ static void __init tegra114_clock_init(struct device_node *np)
2362 tegra114_periph_clk_init(clk_base, pmc_base);
2363 tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks,
2364 tegra114_audio_plls,
2365 - ARRAY_SIZE(tegra114_audio_plls));
2366 + ARRAY_SIZE(tegra114_audio_plls), 24000000);
2367 tegra_pmc_clk_init(pmc_base, tegra114_clks);
2368 tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
2369 &pll_x_params);
2370 diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
2371 index b6cf28ca2ed2..df0018f7bf7e 100644
2372 --- a/drivers/clk/tegra/clk-tegra124.c
2373 +++ b/drivers/clk/tegra/clk-tegra124.c
2374 @@ -1291,6 +1291,13 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
2375 { TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1 },
2376 { TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0 },
2377 { TEGRA124_CLK_VIC03, TEGRA124_CLK_PLL_C3, 0, 0 },
2378 + { TEGRA124_CLK_SPDIF_IN_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2379 + { TEGRA124_CLK_I2S0_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2380 + { TEGRA124_CLK_I2S1_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2381 + { TEGRA124_CLK_I2S2_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2382 + { TEGRA124_CLK_I2S3_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2383 + { TEGRA124_CLK_I2S4_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2384 + { TEGRA124_CLK_VIMCLK_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2385 /* must be the last entry */
2386 { TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0 },
2387 };
2388 @@ -1455,7 +1462,7 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
2389 tegra124_periph_clk_init(clk_base, pmc_base);
2390 tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks,
2391 tegra124_audio_plls,
2392 - ARRAY_SIZE(tegra124_audio_plls));
2393 + ARRAY_SIZE(tegra124_audio_plls), 24576000);
2394 tegra_pmc_clk_init(pmc_base, tegra124_clks);
2395
2396 /* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
2397 diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
2398 index 4e1bc23c9865..080bfa24863e 100644
2399 --- a/drivers/clk/tegra/clk-tegra210.c
2400 +++ b/drivers/clk/tegra/clk-tegra210.c
2401 @@ -3369,6 +3369,15 @@ static struct tegra_clk_init_table init_table[] __initdata = {
2402 { TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 },
2403 { TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 },
2404 { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
2405 + { TEGRA210_CLK_SPDIF_IN_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2406 + { TEGRA210_CLK_I2S0_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2407 + { TEGRA210_CLK_I2S1_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2408 + { TEGRA210_CLK_I2S2_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2409 + { TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2410 + { TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2411 + { TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2412 + { TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 },
2413 + { TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 },
2414 /* This MUST be the last entry. */
2415 { TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
2416 };
2417 @@ -3562,7 +3571,7 @@ static void __init tegra210_clock_init(struct device_node *np)
2418 tegra210_periph_clk_init(clk_base, pmc_base);
2419 tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks,
2420 tegra210_audio_plls,
2421 - ARRAY_SIZE(tegra210_audio_plls));
2422 + ARRAY_SIZE(tegra210_audio_plls), 24576000);
2423 tegra_pmc_clk_init(pmc_base, tegra210_clks);
2424
2425 /* For Tegra210, PLLD is the only source for DSIA & DSIB */
2426 diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
2427 index acfe661b2ae7..e0aaecd98fbf 100644
2428 --- a/drivers/clk/tegra/clk-tegra30.c
2429 +++ b/drivers/clk/tegra/clk-tegra30.c
2430 @@ -1267,6 +1267,13 @@ static struct tegra_clk_init_table init_table[] __initdata = {
2431 { TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
2432 { TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
2433 { TEGRA30_CLK_VDE, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
2434 + { TEGRA30_CLK_SPDIF_IN_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2435 + { TEGRA30_CLK_I2S0_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2436 + { TEGRA30_CLK_I2S1_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2437 + { TEGRA30_CLK_I2S2_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2438 + { TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2439 + { TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2440 + { TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2441 /* must be the last entry */
2442 { TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
2443 };
2444 @@ -1344,7 +1351,7 @@ static void __init tegra30_clock_init(struct device_node *np)
2445 tegra30_periph_clk_init();
2446 tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks,
2447 tegra30_audio_plls,
2448 - ARRAY_SIZE(tegra30_audio_plls));
2449 + ARRAY_SIZE(tegra30_audio_plls), 24000000);
2450 tegra_pmc_clk_init(pmc_base, tegra30_clks);
2451
2452 tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
2453 diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
2454 index d2c3a010f8e9..09bccbb9640c 100644
2455 --- a/drivers/clk/tegra/clk.h
2456 +++ b/drivers/clk/tegra/clk.h
2457 @@ -41,7 +41,7 @@ extern const struct clk_ops tegra_clk_sync_source_ops;
2458 extern int *periph_clk_enb_refcnt;
2459
2460 struct clk *tegra_clk_register_sync_source(const char *name,
2461 - unsigned long fixed_rate, unsigned long max_rate);
2462 + unsigned long max_rate);
2463
2464 /**
2465 * struct tegra_clk_frac_div - fractional divider clock
2466 @@ -796,7 +796,7 @@ void tegra_register_devclks(struct tegra_devclk *dev_clks, int num);
2467 void tegra_audio_clk_init(void __iomem *clk_base,
2468 void __iomem *pmc_base, struct tegra_clk *tegra_clks,
2469 struct tegra_audio_clk_info *audio_info,
2470 - unsigned int num_plls);
2471 + unsigned int num_plls, unsigned long sync_max_rate);
2472
2473 void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base,
2474 struct tegra_clk *tegra_clks,
2475 diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
2476 index 1ff229c2aeab..186a2536fb8b 100644
2477 --- a/drivers/crypto/ccree/cc_driver.c
2478 +++ b/drivers/crypto/ccree/cc_driver.c
2479 @@ -364,7 +364,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
2480 rc = cc_ivgen_init(new_drvdata);
2481 if (rc) {
2482 dev_err(dev, "cc_ivgen_init failed\n");
2483 - goto post_power_mgr_err;
2484 + goto post_buf_mgr_err;
2485 }
2486
2487 /* Allocate crypto algs */
2488 @@ -387,6 +387,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
2489 goto post_hash_err;
2490 }
2491
2492 + /* All set, we can allow autosuspend */
2493 + cc_pm_go(new_drvdata);
2494 +
2495 /* If we got here and FIPS mode is enabled
2496 * it means all FIPS tests passed, so let TEE
2497 * know we're good.
2498 @@ -401,8 +404,6 @@ post_cipher_err:
2499 cc_cipher_free(new_drvdata);
2500 post_ivgen_err:
2501 cc_ivgen_fini(new_drvdata);
2502 -post_power_mgr_err:
2503 - cc_pm_fini(new_drvdata);
2504 post_buf_mgr_err:
2505 cc_buffer_mgr_fini(new_drvdata);
2506 post_req_mgr_err:
2507 diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
2508 index 79fc0a37ba6e..638082dff183 100644
2509 --- a/drivers/crypto/ccree/cc_pm.c
2510 +++ b/drivers/crypto/ccree/cc_pm.c
2511 @@ -103,20 +103,19 @@ int cc_pm_put_suspend(struct device *dev)
2512
2513 int cc_pm_init(struct cc_drvdata *drvdata)
2514 {
2515 - int rc = 0;
2516 struct device *dev = drvdata_to_dev(drvdata);
2517
2518 /* must be before the enabling to avoid redundant suspending */
2519 pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
2520 pm_runtime_use_autosuspend(dev);
2521 /* activate the PM module */
2522 - rc = pm_runtime_set_active(dev);
2523 - if (rc)
2524 - return rc;
2525 - /* enable the PM module*/
2526 - pm_runtime_enable(dev);
2527 + return pm_runtime_set_active(dev);
2528 +}
2529
2530 - return rc;
2531 +/* enable the PM module */
2532 +void cc_pm_go(struct cc_drvdata *drvdata)
2533 +{
2534 + pm_runtime_enable(drvdata_to_dev(drvdata));
2535 }
2536
2537 void cc_pm_fini(struct cc_drvdata *drvdata)
2538 diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
2539 index 020a5403c58b..907a6db4d6c0 100644
2540 --- a/drivers/crypto/ccree/cc_pm.h
2541 +++ b/drivers/crypto/ccree/cc_pm.h
2542 @@ -16,6 +16,7 @@
2543 extern const struct dev_pm_ops ccree_pm;
2544
2545 int cc_pm_init(struct cc_drvdata *drvdata);
2546 +void cc_pm_go(struct cc_drvdata *drvdata);
2547 void cc_pm_fini(struct cc_drvdata *drvdata);
2548 int cc_pm_suspend(struct device *dev);
2549 int cc_pm_resume(struct device *dev);
2550 @@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
2551 return 0;
2552 }
2553
2554 +static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
2555 +
2556 static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
2557
2558 static inline int cc_pm_suspend(struct device *dev)
2559 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
2560 index 5f3f54073818..17862b9ecccd 100644
2561 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
2562 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
2563 @@ -1070,7 +1070,7 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
2564 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
2565 {
2566 struct amdgpu_device *adev = ring->adev;
2567 - uint32_t rptr = amdgpu_ring_get_rptr(ring);
2568 + uint32_t rptr;
2569 unsigned i;
2570 int r, timeout = adev->usec_timeout;
2571
2572 @@ -1084,6 +1084,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
2573 ring->idx, r);
2574 return r;
2575 }
2576 +
2577 + rptr = amdgpu_ring_get_rptr(ring);
2578 +
2579 amdgpu_ring_write(ring, VCE_CMD_END);
2580 amdgpu_ring_commit(ring);
2581
2582 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
2583 index 400fc74bbae2..205e683fb920 100644
2584 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
2585 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
2586 @@ -431,7 +431,7 @@ error:
2587 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
2588 {
2589 struct amdgpu_device *adev = ring->adev;
2590 - uint32_t rptr = amdgpu_ring_get_rptr(ring);
2591 + uint32_t rptr;
2592 unsigned i;
2593 int r;
2594
2595 @@ -441,6 +441,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
2596 ring->idx, r);
2597 return r;
2598 }
2599 +
2600 + rptr = amdgpu_ring_get_rptr(ring);
2601 +
2602 amdgpu_ring_write(ring, VCN_ENC_CMD_END);
2603 amdgpu_ring_commit(ring);
2604
2605 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
2606 index 46568497ef18..782411649816 100644
2607 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
2608 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
2609 @@ -82,7 +82,8 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
2610
2611 static const struct soc15_reg_golden golden_settings_gc_9_0[] =
2612 {
2613 - SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
2614 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
2615 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
2616 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
2617 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
2618 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
2619 diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
2620 index d4070839ac80..80613a74df42 100644
2621 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
2622 +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
2623 @@ -170,7 +170,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
2624 static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
2625 {
2626 struct amdgpu_device *adev = ring->adev;
2627 - uint32_t rptr = amdgpu_ring_get_rptr(ring);
2628 + uint32_t rptr;
2629 unsigned i;
2630 int r;
2631
2632 @@ -180,6 +180,9 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
2633 ring->idx, r);
2634 return r;
2635 }
2636 +
2637 + rptr = amdgpu_ring_get_rptr(ring);
2638 +
2639 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
2640 amdgpu_ring_commit(ring);
2641
2642 diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
2643 index 057151b17b45..ce16b8329af0 100644
2644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
2645 +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
2646 @@ -175,7 +175,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
2647 static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
2648 {
2649 struct amdgpu_device *adev = ring->adev;
2650 - uint32_t rptr = amdgpu_ring_get_rptr(ring);
2651 + uint32_t rptr;
2652 unsigned i;
2653 int r;
2654
2655 @@ -188,6 +188,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
2656 ring->me, ring->idx, r);
2657 return r;
2658 }
2659 +
2660 + rptr = amdgpu_ring_get_rptr(ring);
2661 +
2662 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
2663 amdgpu_ring_commit(ring);
2664
2665 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
2666 index 5aba50f63ac6..938d0053a820 100644
2667 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
2668 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
2669 @@ -310,6 +310,7 @@ static const struct kfd_deviceid supported_devices[] = {
2670 { 0x67CF, &polaris10_device_info }, /* Polaris10 */
2671 { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
2672 { 0x67DF, &polaris10_device_info }, /* Polaris10 */
2673 + { 0x6FDF, &polaris10_device_info }, /* Polaris10 */
2674 { 0x67E0, &polaris11_device_info }, /* Polaris11 */
2675 { 0x67E1, &polaris11_device_info }, /* Polaris11 */
2676 { 0x67E3, &polaris11_device_info }, /* Polaris11 */
2677 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
2678 index 59445c83f023..c85bea70d965 100644
2679 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
2680 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
2681 @@ -377,9 +377,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
2682 drm_connector_attach_encoder(&aconnector->base,
2683 &aconnector->mst_encoder->base);
2684
2685 - /*
2686 - * TODO: understand why this one is needed
2687 - */
2688 drm_object_attach_property(
2689 &connector->base,
2690 dev->mode_config.path_property,
2691 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
2692 index 2aab1b475945..cede78cdf28d 100644
2693 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
2694 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
2695 @@ -669,20 +669,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
2696 for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
2697 table->WatermarkRow[1][i].MinClock =
2698 cpu_to_le16((uint16_t)
2699 - (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
2700 - 1000);
2701 + (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
2702 + 1000));
2703 table->WatermarkRow[1][i].MaxClock =
2704 cpu_to_le16((uint16_t)
2705 - (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
2706 - 100);
2707 + (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
2708 + 1000));
2709 table->WatermarkRow[1][i].MinUclk =
2710 cpu_to_le16((uint16_t)
2711 - (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
2712 - 1000);
2713 + (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
2714 + 1000));
2715 table->WatermarkRow[1][i].MaxUclk =
2716 cpu_to_le16((uint16_t)
2717 - (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
2718 - 1000);
2719 + (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
2720 + 1000));
2721 table->WatermarkRow[1][i].WmSetting = (uint8_t)
2722 wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
2723 }
2724 @@ -690,20 +690,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
2725 for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
2726 table->WatermarkRow[0][i].MinClock =
2727 cpu_to_le16((uint16_t)
2728 - (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
2729 - 1000);
2730 + (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
2731 + 1000));
2732 table->WatermarkRow[0][i].MaxClock =
2733 cpu_to_le16((uint16_t)
2734 - (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
2735 - 1000);
2736 + (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
2737 + 1000));
2738 table->WatermarkRow[0][i].MinUclk =
2739 cpu_to_le16((uint16_t)
2740 - (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
2741 - 1000);
2742 + (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
2743 + 1000));
2744 table->WatermarkRow[0][i].MaxUclk =
2745 cpu_to_le16((uint16_t)
2746 - (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
2747 - 1000);
2748 + (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
2749 + 1000));
2750 table->WatermarkRow[0][i].WmSetting = (uint8_t)
2751 wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
2752 }
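The smu_helper.c fix above is purely about where the (uint16_t) cast sits (plus one MaxClock path that divided by 100 instead of 1000): casting a kHz value to 16 bits before dividing truncates it, while dividing down to MHz first leaves a value that fits. A worked example with a 480000 kHz clock-range bound:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t khz = 480000;

        uint16_t before_fix = (uint16_t)khz / 1000;   /* 480000 % 65536 = 21248 -> 21 */
        uint16_t after_fix  = (uint16_t)(khz / 1000); /* 480 */

        printf("before=%u after=%u\n", before_fix, after_fix);
        return 0;
}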
2753 diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
2754 index 281cf9cbb44c..1a4b44923aec 100644
2755 --- a/drivers/gpu/drm/drm_atomic.c
2756 +++ b/drivers/gpu/drm/drm_atomic.c
2757 @@ -1702,6 +1702,27 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
2758 struct drm_connector *connector = conn_state->connector;
2759 struct drm_crtc_state *crtc_state;
2760
2761 + /*
2762 + * For compatibility with legacy users, we want to make sure that
2763 + * we allow DPMS On<->Off modesets on unregistered connectors, since
2764 + * legacy modesetting users will not be expecting these to fail. We do
2765 + * not however, want to allow legacy users to assign a connector
2766 + * that's been unregistered from sysfs to another CRTC, since doing
2767 + * this with a now non-existent connector could potentially leave us
2768 + * in an invalid state.
2769 + *
2770 + * Since the connector can be unregistered at any point during an
2771 + * atomic check or commit, this is racy. But that's OK: all we care
2772 + * about is ensuring that userspace can't use this connector for new
2773 + * configurations after it's been notified that the connector is no
2774 + * longer present.
2775 + */
2776 + if (!READ_ONCE(connector->registered) && crtc) {
2777 + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
2778 + connector->base.id, connector->name);
2779 + return -EINVAL;
2780 + }
2781 +
2782 if (conn_state->crtc == crtc)
2783 return 0;
2784
2785 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
2786 index 138680b37c70..f8672238d444 100644
2787 --- a/drivers/gpu/drm/drm_ioc32.c
2788 +++ b/drivers/gpu/drm/drm_ioc32.c
2789 @@ -185,7 +185,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
2790 m32.size = map.size;
2791 m32.type = map.type;
2792 m32.flags = map.flags;
2793 - m32.handle = ptr_to_compat(map.handle);
2794 + m32.handle = ptr_to_compat((void __user *)map.handle);
2795 m32.mtrr = map.mtrr;
2796 if (copy_to_user(argp, &m32, sizeof(m32)))
2797 return -EFAULT;
2798 @@ -216,7 +216,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
2799
2800 m32.offset = map.offset;
2801 m32.mtrr = map.mtrr;
2802 - m32.handle = ptr_to_compat(map.handle);
2803 + m32.handle = ptr_to_compat((void __user *)map.handle);
2804 if (map.handle != compat_ptr(m32.handle))
2805 pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
2806 map.handle, m32.type, m32.offset);
2807 @@ -529,7 +529,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
2808 if (err)
2809 return err;
2810
2811 - req32.handle = ptr_to_compat(req.handle);
2812 + req32.handle = ptr_to_compat((void __user *)req.handle);
2813 if (copy_to_user(argp, &req32, sizeof(req32)))
2814 return -EFAULT;
2815
2816 diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
2817 index 28cdcf76b6f9..d1859bcc7ccb 100644
2818 --- a/drivers/gpu/drm/drm_vblank.c
2819 +++ b/drivers/gpu/drm/drm_vblank.c
2820 @@ -105,13 +105,20 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
2821 write_sequnlock(&vblank->seqlock);
2822 }
2823
2824 +static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe)
2825 +{
2826 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
2827 +
2828 + return vblank->max_vblank_count ?: dev->max_vblank_count;
2829 +}
2830 +
2831 /*
2832 * "No hw counter" fallback implementation of .get_vblank_counter() hook,
2833 * if there is no usable hardware frame counter available.
2834 */
2835 static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
2836 {
2837 - WARN_ON_ONCE(dev->max_vblank_count != 0);
2838 + WARN_ON_ONCE(drm_max_vblank_count(dev, pipe) != 0);
2839 return 0;
2840 }
2841
2842 @@ -198,6 +205,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
2843 ktime_t t_vblank;
2844 int count = DRM_TIMESTAMP_MAXRETRIES;
2845 int framedur_ns = vblank->framedur_ns;
2846 + u32 max_vblank_count = drm_max_vblank_count(dev, pipe);
2847
2848 /*
2849 * Interrupts were disabled prior to this call, so deal with counter
2850 @@ -216,9 +224,9 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
2851 rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq);
2852 } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
2853
2854 - if (dev->max_vblank_count != 0) {
2855 + if (max_vblank_count) {
2856 /* trust the hw counter when it's around */
2857 - diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
2858 + diff = (cur_vblank - vblank->last) & max_vblank_count;
2859 } else if (rc && framedur_ns) {
2860 u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
2861
2862 @@ -1204,6 +1212,37 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc)
2863 }
2864 EXPORT_SYMBOL(drm_crtc_vblank_reset);
2865
2866 +/**
2867 + * drm_crtc_set_max_vblank_count - configure the hw max vblank counter value
2868 + * @crtc: CRTC in question
2869 + * @max_vblank_count: max hardware vblank counter value
2870 + *
2871 + * Update the maximum hardware vblank counter value for @crtc
2872 + * at runtime. Useful for hardware where the operation of the
2873 + * hardware vblank counter depends on the currently active
2874 + * display configuration.
2875 + *
2876 + * For example, if the hardware vblank counter does not work
2877 + * when a specific connector is active the maximum can be set
2878 + * to zero. And when that specific connector isn't active the
2879 + * maximum can again be set to the appropriate non-zero value.
2880 + *
2881 + * If used, must be called before drm_vblank_on().
2882 + */
2883 +void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
2884 + u32 max_vblank_count)
2885 +{
2886 + struct drm_device *dev = crtc->dev;
2887 + unsigned int pipe = drm_crtc_index(crtc);
2888 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
2889 +
2890 + WARN_ON(dev->max_vblank_count);
2891 + WARN_ON(!READ_ONCE(vblank->inmodeset));
2892 +
2893 + vblank->max_vblank_count = max_vblank_count;
2894 +}
2895 +EXPORT_SYMBOL(drm_crtc_set_max_vblank_count);
2896 +
2897 /**
2898 * drm_crtc_vblank_on - enable vblank events on a CRTC
2899 * @crtc: CRTC in question
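The kernel-doc above pins down the contract for the new helper: only meaningful on devices that left dev->max_vblank_count at zero, and it must run while the vblank machinery is still off (hence the WARN_ON(!vblank->inmodeset)), before drm_crtc_vblank_on(). A hypothetical driver enable path might look like the following sketch; the foo_* names are made up for illustration:

#include <linux/types.h>
#include <drm/drm_vblank.h>

/* made-up probe of whether the hw frame counter stays alive in this mode */
static bool foo_hw_counter_works(struct drm_crtc *crtc)
{
        return true;
}

static void foo_crtc_enable(struct drm_crtc *crtc)
{
        u32 max = foo_hw_counter_works(crtc) ? 0xffffff : 0;

        /* Must happen while vblanks are off, before drm_crtc_vblank_on(). */
        drm_crtc_set_max_vblank_count(crtc, max);
        drm_crtc_vblank_on(crtc);
}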
2900 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
2901 index f9ce35da4123..e063e98d1e82 100644
2902 --- a/drivers/gpu/drm/i915/i915_debugfs.c
2903 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
2904 @@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
2905 if (!IS_GEN5(dev_priv))
2906 return -ENODEV;
2907
2908 + intel_runtime_pm_get(dev_priv);
2909 +
2910 ret = mutex_lock_interruptible(&dev->struct_mutex);
2911 if (ret)
2912 return ret;
2913 @@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
2914 seq_printf(m, "GFX power: %ld\n", gfx);
2915 seq_printf(m, "Total power: %ld\n", chipset + gfx);
2916
2917 + intel_runtime_pm_put(dev_priv);
2918 +
2919 return 0;
2920 }
2921
2922 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
2923 index 03cda197fb6b..937287710042 100644
2924 --- a/drivers/gpu/drm/i915/i915_gem.c
2925 +++ b/drivers/gpu/drm/i915/i915_gem.c
2926 @@ -1874,20 +1874,28 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
2927 * pages from.
2928 */
2929 if (!obj->base.filp) {
2930 - i915_gem_object_put(obj);
2931 - return -ENXIO;
2932 + addr = -ENXIO;
2933 + goto err;
2934 + }
2935 +
2936 + if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
2937 + addr = -EINVAL;
2938 + goto err;
2939 }
2940
2941 addr = vm_mmap(obj->base.filp, 0, args->size,
2942 PROT_READ | PROT_WRITE, MAP_SHARED,
2943 args->offset);
2944 + if (IS_ERR_VALUE(addr))
2945 + goto err;
2946 +
2947 if (args->flags & I915_MMAP_WC) {
2948 struct mm_struct *mm = current->mm;
2949 struct vm_area_struct *vma;
2950
2951 if (down_write_killable(&mm->mmap_sem)) {
2952 - i915_gem_object_put(obj);
2953 - return -EINTR;
2954 + addr = -EINTR;
2955 + goto err;
2956 }
2957 vma = find_vma(mm, addr);
2958 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
2959 @@ -1896,17 +1904,20 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
2960 else
2961 addr = -ENOMEM;
2962 up_write(&mm->mmap_sem);
2963 + if (IS_ERR_VALUE(addr))
2964 + goto err;
2965
2966 /* This may race, but that's ok, it only gets set */
2967 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
2968 }
2969 i915_gem_object_put(obj);
2970 - if (IS_ERR((void *)addr))
2971 - return addr;
2972
2973 args->addr_ptr = (uint64_t) addr;
2974 -
2975 return 0;
2976 +
2977 +err:
2978 + i915_gem_object_put(obj);
2979 + return addr;
2980 }
2981
2982 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
2983 @@ -5595,6 +5606,8 @@ err_uc_misc:
2984 i915_gem_cleanup_userptr(dev_priv);
2985
2986 if (ret == -EIO) {
2987 + mutex_lock(&dev_priv->drm.struct_mutex);
2988 +
2989 /*
2990 * Allow engine initialisation to fail by marking the GPU as
2991 * wedged. But we only want to do this where the GPU is angry,
2992 @@ -5605,7 +5618,14 @@ err_uc_misc:
2993 "Failed to initialize GPU, declaring it wedged!\n");
2994 i915_gem_set_wedged(dev_priv);
2995 }
2996 - ret = 0;
2997 +
2998 + /* Minimal basic recovery for KMS */
2999 + ret = i915_ggtt_enable_hw(dev_priv);
3000 + i915_gem_restore_gtt_mappings(dev_priv);
3001 + i915_gem_restore_fences(dev_priv);
3002 + intel_init_clock_gating(dev_priv);
3003 +
3004 + mutex_unlock(&dev_priv->drm.struct_mutex);
3005 }
3006
3007 i915_gem_drain_freed_objects(dev_priv);
3008 @@ -5615,6 +5635,7 @@ err_uc_misc:
3009 void i915_gem_fini(struct drm_i915_private *dev_priv)
3010 {
3011 i915_gem_suspend_late(dev_priv);
3012 + intel_disable_gt_powersave(dev_priv);
3013
3014 /* Flush any outstanding unpin_work. */
3015 i915_gem_drain_workqueue(dev_priv);
3016 @@ -5626,6 +5647,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
3017 i915_gem_contexts_fini(dev_priv);
3018 mutex_unlock(&dev_priv->drm.struct_mutex);
3019
3020 + intel_cleanup_gt_powersave(dev_priv);
3021 +
3022 intel_uc_fini_misc(dev_priv);
3023 i915_gem_cleanup_userptr(dev_priv);
3024
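The new bounds check in i915_gem_mmap_ioctl() rejects an (offset, size) window that does not fit inside the object before anything is handed to vm_mmap(), and all failure paths now funnel through one err: label that drops the object reference. range_overflows() itself is not shown in this patch; an overflow-safe check in its spirit looks like this:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* overflow-safe "does [start, start+size) fit below max?" */
static bool window_overflows(uint64_t start, uint64_t size, uint64_t max)
{
        return start >= max || size > max - start;
}

int main(void)
{
        uint64_t obj_size = 4096;

        printf("%d\n", window_overflows(0, 4096, obj_size));       /* 0: whole object */
        printf("%d\n", window_overflows(4096, 1, obj_size));       /* 1: past the end */
        /* a naive start + size > max wraps to 0 here and would wrongly pass: */
        printf("%d\n", window_overflows(1, UINT64_MAX, obj_size)); /* 1 */
        return 0;
}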
3025 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
3026 index 16f5d2d93801..4e070afb2738 100644
3027 --- a/drivers/gpu/drm/i915/i915_reg.h
3028 +++ b/drivers/gpu/drm/i915/i915_reg.h
3029 @@ -6531,7 +6531,7 @@ enum {
3030 #define PLANE_CTL_YUV422_UYVY (1 << 16)
3031 #define PLANE_CTL_YUV422_YVYU (2 << 16)
3032 #define PLANE_CTL_YUV422_VYUY (3 << 16)
3033 -#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
3034 +#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15)
3035 #define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
3036 #define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
3037 #define PLANE_CTL_TILED_MASK (0x7 << 10)
3038 diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
3039 index 29075c763428..7b4906ede148 100644
3040 --- a/drivers/gpu/drm/i915/intel_cdclk.c
3041 +++ b/drivers/gpu/drm/i915/intel_cdclk.c
3042 @@ -2208,6 +2208,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
3043 if (INTEL_GEN(dev_priv) >= 9)
3044 min_cdclk = max(2 * 96000, min_cdclk);
3045
3046 + /*
3047 + * "For DP audio configuration, cdclk frequency shall be set to
3048 + * meet the following requirements:
3049 + * DP Link Frequency(MHz) | Cdclk frequency(MHz)
3050 + * 270 | 320 or higher
3051 + * 162 | 200 or higher"
3052 + */
3053 + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3054 + intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
3055 + min_cdclk = max(crtc_state->port_clock, min_cdclk);
3056 +
3057 /*
3058 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
3059 * than 320000KHz.
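
The clamp above implements the quoted table by using the DP link rate itself (port_clock, in kHz) as a lower bound on the minimum cdclk; the cdclk actually programmed is then whatever supported frequency sits at or above that floor. Toy arithmetic, with an illustrative starting floor:

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
    	int min_cdclk = 200000;		/* floor from earlier checks, kHz */
    	int port_clock = 270000;	/* 2.7 GHz DP link, in kHz */
    	int has_audio = 1;

    	if (has_audio)			/* the new VLV/CHV rule */
    		min_cdclk = max(port_clock, min_cdclk);
    	printf("min_cdclk = %d kHz\n", min_cdclk);	/* 270000 */
    	return 0;
    }
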
3060 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
3061 index 3bd44d042a1d..6902fd2da19c 100644
3062 --- a/drivers/gpu/drm/i915/intel_display.c
3063 +++ b/drivers/gpu/drm/i915/intel_display.c
3064 @@ -2712,6 +2712,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3065 if (size_aligned * 2 > dev_priv->stolen_usable_size)
3066 return false;
3067
3068 + switch (fb->modifier) {
3069 + case DRM_FORMAT_MOD_LINEAR:
3070 + case I915_FORMAT_MOD_X_TILED:
3071 + case I915_FORMAT_MOD_Y_TILED:
3072 + break;
3073 + default:
3074 + DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
3075 + fb->modifier);
3076 + return false;
3077 + }
3078 +
3079 mutex_lock(&dev->struct_mutex);
3080 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
3081 base_aligned,
3082 @@ -2721,8 +2732,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3083 if (!obj)
3084 return false;
3085
3086 - if (plane_config->tiling == I915_TILING_X)
3087 - obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
3088 + switch (plane_config->tiling) {
3089 + case I915_TILING_NONE:
3090 + break;
3091 + case I915_TILING_X:
3092 + case I915_TILING_Y:
3093 + obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
3094 + break;
3095 + default:
3096 + MISSING_CASE(plane_config->tiling);
3097 + return false;
3098 + }
3099
3100 mode_cmd.pixel_format = fb->format->format;
3101 mode_cmd.width = fb->width;
3102 @@ -3561,11 +3581,11 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3103 case I915_FORMAT_MOD_Y_TILED:
3104 return PLANE_CTL_TILED_Y;
3105 case I915_FORMAT_MOD_Y_TILED_CCS:
3106 - return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE;
3107 + return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3108 case I915_FORMAT_MOD_Yf_TILED:
3109 return PLANE_CTL_TILED_YF;
3110 case I915_FORMAT_MOD_Yf_TILED_CCS:
3111 - return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE;
3112 + return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3113 default:
3114 MISSING_CASE(fb_modifier);
3115 }
3116 @@ -8812,13 +8832,14 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
3117 fb->modifier = I915_FORMAT_MOD_X_TILED;
3118 break;
3119 case PLANE_CTL_TILED_Y:
3120 - if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
3121 + plane_config->tiling = I915_TILING_Y;
3122 + if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
3123 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
3124 else
3125 fb->modifier = I915_FORMAT_MOD_Y_TILED;
3126 break;
3127 case PLANE_CTL_TILED_YF:
3128 - if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
3129 + if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
3130 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
3131 else
3132 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
3133 @@ -15951,8 +15972,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
3134 flush_work(&dev_priv->atomic_helper.free_work);
3135 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
3136
3137 - intel_disable_gt_powersave(dev_priv);
3138 -
3139 /*
3140 * Interrupts and polling as the first thing to avoid creating havoc.
3141 * Too much stuff here (turning of connectors, ...) would
3142 @@ -15980,8 +15999,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
3143
3144 intel_cleanup_overlay(dev_priv);
3145
3146 - intel_cleanup_gt_powersave(dev_priv);
3147 -
3148 intel_teardown_gmbus(dev_priv);
3149
3150 destroy_workqueue(dev_priv->modeset_wq);
3151 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
3152 index f92079e19de8..20cd4c8acecc 100644
3153 --- a/drivers/gpu/drm/i915/intel_dp.c
3154 +++ b/drivers/gpu/drm/i915/intel_dp.c
3155 @@ -4739,6 +4739,22 @@ intel_dp_long_pulse(struct intel_connector *connector,
3156 */
3157 status = connector_status_disconnected;
3158 goto out;
3159 + } else {
3160 + /*
3161 + * If the display is now connected, check the link status;
3162 + * there have been known issues of link loss triggering a
3163 + * long pulse.
3164 + *
3165 + * Some sinks (e.g. the ASUS PB287Q) seem to perform some
3166 + * weird HPD ping pong during modesets. So we can apparently
3167 + * end up with HPD going low during a modeset, and then
3168 + * going back up soon after. And once that happens we must
3169 + * retrain the link to get a picture, in case no userspace
3170 + * component reacted to the intermittent HPD dip.
3171 + */
3172 + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3173 +
3174 + intel_dp_retrain_link(encoder, ctx);
3175 }
3176
3177 /*
3178 diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
3179 index 1fec0c71b4d9..58ba14966d4f 100644
3180 --- a/drivers/gpu/drm/i915/intel_dp_mst.c
3181 +++ b/drivers/gpu/drm/i915/intel_dp_mst.c
3182 @@ -408,8 +408,6 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
3183 struct intel_dp *intel_dp = intel_connector->mst_port;
3184 struct intel_crtc *crtc = to_intel_crtc(state->crtc);
3185
3186 - if (!READ_ONCE(connector->registered))
3187 - return NULL;
3188 return &intel_dp->mst_encoders[crtc->pipe]->base.base;
3189 }
3190
3191 diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
3192 index f889d41a281f..5e01bfb69d7a 100644
3193 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
3194 +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
3195 @@ -759,7 +759,8 @@ nv50_msto_enable(struct drm_encoder *encoder)
3196
3197 slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
3198 r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
3199 - WARN_ON(!r);
3200 + if (!r)
3201 + DRM_DEBUG_KMS("Failed to allocate VCPI\n");
3202
3203 if (!mstm->links++)
3204 nv50_outp_acquire(mstm->outp);
3205 diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
3206 index b1d41c4921dd..5fd94e206029 100644
3207 --- a/drivers/gpu/drm/panel/panel-simple.c
3208 +++ b/drivers/gpu/drm/panel/panel-simple.c
3209 @@ -436,6 +436,32 @@ static const struct panel_desc ampire_am800480r3tmqwa1h = {
3210 .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
3211 };
3212
3213 +static const struct display_timing santek_st0700i5y_rbslw_f_timing = {
3214 + .pixelclock = { 26400000, 33300000, 46800000 },
3215 + .hactive = { 800, 800, 800 },
3216 + .hfront_porch = { 16, 210, 354 },
3217 + .hback_porch = { 45, 36, 6 },
3218 + .hsync_len = { 1, 10, 40 },
3219 + .vactive = { 480, 480, 480 },
3220 + .vfront_porch = { 7, 22, 147 },
3221 + .vback_porch = { 22, 13, 3 },
3222 + .vsync_len = { 1, 10, 20 },
3223 + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
3224 + DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE
3225 +};
3226 +
3227 +static const struct panel_desc armadeus_st0700_adapt = {
3228 + .timings = &santek_st0700i5y_rbslw_f_timing,
3229 + .num_timings = 1,
3230 + .bpc = 6,
3231 + .size = {
3232 + .width = 154,
3233 + .height = 86,
3234 + },
3235 + .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
3236 + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
3237 +};
3238 +
3239 static const struct drm_display_mode auo_b101aw03_mode = {
3240 .clock = 51450,
3241 .hdisplay = 1024,
3242 @@ -2330,6 +2356,9 @@ static const struct of_device_id platform_of_match[] = {
3243 }, {
3244 .compatible = "ampire,am800480r3tmqwa1h",
3245 .data = &ampire_am800480r3tmqwa1h,
3246 + }, {
3247 + .compatible = "armadeus,st0700-adapt",
3248 + .data = &armadeus_st0700_adapt,
3249 }, {
3250 .compatible = "auo,b101aw03",
3251 .data = &auo_b101aw03,
3252 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
3253 index 59e9d05ab928..0af048d1a815 100644
3254 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
3255 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
3256 @@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
3257 !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
3258 if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
3259 kfree(reply);
3260 -
3261 + reply = NULL;
3262 if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
3263 /* A checkpoint occurred. Retry. */
3264 continue;
3265 @@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
3266
3267 if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
3268 kfree(reply);
3269 -
3270 + reply = NULL;
3271 if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
3272 /* A checkpoint occurred. Retry. */
3273 continue;
3274 @@ -389,10 +389,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
3275 break;
3276 }
3277
3278 - if (retries == RETRIES) {
3279 - kfree(reply);
3280 + if (!reply)
3281 return -EINVAL;
3282 - }
3283
3284 *msg_len = reply_len;
3285 *msg = reply;
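
Both retry branches above now reset reply to NULL right after kfree(), so the loop's exit condition collapses to a single "if (!reply)" check and a stale pointer can never be freed or returned twice. The idiom in a standalone form:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
    	char *reply = NULL;
    	int retries;

    	for (retries = 0; retries < 3; retries++) {
    		reply = strdup("data");
    		if (retries < 2) {	/* simulate two transient failures */
    			free(reply);
    			reply = NULL;	/* sentinel: nothing live to free */
    			continue;
    		}
    		break;			/* success: keep reply */
    	}
    	if (!reply)			/* all retries consumed */
    		return 1;
    	printf("%s\n", reply);
    	free(reply);
    	return 0;
    }
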
3286 diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
3287 index 5eed1e7da15c..d6106e1a0d4a 100644
3288 --- a/drivers/hv/hv_kvp.c
3289 +++ b/drivers/hv/hv_kvp.c
3290 @@ -353,7 +353,9 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
3291
3292 out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
3293
3294 - default:
3295 + /* fallthrough */
3296 +
3297 + case KVP_OP_GET_IP_INFO:
3298 utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
3299 MAX_ADAPTER_ID_SIZE,
3300 UTF16_LITTLE_ENDIAN,
3301 @@ -406,6 +408,10 @@ kvp_send_key(struct work_struct *dummy)
3302 process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
3303 break;
3304 case KVP_OP_GET_IP_INFO:
3305 + /*
3306 + * We only need to pass the operation, adapter_id and
3307 + * addr_family info on to the userland kvp daemon.
3308 + */
3309 process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
3310 break;
3311 case KVP_OP_SET:
3312 @@ -421,7 +427,7 @@ kvp_send_key(struct work_struct *dummy)
3313 UTF16_LITTLE_ENDIAN,
3314 message->body.kvp_set.data.value,
3315 HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1) + 1;
3316 - break;
3317 + break;
3318
3319 case REG_U32:
3320 /*
3321 @@ -446,7 +452,10 @@ kvp_send_key(struct work_struct *dummy)
3322 break;
3323
3324 }
3325 - case KVP_OP_GET:
3326 +
3327 + /*
3328 + * The key is always a string - UTF-16 encoded.
3329 + */
3330 message->body.kvp_set.data.key_size =
3331 utf16s_to_utf8s(
3332 (wchar_t *)in_msg->body.kvp_set.data.key,
3333 @@ -454,7 +463,18 @@ kvp_send_key(struct work_struct *dummy)
3334 UTF16_LITTLE_ENDIAN,
3335 message->body.kvp_set.data.key,
3336 HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
3337 - break;
3338 +
3339 + break;
3340 +
3341 + case KVP_OP_GET:
3342 + message->body.kvp_get.data.key_size =
3343 + utf16s_to_utf8s(
3344 + (wchar_t *)in_msg->body.kvp_get.data.key,
3345 + in_msg->body.kvp_get.data.key_size,
3346 + UTF16_LITTLE_ENDIAN,
3347 + message->body.kvp_get.data.key,
3348 + HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
3349 + break;
3350
3351 case KVP_OP_DELETE:
3352 message->body.kvp_delete.key_size =
3353 @@ -464,12 +484,12 @@ kvp_send_key(struct work_struct *dummy)
3354 UTF16_LITTLE_ENDIAN,
3355 message->body.kvp_delete.key,
3356 HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
3357 - break;
3358 + break;
3359
3360 case KVP_OP_ENUMERATE:
3361 message->body.kvp_enum_data.index =
3362 in_msg->body.kvp_enum_data.index;
3363 - break;
3364 + break;
3365 }
3366
3367 kvp_transaction.state = HVUTIL_USERSPACE_REQ;
3368 diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
3369 index 3f3e8b3bf5ff..d51bf536bdf7 100644
3370 --- a/drivers/i2c/busses/i2c-at91.c
3371 +++ b/drivers/i2c/busses/i2c-at91.c
3372 @@ -270,9 +270,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
3373 writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);
3374
3375 /* send stop when last byte has been written */
3376 - if (--dev->buf_len == 0)
3377 + if (--dev->buf_len == 0) {
3378 if (!dev->use_alt_cmd)
3379 at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
3380 + at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
3381 + }
3382
3383 dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);
3384
3385 @@ -690,9 +692,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
3386 } else {
3387 at91_twi_write_next_byte(dev);
3388 at91_twi_write(dev, AT91_TWI_IER,
3389 - AT91_TWI_TXCOMP |
3390 - AT91_TWI_NACK |
3391 - AT91_TWI_TXRDY);
3392 + AT91_TWI_TXCOMP | AT91_TWI_NACK |
3393 + (dev->buf_len ? AT91_TWI_TXRDY : 0));
3394 }
3395 }
3396
3397 @@ -913,7 +914,7 @@ static struct at91_twi_pdata sama5d4_config = {
3398
3399 static struct at91_twi_pdata sama5d2_config = {
3400 .clk_max_div = 7,
3401 - .clk_offset = 4,
3402 + .clk_offset = 3,
3403 .has_unre_flag = true,
3404 .has_alt_cmd = true,
3405 .has_hold_field = true,
3406 diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
3407 index 4be29ed44755..1ca2c4d39f87 100644
3408 --- a/drivers/iio/adc/exynos_adc.c
3409 +++ b/drivers/iio/adc/exynos_adc.c
3410 @@ -115,6 +115,8 @@
3411 #define MAX_ADC_V2_CHANNELS 10
3412 #define MAX_ADC_V1_CHANNELS 8
3413 #define MAX_EXYNOS3250_ADC_CHANNELS 2
3414 +#define MAX_EXYNOS4212_ADC_CHANNELS 4
3415 +#define MAX_S5PV210_ADC_CHANNELS 10
3416
3417 /* Bit definitions common for ADC_V1 and ADC_V2 */
3418 #define ADC_CON_EN_START (1u << 0)
3419 @@ -270,6 +272,19 @@ static void exynos_adc_v1_start_conv(struct exynos_adc *info,
3420 writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
3421 }
3422
3423 +/* Exynos4212 and 4412 are like ADCv1 but with only four channels */
3424 +static const struct exynos_adc_data exynos4212_adc_data = {
3425 + .num_channels = MAX_EXYNOS4212_ADC_CHANNELS,
3426 + .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
3427 + .needs_adc_phy = true,
3428 + .phy_offset = EXYNOS_ADCV1_PHY_OFFSET,
3429 +
3430 + .init_hw = exynos_adc_v1_init_hw,
3431 + .exit_hw = exynos_adc_v1_exit_hw,
3432 + .clear_irq = exynos_adc_v1_clear_irq,
3433 + .start_conv = exynos_adc_v1_start_conv,
3434 +};
3435 +
3436 static const struct exynos_adc_data exynos_adc_v1_data = {
3437 .num_channels = MAX_ADC_V1_CHANNELS,
3438 .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
3439 @@ -282,6 +297,16 @@ static const struct exynos_adc_data exynos_adc_v1_data = {
3440 .start_conv = exynos_adc_v1_start_conv,
3441 };
3442
3443 +static const struct exynos_adc_data exynos_adc_s5pv210_data = {
3444 + .num_channels = MAX_S5PV210_ADC_CHANNELS,
3445 + .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
3446 +
3447 + .init_hw = exynos_adc_v1_init_hw,
3448 + .exit_hw = exynos_adc_v1_exit_hw,
3449 + .clear_irq = exynos_adc_v1_clear_irq,
3450 + .start_conv = exynos_adc_v1_start_conv,
3451 +};
3452 +
3453 static void exynos_adc_s3c2416_start_conv(struct exynos_adc *info,
3454 unsigned long addr)
3455 {
3456 @@ -478,6 +503,12 @@ static const struct of_device_id exynos_adc_match[] = {
3457 }, {
3458 .compatible = "samsung,s3c6410-adc",
3459 .data = &exynos_adc_s3c64xx_data,
3460 + }, {
3461 + .compatible = "samsung,s5pv210-adc",
3462 + .data = &exynos_adc_s5pv210_data,
3463 + }, {
3464 + .compatible = "samsung,exynos4212-adc",
3465 + .data = &exynos4212_adc_data,
3466 }, {
3467 .compatible = "samsung,exynos-adc-v1",
3468 .data = &exynos_adc_v1_data,
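
Each new compatible string above simply points the OF match table at a different exynos_adc_data variant, so probe picks up per-SoC channel counts and hooks without any string comparisons. A hedged kernel-style sketch of that mechanism (structure and names invented):

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct demo_adc_data {
    	unsigned int num_channels;
    };

    static const struct demo_adc_data demo_v1_data   = { .num_channels = 8 };
    static const struct demo_adc_data demo_4212_data = { .num_channels = 4 };

    static const struct of_device_id demo_adc_match[] = {
    	{ .compatible = "vendor,demo-adc-v1",   .data = &demo_v1_data },
    	{ .compatible = "vendor,demo-adc-4212", .data = &demo_4212_data },
    	{ },
    };
    MODULE_DEVICE_TABLE(of, demo_adc_match);

    static int demo_adc_probe(struct platform_device *pdev)
    {
    	/* returns the .data of the entry matching the DT compatible */
    	const struct demo_adc_data *data =
    		of_device_get_match_data(&pdev->dev);

    	if (!data)
    		return -ENODEV;
    	dev_info(&pdev->dev, "%u channels\n", data->num_channels);
    	return 0;
    }
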
3469 diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
3470 index dcb50172186f..f3a966ab35dc 100644
3471 --- a/drivers/iio/adc/rcar-gyroadc.c
3472 +++ b/drivers/iio/adc/rcar-gyroadc.c
3473 @@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
3474 dev_err(dev,
3475 "Only %i channels supported with %s, but reg = <%i>.\n",
3476 num_channels, child->name, reg);
3477 - return ret;
3478 + return -EINVAL;
3479 }
3480 }
3481
3482 @@ -400,7 +400,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
3483 dev_err(dev,
3484 "Channel %i uses different ADC mode than the rest.\n",
3485 reg);
3486 - return ret;
3487 + return -EINVAL;
3488 }
3489
3490 /* Channel is valid, grab the regulator. */
3491 diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
3492 index 50152c1b1004..357de3b4fddd 100644
3493 --- a/drivers/infiniband/core/uverbs_main.c
3494 +++ b/drivers/infiniband/core/uverbs_main.c
3495 @@ -265,6 +265,9 @@ void ib_uverbs_release_file(struct kref *ref)
3496 if (atomic_dec_and_test(&file->device->refcount))
3497 ib_uverbs_comp_dev(file->device);
3498
3499 + if (file->async_file)
3500 + kref_put(&file->async_file->ref,
3501 + ib_uverbs_release_async_event_file);
3502 kobject_put(&file->device->kobj);
3503 kfree(file);
3504 }
3505 @@ -915,10 +918,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
3506 }
3507 mutex_unlock(&file->device->lists_mutex);
3508
3509 - if (file->async_file)
3510 - kref_put(&file->async_file->ref,
3511 - ib_uverbs_release_async_event_file);
3512 -
3513 kref_put(&file->ref, ib_uverbs_release_file);
3514
3515 return 0;
3516 diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
3517 index 88e326d6cc49..d648a4167832 100644
3518 --- a/drivers/infiniband/hw/hfi1/sdma.c
3519 +++ b/drivers/infiniband/hw/hfi1/sdma.c
3520 @@ -410,10 +410,7 @@ static void sdma_flush(struct sdma_engine *sde)
3521 sdma_flush_descq(sde);
3522 spin_lock_irqsave(&sde->flushlist_lock, flags);
3523 /* copy flush list */
3524 - list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
3525 - list_del_init(&txp->list);
3526 - list_add_tail(&txp->list, &flushlist);
3527 - }
3528 + list_splice_init(&sde->flushlist, &flushlist);
3529 spin_unlock_irqrestore(&sde->flushlist_lock, flags);
3530 /* flush from flush list */
3531 list_for_each_entry_safe(txp, txp_next, &flushlist, list)
3532 @@ -2426,7 +2423,7 @@ unlock_noconn:
3533 wait->tx_count++;
3534 wait->count += tx->num_desc;
3535 }
3536 - schedule_work(&sde->flush_worker);
3537 + queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
3538 ret = -ECOMM;
3539 goto unlock;
3540 nodesc:
3541 @@ -2526,7 +2523,7 @@ unlock_noconn:
3542 }
3543 }
3544 spin_unlock(&sde->flushlist_lock);
3545 - schedule_work(&sde->flush_worker);
3546 + queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
3547 ret = -ECOMM;
3548 goto update_tail;
3549 nodesc:
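
sdma_flush() previously walked the flushlist and moved entries one at a time; list_splice_init() transplants the whole chain in O(1) and leaves the source head re-initialised, which is the same end state because the destination list was freshly initialised (head- versus tail-insertion is indistinguishable on an empty list). In kernel list terms:

    #include <linux/list.h>

    static void move_all(struct list_head *src, struct list_head *dst)
    {
    	/*
    	 * Equivalent to
    	 *	list_for_each_entry_safe(txp, n, src, list) {
    	 *		list_del_init(&txp->list);
    	 *		list_add_tail(&txp->list, dst);
    	 *	}
    	 * when dst starts out empty, but constant time.
    	 */
    	list_splice_init(src, dst);
    }

The two schedule_work() -> queue_work_on() changes are independent of this: they pin the flush worker to the engine's CPU and move it onto the high-priority system workqueue.
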
3550 diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
3551 index 9e1cac8cb260..453e5c4ac19f 100644
3552 --- a/drivers/infiniband/hw/mlx5/odp.c
3553 +++ b/drivers/infiniband/hw/mlx5/odp.c
3554 @@ -497,7 +497,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
3555 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
3556 u64 io_virt, size_t bcnt, u32 *bytes_mapped)
3557 {
3558 - u64 access_mask = ODP_READ_ALLOWED_BIT;
3559 + u64 access_mask;
3560 int npages = 0, page_shift, np;
3561 u64 start_idx, page_mask;
3562 struct ib_umem_odp *odp;
3563 @@ -522,6 +522,7 @@ next_mr:
3564 page_shift = mr->umem->page_shift;
3565 page_mask = ~(BIT(page_shift) - 1);
3566 start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
3567 + access_mask = ODP_READ_ALLOWED_BIT;
3568
3569 if (mr->umem->writable)
3570 access_mask |= ODP_WRITE_ALLOWED_BIT;
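
The odp.c fix is purely about where access_mask is (re)computed: the pagefault handler loops back to next_mr for the next MR, and initialising the mask only at function entry let a previous iteration's ODP_WRITE_ALLOWED_BIT leak into a read-only mapping. Distilled to plain C:

    #include <stdio.h>

    #define READ_BIT  (1u << 0)
    #define WRITE_BIT (1u << 1)

    int main(void)
    {
    	int writable[] = { 1, 0 };	/* two MRs: writable, then read-only */
    	unsigned int access_mask;
    	int i = 0;

    next_mr:
    	access_mask = READ_BIT;		/* reset on each pass -- the fix */
    	if (writable[i])
    		access_mask |= WRITE_BIT;
    	printf("mr %d mask %#x\n", i, access_mask);	/* 0x3 then 0x1 */
    	if (++i < 2)
    		goto next_mr;
    	return 0;
    }
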
3571 diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
3572 index 2c1114ee0c6d..bc6a44a16445 100644
3573 --- a/drivers/infiniband/ulp/srp/ib_srp.c
3574 +++ b/drivers/infiniband/ulp/srp/ib_srp.c
3575 @@ -3401,13 +3401,17 @@ static const match_table_t srp_opt_tokens = {
3576
3577 /**
3578 * srp_parse_in - parse an IP address and port number combination
3579 + * @net: [in] Network namespace.
3580 + * @sa: [out] Address family, IP address and port number.
3581 + * @addr_port_str: [in] IP address and port number.
3582 + * @has_port: [out] Whether or not @addr_port_str includes a port number.
3583 *
3584 * Parse the following address formats:
3585 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3586 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3587 */
3588 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3589 - const char *addr_port_str)
3590 + const char *addr_port_str, bool *has_port)
3591 {
3592 char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3593 char *port_str;
3594 @@ -3416,9 +3420,12 @@ static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3595 if (!addr)
3596 return -ENOMEM;
3597 port_str = strrchr(addr, ':');
3598 - if (!port_str)
3599 - return -EINVAL;
3600 - *port_str++ = '\0';
3601 + if (port_str && strchr(port_str, ']'))
3602 + port_str = NULL;
3603 + if (port_str)
3604 + *port_str++ = '\0';
3605 + if (has_port)
3606 + *has_port = port_str != NULL;
3607 ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3608 if (ret && addr[0]) {
3609 addr_end = addr + strlen(addr) - 1;
3610 @@ -3440,6 +3447,7 @@ static int srp_parse_options(struct net *net, const char *buf,
3611 char *p;
3612 substring_t args[MAX_OPT_ARGS];
3613 unsigned long long ull;
3614 + bool has_port;
3615 int opt_mask = 0;
3616 int token;
3617 int ret = -EINVAL;
3618 @@ -3538,7 +3546,8 @@ static int srp_parse_options(struct net *net, const char *buf,
3619 ret = -ENOMEM;
3620 goto out;
3621 }
3622 - ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
3623 + ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3624 + NULL);
3625 if (ret < 0) {
3626 pr_warn("bad source parameter '%s'\n", p);
3627 kfree(p);
3628 @@ -3554,7 +3563,10 @@ static int srp_parse_options(struct net *net, const char *buf,
3629 ret = -ENOMEM;
3630 goto out;
3631 }
3632 - ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
3633 + ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3634 + &has_port);
3635 + if (!has_port)
3636 + ret = -EINVAL;
3637 if (ret < 0) {
3638 pr_warn("bad dest parameter '%s'\n", p);
3639 kfree(p);
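
srp_parse_in() now treats the last ':' as a port separator only when no ']' follows it, so a bare bracketed IPv6 address like "[1::2]" parses as address-only (has_port = false), while "[1::2]:5" and "1.2.3.4:5" still split; the destination path then insists on a port while the source path may omit one. The split logic, runnable on its own (helper name invented):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void split(const char *in)
    {
    	char *addr = strdup(in);
    	char *port_str = strrchr(addr, ':');

    	if (port_str && strchr(port_str, ']'))	/* ':' only inside [v6] */
    		port_str = NULL;
    	if (port_str)
    		*port_str++ = '\0';
    	printf("%-12s -> addr=%s port=%s\n", in, addr,
    	       port_str ? port_str : "(none)");
    	free(addr);
    }

    int main(void)
    {
    	split("1.2.3.4:5");	/* addr=1.2.3.4  port=5      */
    	split("[1::2]:5");	/* addr=[1::2]   port=5      */
    	split("[1::2]");	/* addr=[1::2]   port=(none) */
    	return 0;
    }
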
3640 diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
3641 index 60348d707b99..9a576ae837dc 100644
3642 --- a/drivers/iommu/iova.c
3643 +++ b/drivers/iommu/iova.c
3644 @@ -148,8 +148,9 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
3645 struct iova *cached_iova;
3646
3647 cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
3648 - if (free->pfn_hi < iovad->dma_32bit_pfn &&
3649 - free->pfn_lo >= cached_iova->pfn_lo)
3650 + if (free == cached_iova ||
3651 + (free->pfn_hi < iovad->dma_32bit_pfn &&
3652 + free->pfn_lo >= cached_iova->pfn_lo))
3653 iovad->cached32_node = rb_next(&free->node);
3654
3655 cached_iova = rb_entry(iovad->cached_node, struct iova, node);
3656 diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
3657 index 3f4211b5cd33..45f684689c35 100644
3658 --- a/drivers/md/bcache/btree.c
3659 +++ b/drivers/md/bcache/btree.c
3660 @@ -35,7 +35,7 @@
3661 #include <linux/rcupdate.h>
3662 #include <linux/sched/clock.h>
3663 #include <linux/rculist.h>
3664 -
3665 +#include <linux/delay.h>
3666 #include <trace/events/bcache.h>
3667
3668 /*
3669 @@ -649,7 +649,25 @@ static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
3670 up(&b->io_mutex);
3671 }
3672
3673 +retry:
3674 + /*
3675 + * BTREE_NODE_dirty might be cleared in btree_flush_write() by
3676 + * __bch_btree_node_write(). To avoid an extra flush, acquire
3677 + * b->write_lock before checking the BTREE_NODE_dirty bit.
3678 + */
3679 mutex_lock(&b->write_lock);
3680 + /*
3681 + * If this btree node is selected by the journal code in
3682 + * btree_flush_write(), delay and retry until the node has been
3683 + * flushed and the BTREE_NODE_journal_flush bit has been cleared.
3684 + */
3685 + if (btree_node_journal_flush(b)) {
3686 + pr_debug("bnode %p is flushing by journal, retry", b);
3687 + mutex_unlock(&b->write_lock);
3688 + udelay(1);
3689 + goto retry;
3690 + }
3691 +
3692 if (btree_node_dirty(b))
3693 __bch_btree_node_write(b, &cl);
3694 mutex_unlock(&b->write_lock);
3695 @@ -772,10 +790,15 @@ void bch_btree_cache_free(struct cache_set *c)
3696 while (!list_empty(&c->btree_cache)) {
3697 b = list_first_entry(&c->btree_cache, struct btree, list);
3698
3699 - if (btree_node_dirty(b))
3700 + /*
3701 + * This function is called by cache_set_free(); there is no
3702 + * I/O request on the cache now, so it is no longer necessary
3703 + * to acquire b->write_lock before clearing BTREE_NODE_dirty.
3704 + */
3705 + if (btree_node_dirty(b)) {
3706 btree_complete_write(b, btree_current_write(b));
3707 - clear_bit(BTREE_NODE_dirty, &b->flags);
3708 -
3709 + clear_bit(BTREE_NODE_dirty, &b->flags);
3710 + }
3711 mca_data_free(b);
3712 }
3713
3714 @@ -1061,11 +1084,25 @@ static void btree_node_free(struct btree *b)
3715
3716 BUG_ON(b == b->c->root);
3717
3718 +retry:
3719 mutex_lock(&b->write_lock);
3720 + /*
3721 + * If the btree node is selected and being flushed in
3722 + * btree_flush_write(), delay and retry until the
3723 + * BTREE_NODE_journal_flush bit is cleared; only then is it safe
3724 + * to free the btree node here, otherwise it races with the flush.
3725 + */
3726 + if (btree_node_journal_flush(b)) {
3727 + mutex_unlock(&b->write_lock);
3728 + pr_debug("bnode %p journal_flush set, retry", b);
3729 + udelay(1);
3730 + goto retry;
3731 + }
3732
3733 - if (btree_node_dirty(b))
3734 + if (btree_node_dirty(b)) {
3735 btree_complete_write(b, btree_current_write(b));
3736 - clear_bit(BTREE_NODE_dirty, &b->flags);
3737 + clear_bit(BTREE_NODE_dirty, &b->flags);
3738 + }
3739
3740 mutex_unlock(&b->write_lock);
3741
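
mca_reap() and btree_node_free() now share one protocol with the journal code: take b->write_lock, and if BTREE_NODE_journal_flush is set (btree_flush_write() has claimed the node), drop the lock, udelay(1) and retry until the journal side clears the bit under the same lock. A userspace analogue of that handshake, with a plain mutex and flag standing in for the kernel primitives:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool journal_flush;	/* BTREE_NODE_journal_flush stand-in */

    static void *journal_side(void *arg)
    {
    	pthread_mutex_lock(&write_lock);
    	journal_flush = true;	/* claim node, as btree_flush_write() */
    	pthread_mutex_unlock(&write_lock);
    	usleep(1000);		/* "write out" the node */
    	pthread_mutex_lock(&write_lock);
    	journal_flush = false;	/* done: release the claim */
    	pthread_mutex_unlock(&write_lock);
    	return NULL;
    }

    int main(void)
    {
    	pthread_t t;

    	pthread_create(&t, NULL, journal_side, NULL);
    	usleep(100);
    retry:
    	pthread_mutex_lock(&write_lock);
    	if (journal_flush) {	/* journal owns the node: back off */
    		pthread_mutex_unlock(&write_lock);
    		usleep(1);	/* udelay(1) analogue */
    		goto retry;
    	}
    	puts("safe to write out or free the node");
    	pthread_mutex_unlock(&write_lock);
    	pthread_join(t, NULL);
    	return 0;
    }
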
3742 diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
3743 index a68d6c55783b..4d0cca145f69 100644
3744 --- a/drivers/md/bcache/btree.h
3745 +++ b/drivers/md/bcache/btree.h
3746 @@ -158,11 +158,13 @@ enum btree_flags {
3747 BTREE_NODE_io_error,
3748 BTREE_NODE_dirty,
3749 BTREE_NODE_write_idx,
3750 + BTREE_NODE_journal_flush,
3751 };
3752
3753 BTREE_FLAG(io_error);
3754 BTREE_FLAG(dirty);
3755 BTREE_FLAG(write_idx);
3756 +BTREE_FLAG(journal_flush);
3757
3758 static inline struct btree_write *btree_current_write(struct btree *b)
3759 {
3760 diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
3761 index c809724e6571..886710043025 100644
3762 --- a/drivers/md/bcache/extents.c
3763 +++ b/drivers/md/bcache/extents.c
3764 @@ -538,6 +538,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
3765 {
3766 struct btree *b = container_of(bk, struct btree, keys);
3767 unsigned int i, stale;
3768 + char buf[80];
3769
3770 if (!KEY_PTRS(k) ||
3771 bch_extent_invalid(bk, k))
3772 @@ -547,19 +548,19 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
3773 if (!ptr_available(b->c, k, i))
3774 return true;
3775
3776 - if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
3777 - return false;
3778 -
3779 for (i = 0; i < KEY_PTRS(k); i++) {
3780 stale = ptr_stale(b->c, k, i);
3781
3782 - btree_bug_on(stale > 96, b,
3783 + if (stale && KEY_DIRTY(k)) {
3784 + bch_extent_to_text(buf, sizeof(buf), k);
3785 + pr_info("stale dirty pointer, stale %u, key: %s",
3786 + stale, buf);
3787 + }
3788 +
3789 + btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
3790 "key too stale: %i, need_gc %u",
3791 stale, b->c->need_gc);
3792
3793 - btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
3794 - b, "stale dirty pointer");
3795 -
3796 if (stale)
3797 return true;
3798
3799 diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
3800 index ec1e35a62934..7bb15cddca5e 100644
3801 --- a/drivers/md/bcache/journal.c
3802 +++ b/drivers/md/bcache/journal.c
3803 @@ -404,6 +404,7 @@ static void btree_flush_write(struct cache_set *c)
3804 retry:
3805 best = NULL;
3806
3807 + mutex_lock(&c->bucket_lock);
3808 for_each_cached_btree(b, c, i)
3809 if (btree_current_write(b)->journal) {
3810 if (!best)
3811 @@ -416,9 +417,14 @@ retry:
3812 }
3813
3814 b = best;
3815 + if (b)
3816 + set_btree_node_journal_flush(b);
3817 + mutex_unlock(&c->bucket_lock);
3818 +
3819 if (b) {
3820 mutex_lock(&b->write_lock);
3821 if (!btree_current_write(b)->journal) {
3822 + clear_bit(BTREE_NODE_journal_flush, &b->flags);
3823 mutex_unlock(&b->write_lock);
3824 /* We raced */
3825 atomic_long_inc(&c->retry_flush_write);
3826 @@ -426,6 +432,7 @@ retry:
3827 }
3828
3829 __bch_btree_node_write(b, NULL);
3830 + clear_bit(BTREE_NODE_journal_flush, &b->flags);
3831 mutex_unlock(&b->write_lock);
3832 }
3833 }
3834 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
3835 index f3dcc7640319..34f5de13a93d 100644
3836 --- a/drivers/md/dm-crypt.c
3837 +++ b/drivers/md/dm-crypt.c
3838 @@ -949,6 +949,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
3839 {
3840 #ifdef CONFIG_BLK_DEV_INTEGRITY
3841 struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
3842 + struct mapped_device *md = dm_table_get_md(ti->table);
3843
3844 /* From now we require underlying device with our integrity profile */
3845 if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
3846 @@ -968,7 +969,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
3847
3848 if (crypt_integrity_aead(cc)) {
3849 cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
3850 - DMINFO("Integrity AEAD, tag size %u, IV size %u.",
3851 + DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
3852 cc->integrity_tag_size, cc->integrity_iv_size);
3853
3854 if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
3855 @@ -976,7 +977,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
3856 return -EINVAL;
3857 }
3858 } else if (cc->integrity_iv_size)
3859 - DMINFO("Additional per-sector space %u bytes for IV.",
3860 + DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
3861 cc->integrity_iv_size);
3862
3863 if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
3864 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
3865 index baa966e2778c..481e54ded9dc 100644
3866 --- a/drivers/md/dm-mpath.c
3867 +++ b/drivers/md/dm-mpath.c
3868 @@ -554,8 +554,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
3869 return DM_MAPIO_REMAPPED;
3870 }
3871
3872 -static void multipath_release_clone(struct request *clone)
3873 +static void multipath_release_clone(struct request *clone,
3874 + union map_info *map_context)
3875 {
3876 + if (unlikely(map_context)) {
3877 + /*
3878 + * A non-NULL map_context means the caller is still in the map
3879 + * method; we must undo multipath_clone_and_map().
3880 + */
3881 + struct dm_mpath_io *mpio = get_mpio(map_context);
3882 + struct pgpath *pgpath = mpio->pgpath;
3883 +
3884 + if (pgpath && pgpath->pg->ps.type->end_io)
3885 + pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
3886 + &pgpath->path,
3887 + mpio->nr_bytes);
3888 + }
3889 +
3890 blk_put_request(clone);
3891 }
3892
3893 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
3894 index 264b84e274aa..17c6a73c536c 100644
3895 --- a/drivers/md/dm-rq.c
3896 +++ b/drivers/md/dm-rq.c
3897 @@ -219,7 +219,7 @@ static void dm_end_request(struct request *clone, blk_status_t error)
3898 struct request *rq = tio->orig;
3899
3900 blk_rq_unprep_clone(clone);
3901 - tio->ti->type->release_clone_rq(clone);
3902 + tio->ti->type->release_clone_rq(clone, NULL);
3903
3904 rq_end_stats(md, rq);
3905 if (!rq->q->mq_ops)
3906 @@ -270,7 +270,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
3907 rq_end_stats(md, rq);
3908 if (tio->clone) {
3909 blk_rq_unprep_clone(tio->clone);
3910 - tio->ti->type->release_clone_rq(tio->clone);
3911 + tio->ti->type->release_clone_rq(tio->clone, NULL);
3912 }
3913
3914 if (!rq->q->mq_ops)
3915 @@ -495,7 +495,7 @@ check_again:
3916 case DM_MAPIO_REMAPPED:
3917 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
3918 /* -ENOMEM */
3919 - ti->type->release_clone_rq(clone);
3920 + ti->type->release_clone_rq(clone, &tio->info);
3921 return DM_MAPIO_REQUEUE;
3922 }
3923
3924 @@ -505,7 +505,7 @@ check_again:
3925 ret = dm_dispatch_clone_request(clone, rq);
3926 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
3927 blk_rq_unprep_clone(clone);
3928 - tio->ti->type->release_clone_rq(clone);
3929 + tio->ti->type->release_clone_rq(clone, &tio->info);
3930 tio->clone = NULL;
3931 if (!rq->q->mq_ops)
3932 r = DM_MAPIO_DELAY_REQUEUE;
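
The new map_context argument encodes which side of dispatch the release happens on: dm-rq passes NULL from the completion and requeue paths (the clone already ran, so the path selector's end_io has fired) and &tio->info from inside map_request() (the clone never dispatched, so multipath must unwind its own accounting). Reduced to a toy contract:

    #include <stdio.h>
    #include <stdlib.h>

    union map_info { void *ptr; };

    /* map_context != NULL only when called from within the map method */
    static void release_clone(void *clone, union map_info *map_context)
    {
    	if (map_context)
    		puts("undo path-selector accounting");
    	free(clone);		/* blk_put_request() stand-in */
    }

    int main(void)
    {
    	union map_info info = { .ptr = NULL };

    	release_clone(malloc(8), NULL);		/* completion path */
    	release_clone(malloc(8), &info);	/* map-error path  */
    	return 0;
    }
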
3933 diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
3934 index 314d17ca6466..64dd0b34fcf4 100644
3935 --- a/drivers/md/dm-target.c
3936 +++ b/drivers/md/dm-target.c
3937 @@ -136,7 +136,8 @@ static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
3938 return DM_MAPIO_KILL;
3939 }
3940
3941 -static void io_err_release_clone_rq(struct request *clone)
3942 +static void io_err_release_clone_rq(struct request *clone,
3943 + union map_info *map_context)
3944 {
3945 }
3946
3947 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
3948 index ed3caceaed07..6a26afcc1fd6 100644
3949 --- a/drivers/md/dm-thin-metadata.c
3950 +++ b/drivers/md/dm-thin-metadata.c
3951 @@ -2001,16 +2001,19 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
3952
3953 int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
3954 {
3955 - int r;
3956 + int r = -EINVAL;
3957 struct dm_block *sblock;
3958 struct thin_disk_superblock *disk_super;
3959
3960 down_write(&pmd->root_lock);
3961 + if (pmd->fail_io)
3962 + goto out;
3963 +
3964 pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
3965
3966 r = superblock_lock(pmd, &sblock);
3967 if (r) {
3968 - DMERR("couldn't read superblock");
3969 + DMERR("couldn't lock superblock");
3970 goto out;
3971 }
3972
3973 diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
3974 index 29a2ab9e77c5..ad8677d8c896 100644
3975 --- a/drivers/media/cec/Makefile
3976 +++ b/drivers/media/cec/Makefile
3977 @@ -1,5 +1,5 @@
3978 # SPDX-License-Identifier: GPL-2.0
3979 -cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
3980 +cec-objs := cec-core.o cec-adap.o cec-api.o
3981
3982 ifeq ($(CONFIG_CEC_NOTIFIER),y)
3983 cec-objs += cec-notifier.o
3984 diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
3985 index a7ea27d2aa8e..4a15d53f659e 100644
3986 --- a/drivers/media/cec/cec-adap.c
3987 +++ b/drivers/media/cec/cec-adap.c
3988 @@ -62,6 +62,19 @@ static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr
3989 return adap->log_addrs.primary_device_type[i < 0 ? 0 : i];
3990 }
3991
3992 +u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
3993 + unsigned int *offset)
3994 +{
3995 + unsigned int loc = cec_get_edid_spa_location(edid, size);
3996 +
3997 + if (offset)
3998 + *offset = loc;
3999 + if (loc == 0)
4000 + return CEC_PHYS_ADDR_INVALID;
4001 + return (edid[loc] << 8) | edid[loc + 1];
4002 +}
4003 +EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
4004 +
4005 /*
4006 * Queue a new event for this filehandle. If ts == 0, then set it
4007 * to the current time.
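
cec_get_edid_phys_addr() stays in the CEC core (its siblings move to V4L2 further down) and simply reads the two Source Physical Address bytes at the offset the EDID parser reports, high byte first, so bytes 0x12 0x34 decode to the familiar 1.2.3.4 notation. The decode step on its own, with the location lookup elided:

    #include <stdio.h>

    int main(void)
    {
    	unsigned char spa[2] = { 0x12, 0x34 };	/* edid[loc], edid[loc+1] */
    	unsigned short pa = (spa[0] << 8) | spa[1];

    	printf("%x.%x.%x.%x\n",			/* prints 1.2.3.4 */
    	       pa >> 12, (pa >> 8) & 0xf, (pa >> 4) & 0xf, pa & 0xf);
    	return 0;
    }
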
4008 diff --git a/drivers/media/cec/cec-edid.c b/drivers/media/cec/cec-edid.c
4009 deleted file mode 100644
4010 index f587e8eaefd8..000000000000
4011 --- a/drivers/media/cec/cec-edid.c
4012 +++ /dev/null
4013 @@ -1,95 +0,0 @@
4014 -// SPDX-License-Identifier: GPL-2.0-only
4015 -/*
4016 - * cec-edid - HDMI Consumer Electronics Control EDID & CEC helper functions
4017 - *
4018 - * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
4019 - */
4020 -
4021 -#include <linux/module.h>
4022 -#include <linux/kernel.h>
4023 -#include <linux/types.h>
4024 -#include <media/cec.h>
4025 -
4026 -u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
4027 - unsigned int *offset)
4028 -{
4029 - unsigned int loc = cec_get_edid_spa_location(edid, size);
4030 -
4031 - if (offset)
4032 - *offset = loc;
4033 - if (loc == 0)
4034 - return CEC_PHYS_ADDR_INVALID;
4035 - return (edid[loc] << 8) | edid[loc + 1];
4036 -}
4037 -EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
4038 -
4039 -void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
4040 -{
4041 - unsigned int loc = cec_get_edid_spa_location(edid, size);
4042 - u8 sum = 0;
4043 - unsigned int i;
4044 -
4045 - if (loc == 0)
4046 - return;
4047 - edid[loc] = phys_addr >> 8;
4048 - edid[loc + 1] = phys_addr & 0xff;
4049 - loc &= ~0x7f;
4050 -
4051 - /* update the checksum */
4052 - for (i = loc; i < loc + 127; i++)
4053 - sum += edid[i];
4054 - edid[i] = 256 - sum;
4055 -}
4056 -EXPORT_SYMBOL_GPL(cec_set_edid_phys_addr);
4057 -
4058 -u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
4059 -{
4060 - /* Check if input is sane */
4061 - if (WARN_ON(input == 0 || input > 0xf))
4062 - return CEC_PHYS_ADDR_INVALID;
4063 -
4064 - if (phys_addr == 0)
4065 - return input << 12;
4066 -
4067 - if ((phys_addr & 0x0fff) == 0)
4068 - return phys_addr | (input << 8);
4069 -
4070 - if ((phys_addr & 0x00ff) == 0)
4071 - return phys_addr | (input << 4);
4072 -
4073 - if ((phys_addr & 0x000f) == 0)
4074 - return phys_addr | input;
4075 -
4076 - /*
4077 - * All nibbles are used so no valid physical addresses can be assigned
4078 - * to the input.
4079 - */
4080 - return CEC_PHYS_ADDR_INVALID;
4081 -}
4082 -EXPORT_SYMBOL_GPL(cec_phys_addr_for_input);
4083 -
4084 -int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
4085 -{
4086 - int i;
4087 -
4088 - if (parent)
4089 - *parent = phys_addr;
4090 - if (port)
4091 - *port = 0;
4092 - if (phys_addr == CEC_PHYS_ADDR_INVALID)
4093 - return 0;
4094 - for (i = 0; i < 16; i += 4)
4095 - if (phys_addr & (0xf << i))
4096 - break;
4097 - if (i == 16)
4098 - return 0;
4099 - if (parent)
4100 - *parent = phys_addr & (0xfff0 << i);
4101 - if (port)
4102 - *port = (phys_addr >> i) & 0xf;
4103 - for (i += 4; i < 16; i += 4)
4104 - if ((phys_addr & (0xf << i)) == 0)
4105 - return -EINVAL;
4106 - return 0;
4107 -}
4108 -EXPORT_SYMBOL_GPL(cec_phys_addr_validate);
4109 diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
4110 index 63c9ac2c6a5f..8b1ae1d6680b 100644
4111 --- a/drivers/media/i2c/Kconfig
4112 +++ b/drivers/media/i2c/Kconfig
4113 @@ -60,8 +60,9 @@ config VIDEO_TDA1997X
4114 tristate "NXP TDA1997x HDMI receiver"
4115 depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
4116 depends on SND_SOC
4117 - select SND_PCM
4118 select HDMI
4119 + select SND_PCM
4120 + select V4L2_FWNODE
4121 ---help---
4122 V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
4123
4124 diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
4125 index f01964c36ad5..a4b0a89c7e7e 100644
4126 --- a/drivers/media/i2c/adv7604.c
4127 +++ b/drivers/media/i2c/adv7604.c
4128 @@ -2297,8 +2297,8 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
4129 edid->blocks = 2;
4130 return -E2BIG;
4131 }
4132 - pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, &spa_loc);
4133 - err = cec_phys_addr_validate(pa, &pa, NULL);
4134 + pa = v4l2_get_edid_phys_addr(edid->edid, edid->blocks * 128, &spa_loc);
4135 + err = v4l2_phys_addr_validate(pa, &pa, NULL);
4136 if (err)
4137 return err;
4138
4139 diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
4140 index bb43a75ed6d0..58662ba92d4f 100644
4141 --- a/drivers/media/i2c/adv7842.c
4142 +++ b/drivers/media/i2c/adv7842.c
4143 @@ -791,8 +791,8 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
4144 return 0;
4145 }
4146
4147 - pa = cec_get_edid_phys_addr(edid, 256, &spa_loc);
4148 - err = cec_phys_addr_validate(pa, &pa, NULL);
4149 + pa = v4l2_get_edid_phys_addr(edid, 256, &spa_loc);
4150 + err = v4l2_phys_addr_validate(pa, &pa, NULL);
4151 if (err)
4152 return err;
4153
4154 diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
4155 index 26070fb6ce4e..e4c0a27b636a 100644
4156 --- a/drivers/media/i2c/tc358743.c
4157 +++ b/drivers/media/i2c/tc358743.c
4158 @@ -1789,7 +1789,7 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
4159 return -E2BIG;
4160 }
4161 pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
4162 - err = cec_phys_addr_validate(pa, &pa, NULL);
4163 + err = v4l2_phys_addr_validate(pa, &pa, NULL);
4164 if (err)
4165 return err;
4166
4167 diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
4168 index d38682265892..1d9c028e52cb 100644
4169 --- a/drivers/media/platform/stm32/stm32-dcmi.c
4170 +++ b/drivers/media/platform/stm32/stm32-dcmi.c
4171 @@ -1681,7 +1681,7 @@ static int dcmi_probe(struct platform_device *pdev)
4172 if (irq <= 0) {
4173 if (irq != -EPROBE_DEFER)
4174 dev_err(&pdev->dev, "Could not get irq\n");
4175 - return irq;
4176 + return irq ? irq : -ENXIO;
4177 }
4178
4179 dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4180 diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
4181 index 462099a141e4..7b8cf661f238 100644
4182 --- a/drivers/media/platform/vim2m.c
4183 +++ b/drivers/media/platform/vim2m.c
4184 @@ -3,7 +3,8 @@
4185 *
4186 * This is a virtual device driver for testing mem-to-mem videobuf framework.
4187 * It simulates a device that uses memory buffers for both source and
4188 - * destination, processes the data and issues an "irq" (simulated by a timer).
4189 + * destination, processes the data and issues an "irq" (simulated by a delayed
4190 + * workqueue).
4191 * The device is capable of multi-instance, multi-buffer-per-transaction
4192 * operation (via the mem2mem framework).
4193 *
4194 @@ -19,7 +20,6 @@
4195 #include <linux/module.h>
4196 #include <linux/delay.h>
4197 #include <linux/fs.h>
4198 -#include <linux/timer.h>
4199 #include <linux/sched.h>
4200 #include <linux/slab.h>
4201
4202 @@ -148,7 +148,7 @@ struct vim2m_dev {
4203 struct mutex dev_mutex;
4204 spinlock_t irqlock;
4205
4206 - struct timer_list timer;
4207 + struct delayed_work work_run;
4208
4209 struct v4l2_m2m_dev *m2m_dev;
4210 };
4211 @@ -336,12 +336,6 @@ static int device_process(struct vim2m_ctx *ctx,
4212 return 0;
4213 }
4214
4215 -static void schedule_irq(struct vim2m_dev *dev, int msec_timeout)
4216 -{
4217 - dprintk(dev, "Scheduling a simulated irq\n");
4218 - mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
4219 -}
4220 -
4221 /*
4222 * mem2mem callbacks
4223 */
4224 @@ -387,13 +381,14 @@ static void device_run(void *priv)
4225
4226 device_process(ctx, src_buf, dst_buf);
4227
4228 - /* Run a timer, which simulates a hardware irq */
4229 - schedule_irq(dev, ctx->transtime);
4230 + /* Run delayed work, which simulates a hardware irq */
4231 + schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime));
4232 }
4233
4234 -static void device_isr(struct timer_list *t)
4235 +static void device_work(struct work_struct *w)
4236 {
4237 - struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer);
4238 + struct vim2m_dev *vim2m_dev =
4239 + container_of(w, struct vim2m_dev, work_run.work);
4240 struct vim2m_ctx *curr_ctx;
4241 struct vb2_v4l2_buffer *src_vb, *dst_vb;
4242 unsigned long flags;
4243 @@ -802,9 +797,13 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
4244 static void vim2m_stop_streaming(struct vb2_queue *q)
4245 {
4246 struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
4247 + struct vim2m_dev *dev = ctx->dev;
4248 struct vb2_v4l2_buffer *vbuf;
4249 unsigned long flags;
4250
4251 + if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
4252 + cancel_delayed_work_sync(&dev->work_run);
4253 +
4254 for (;;) {
4255 if (V4L2_TYPE_IS_OUTPUT(q->type))
4256 vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
4257 @@ -1015,6 +1014,7 @@ static int vim2m_probe(struct platform_device *pdev)
4258 vfd = &dev->vfd;
4259 vfd->lock = &dev->dev_mutex;
4260 vfd->v4l2_dev = &dev->v4l2_dev;
4261 + INIT_DELAYED_WORK(&dev->work_run, device_work);
4262
4263 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
4264 if (ret) {
4265 @@ -1026,7 +1026,6 @@ static int vim2m_probe(struct platform_device *pdev)
4266 v4l2_info(&dev->v4l2_dev,
4267 "Device registered as /dev/video%d\n", vfd->num);
4268
4269 - timer_setup(&dev->timer, device_isr, 0);
4270 platform_set_drvdata(pdev, dev);
4271
4272 dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
4273 @@ -1083,7 +1082,6 @@ static int vim2m_remove(struct platform_device *pdev)
4274 media_device_cleanup(&dev->mdev);
4275 #endif
4276 v4l2_m2m_release(dev->m2m_dev);
4277 - del_timer_sync(&dev->timer);
4278 video_unregister_device(&dev->vfd);
4279 v4l2_device_unregister(&dev->v4l2_dev);
4280
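
The vim2m conversion swaps a timer for a delayed work item: the fake-interrupt handler now runs in process context, and teardown becomes cancel_delayed_work_sync(), which also waits for a handler already in flight. The minimal lifecycle as a hedged kernel-style sketch (names invented):

    #include <linux/jiffies.h>
    #include <linux/module.h>
    #include <linux/workqueue.h>

    struct demo_dev {
    	struct delayed_work work_run;
    };

    static void demo_work(struct work_struct *w)
    {
    	struct demo_dev *dev =
    		container_of(w, struct demo_dev, work_run.work);

    	(void)dev;	/* complete the "transaction" here */
    }

    static void demo_init(struct demo_dev *dev)
    {
    	INIT_DELAYED_WORK(&dev->work_run, demo_work);
    }

    static void demo_fire(struct demo_dev *dev, unsigned int msec)
    {
    	schedule_delayed_work(&dev->work_run, msecs_to_jiffies(msec));
    }

    static void demo_stop(struct demo_dev *dev)
    {
    	cancel_delayed_work_sync(&dev->work_run);	/* waits if running */
    }
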
4281 diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
4282 index 3b09ffceefd5..2e273f4dfc29 100644
4283 --- a/drivers/media/platform/vivid/vivid-vid-cap.c
4284 +++ b/drivers/media/platform/vivid/vivid-vid-cap.c
4285 @@ -1724,7 +1724,7 @@ int vidioc_s_edid(struct file *file, void *_fh,
4286 return -E2BIG;
4287 }
4288 phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
4289 - ret = cec_phys_addr_validate(phys_addr, &phys_addr, NULL);
4290 + ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
4291 if (ret)
4292 return ret;
4293
4294 @@ -1740,7 +1740,7 @@ set_phys_addr:
4295
4296 for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
4297 cec_s_phys_addr(dev->cec_tx_adap[i],
4298 - cec_phys_addr_for_input(phys_addr, i + 1),
4299 + v4l2_phys_addr_for_input(phys_addr, i + 1),
4300 false);
4301 return 0;
4302 }
4303 diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
4304 index 2079861d2270..e108e9befb77 100644
4305 --- a/drivers/media/platform/vivid/vivid-vid-common.c
4306 +++ b/drivers/media/platform/vivid/vivid-vid-common.c
4307 @@ -863,7 +863,7 @@ int vidioc_g_edid(struct file *file, void *_fh,
4308 if (edid->blocks > dev->edid_blocks - edid->start_block)
4309 edid->blocks = dev->edid_blocks - edid->start_block;
4310 if (adap)
4311 - cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
4312 + v4l2_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
4313 memcpy(edid->edid, dev->edid + edid->start_block * 128, edid->blocks * 128);
4314 return 0;
4315 }
4316 diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
4317 index c7c600c1f63b..a24b40dfec97 100644
4318 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c
4319 +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
4320 @@ -15,6 +15,7 @@
4321 #include <media/v4l2-dv-timings.h>
4322 #include <linux/math64.h>
4323 #include <linux/hdmi.h>
4324 +#include <media/cec.h>
4325
4326 MODULE_AUTHOR("Hans Verkuil");
4327 MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
4328 @@ -942,3 +943,153 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
4329 return c;
4330 }
4331 EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry);
4332 +
4333 +/**
4334 + * v4l2_get_edid_phys_addr() - find and return the physical address
4335 + *
4336 + * @edid: pointer to the EDID data
4337 + * @size: size in bytes of the EDID data
4338 + * @offset: If not %NULL then the location of the physical address
4339 + * bytes in the EDID will be returned here. This is set to 0
4340 + * if there is no physical address found.
4341 + *
4342 + * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
4343 + */
4344 +u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
4345 + unsigned int *offset)
4346 +{
4347 + unsigned int loc = cec_get_edid_spa_location(edid, size);
4348 +
4349 + if (offset)
4350 + *offset = loc;
4351 + if (loc == 0)
4352 + return CEC_PHYS_ADDR_INVALID;
4353 + return (edid[loc] << 8) | edid[loc + 1];
4354 +}
4355 +EXPORT_SYMBOL_GPL(v4l2_get_edid_phys_addr);
4356 +
4357 +/**
4358 + * v4l2_set_edid_phys_addr() - find and set the physical address
4359 + *
4360 + * @edid: pointer to the EDID data
4361 + * @size: size in bytes of the EDID data
4362 + * @phys_addr: the new physical address
4363 + *
4364 + * This function finds the location of the physical address in the EDID
4365 + * and fills in the given physical address and updates the checksum
4366 + * at the end of the EDID block. It does nothing if the EDID doesn't
4367 + * contain a physical address.
4368 + */
4369 +void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
4370 +{
4371 + unsigned int loc = cec_get_edid_spa_location(edid, size);
4372 + u8 sum = 0;
4373 + unsigned int i;
4374 +
4375 + if (loc == 0)
4376 + return;
4377 + edid[loc] = phys_addr >> 8;
4378 + edid[loc + 1] = phys_addr & 0xff;
4379 + loc &= ~0x7f;
4380 +
4381 + /* update the checksum */
4382 + for (i = loc; i < loc + 127; i++)
4383 + sum += edid[i];
4384 + edid[i] = 256 - sum;
4385 +}
4386 +EXPORT_SYMBOL_GPL(v4l2_set_edid_phys_addr);
4387 +
4388 +/**
4389 + * v4l2_phys_addr_for_input() - calculate the PA for an input
4390 + *
4391 + * @phys_addr: the physical address of the parent
4392 + * @input: the number of the input port, must be between 1 and 15
4393 + *
4394 + * This function calculates a new physical address based on the input
4395 + * port number. For example:
4396 + *
4397 + * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
4398 + *
4399 + * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
4400 + *
4401 + * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
4402 + *
4403 + * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
4404 + *
4405 + * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
4406 + */
4407 +u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input)
4408 +{
4409 + /* Check if input is sane */
4410 + if (WARN_ON(input == 0 || input > 0xf))
4411 + return CEC_PHYS_ADDR_INVALID;
4412 +
4413 + if (phys_addr == 0)
4414 + return input << 12;
4415 +
4416 + if ((phys_addr & 0x0fff) == 0)
4417 + return phys_addr | (input << 8);
4418 +
4419 + if ((phys_addr & 0x00ff) == 0)
4420 + return phys_addr | (input << 4);
4421 +
4422 + if ((phys_addr & 0x000f) == 0)
4423 + return phys_addr | input;
4424 +
4425 + /*
4426 + * All nibbles are used so no valid physical addresses can be assigned
4427 + * to the input.
4428 + */
4429 + return CEC_PHYS_ADDR_INVALID;
4430 +}
4431 +EXPORT_SYMBOL_GPL(v4l2_phys_addr_for_input);
4432 +
4433 +/**
4434 + * v4l2_phys_addr_validate() - validate a physical address from an EDID
4435 + *
4436 + * @phys_addr: the physical address to validate
4437 + * @parent: if not %NULL, then this is filled with the parent's PA.
4438 + * @port: if not %NULL, then this is filled with the input port.
4439 + *
4440 + * This validates a physical address as read from an EDID. If the
4441 + * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
4442 + * then it will return -EINVAL.
4443 + *
4444 + * The parent PA is returned in %parent and the input port is
4445 + * returned in %port. For example:
4446 + *
4447 + * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
4448 + *
4449 + * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
4450 + *
4451 + * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
4452 + *
4453 + * PA = f.f.f.f: has parent f.f.f.f and input port 0.
4454 + *
4455 + * Return: 0 if the PA is valid, -EINVAL if not.
4456 + */
4457 +int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
4458 +{
4459 + int i;
4460 +
4461 + if (parent)
4462 + *parent = phys_addr;
4463 + if (port)
4464 + *port = 0;
4465 + if (phys_addr == CEC_PHYS_ADDR_INVALID)
4466 + return 0;
4467 + for (i = 0; i < 16; i += 4)
4468 + if (phys_addr & (0xf << i))
4469 + break;
4470 + if (i == 16)
4471 + return 0;
4472 + if (parent)
4473 + *parent = phys_addr & (0xfff0 << i);
4474 + if (port)
4475 + *port = (phys_addr >> i) & 0xf;
4476 + for (i += 4; i < 16; i += 4)
4477 + if ((phys_addr & (0xf << i)) == 0)
4478 + return -EINVAL;
4479 + return 0;
4480 +}
4481 +EXPORT_SYMBOL_GPL(v4l2_phys_addr_validate);
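
The relocated v4l2_phys_addr_for_input() fills the first zero nibble from the left with the input number and returns f.f.f.f once all four levels are occupied — exactly the kernel-doc examples above. A userspace copy of the logic for experimenting:

    #include <stdio.h>

    #define PA_INVALID 0xffff

    static unsigned short pa_for_input(unsigned short pa, unsigned char input)
    {
    	if (input == 0 || input > 0xf)
    		return PA_INVALID;
    	if (pa == 0)
    		return input << 12;
    	if ((pa & 0x0fff) == 0)
    		return pa | (input << 8);
    	if ((pa & 0x00ff) == 0)
    		return pa | (input << 4);
    	if ((pa & 0x000f) == 0)
    		return pa | input;
    	return PA_INVALID;	/* all four levels in use */
    }

    int main(void)
    {
    	printf("%04x\n", pa_for_input(0x0000, 2));	/* 2000 = 2.0.0.0 */
    	printf("%04x\n", pa_for_input(0x3000, 1));	/* 3100 = 3.1.0.0 */
    	printf("%04x\n", pa_for_input(0x3213, 5));	/* ffff: maxed out */
    	return 0;
    }
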
4482 diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
4483 index 11841f4b7b2b..dd938a5d0409 100644
4484 --- a/drivers/mfd/Kconfig
4485 +++ b/drivers/mfd/Kconfig
4486 @@ -509,10 +509,10 @@ config INTEL_SOC_PMIC
4487 bool "Support for Crystal Cove PMIC"
4488 depends on ACPI && HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK
4489 depends on X86 || COMPILE_TEST
4490 + depends on I2C_DESIGNWARE_PLATFORM=y
4491 select MFD_CORE
4492 select REGMAP_I2C
4493 select REGMAP_IRQ
4494 - select I2C_DESIGNWARE_PLATFORM
4495 help
4496 Select this option to enable support for Crystal Cove PMIC
4497 on some Intel SoC systems. The PMIC provides ADC, GPIO,
4498 @@ -538,10 +538,10 @@ config INTEL_SOC_PMIC_CHTWC
4499 bool "Support for Intel Cherry Trail Whiskey Cove PMIC"
4500 depends on ACPI && HAS_IOMEM && I2C=y && COMMON_CLK
4501 depends on X86 || COMPILE_TEST
4502 + depends on I2C_DESIGNWARE_PLATFORM=y
4503 select MFD_CORE
4504 select REGMAP_I2C
4505 select REGMAP_IRQ
4506 - select I2C_DESIGNWARE_PLATFORM
4507 help
4508 Select this option to enable support for the Intel Cherry Trail
4509 Whiskey Cove PMIC found on some Intel Cherry Trail systems.
4510 @@ -1403,9 +1403,9 @@ config MFD_TPS65217
4511 config MFD_TPS68470
4512 bool "TI TPS68470 Power Management / LED chips"
4513 depends on ACPI && I2C=y
4514 + depends on I2C_DESIGNWARE_PLATFORM=y
4515 select MFD_CORE
4516 select REGMAP_I2C
4517 - select I2C_DESIGNWARE_PLATFORM
4518 help
4519 If you say yes here you get support for the TPS68470 series of
4520 Power Management / LED chips.
4521 diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
4522 index 45baf5d9120e..61f0faddfd88 100644
4523 --- a/drivers/mmc/host/renesas_sdhi_core.c
4524 +++ b/drivers/mmc/host/renesas_sdhi_core.c
4525 @@ -636,6 +636,13 @@ int renesas_sdhi_probe(struct platform_device *pdev,
4526 host->ops.card_busy = renesas_sdhi_card_busy;
4527 host->ops.start_signal_voltage_switch =
4528 renesas_sdhi_start_signal_voltage_switch;
4529 +
4530 +	/* SDR and HS200/400 registers require HW reset */
4531 + if (of_data && of_data->scc_offset) {
4532 + priv->scc_ctl = host->ctl + of_data->scc_offset;
4533 + host->mmc->caps |= MMC_CAP_HW_RESET;
4534 + host->hw_reset = renesas_sdhi_hw_reset;
4535 + }
4536 }
4537
4538 	/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
4539 @@ -693,8 +700,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
4540 const struct renesas_sdhi_scc *taps = of_data->taps;
4541 bool hit = false;
4542
4543 - host->mmc->caps |= MMC_CAP_HW_RESET;
4544 -
4545 for (i = 0; i < of_data->taps_num; i++) {
4546 if (taps[i].clk_rate == 0 ||
4547 taps[i].clk_rate == host->mmc->f_max) {
4548 @@ -707,12 +712,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
4549 if (!hit)
4550 dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
4551
4552 - priv->scc_ctl = host->ctl + of_data->scc_offset;
4553 host->init_tuning = renesas_sdhi_init_tuning;
4554 host->prepare_tuning = renesas_sdhi_prepare_tuning;
4555 host->select_tuning = renesas_sdhi_select_tuning;
4556 host->check_scc_error = renesas_sdhi_check_scc_error;
4557 - host->hw_reset = renesas_sdhi_hw_reset;
4558 host->prepare_hs400_tuning =
4559 renesas_sdhi_prepare_hs400_tuning;
4560 host->hs400_downgrade = renesas_sdhi_disable_scc;
4561 diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
4562 index c4115bae5db1..71794391f48f 100644
4563 --- a/drivers/mmc/host/sdhci-pci-core.c
4564 +++ b/drivers/mmc/host/sdhci-pci-core.c
4565 @@ -1577,6 +1577,8 @@ static const struct pci_device_id pci_ids[] = {
4566 SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd),
4567 SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc),
4568 SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd),
4569 + SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
4570 + SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
4571 SDHCI_PCI_DEVICE(O2, 8120, o2),
4572 SDHCI_PCI_DEVICE(O2, 8220, o2),
4573 SDHCI_PCI_DEVICE(O2, 8221, o2),
4574 diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
4575 index 2ef0bdca9197..6f04a62b2998 100644
4576 --- a/drivers/mmc/host/sdhci-pci.h
4577 +++ b/drivers/mmc/host/sdhci-pci.h
4578 @@ -50,6 +50,8 @@
4579 #define PCI_DEVICE_ID_INTEL_CNPH_SD 0xa375
4580 #define PCI_DEVICE_ID_INTEL_ICP_EMMC 0x34c4
4581 #define PCI_DEVICE_ID_INTEL_ICP_SD 0x34f8
4582 +#define PCI_DEVICE_ID_INTEL_CML_EMMC 0x02c4
4583 +#define PCI_DEVICE_ID_INTEL_CML_SD 0x02f5
4584
4585 #define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
4586 #define PCI_DEVICE_ID_VIA_95D0 0x95d0
4587 diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
4588 index 91ca77c7571c..b4347806a59e 100644
4589 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
4590 +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
4591 @@ -77,10 +77,13 @@
4592 #define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
4593 #define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
4594 #define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
4595 -#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
4596 +#define IWL_22000_HR_B_F0_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
4597 +#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
4598 +#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
4599 #define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
4600 #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
4601 #define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
4602 +#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
4603
4604 #define IWL_22000_HR_MODULE_FIRMWARE(api) \
4605 IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
4606 @@ -88,7 +91,11 @@
4607 IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
4608 #define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
4609 IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
4610 -#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
4611 +#define IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(api) \
4612 + IWL_22000_HR_B_F0_FW_PRE __stringify(api) ".ucode"
4613 +#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \
4614 + IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
4615 +#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
4616 IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
4617 #define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
4618 IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
4619 @@ -96,6 +103,8 @@
4620 IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
4621 #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
4622 IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
4623 +#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
4624 + IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
4625
4626 #define NVM_HW_SECTION_NUM_FAMILY_22000 10
4627
4628 @@ -190,7 +199,54 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = {
4629
4630 const struct iwl_cfg iwl22000_2ax_cfg_hr = {
4631 .name = "Intel(R) Dual Band Wireless AX 22000",
4632 - .fw_name_pre = IWL_22000_HR_FW_PRE,
4633 + .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
4634 + IWL_DEVICE_22500,
4635 + /*
4636 + * This device doesn't support receiving BlockAck with a large bitmap
4637 + * so we need to restrict the size of transmitted aggregation to the
4638 + * HT size; mac80211 would otherwise pick the HE max (256) by default.
4639 + */
4640 + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
4641 +};
4642 +
4643 +/*
4644 + * All JF radio modules are part of the 9000 series, but the MAC part
4645 + * looks more like 22000. That's why this device is here, but called
4646 + * 9560 nevertheless.
4647 + */
4648 +const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = {
4649 + .name = "Intel(R) Wireless-AC 9461",
4650 + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
4651 + IWL_DEVICE_22500,
4652 +};
4653 +
4654 +const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = {
4655 + .name = "Intel(R) Wireless-AC 9462",
4656 + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
4657 + IWL_DEVICE_22500,
4658 +};
4659 +
4660 +const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
4661 + .name = "Intel(R) Wireless-AC 9560",
4662 + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
4663 + IWL_DEVICE_22500,
4664 +};
4665 +
4666 +const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
4667 + .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
4668 + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
4669 + IWL_DEVICE_22500,
4670 +};
4671 +
4672 +const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
4673 + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
4674 + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
4675 + IWL_DEVICE_22500,
4676 +};
4677 +
4678 +const struct iwl_cfg iwl22000_2ax_cfg_jf = {
4679 + .name = "Intel(R) Dual Band Wireless AX 22000",
4680 + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
4681 IWL_DEVICE_22500,
4682 /*
4683 * This device doesn't support receiving BlockAck with a large bitmap
4684 @@ -264,7 +320,10 @@ const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
4685 MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4686 MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4687 MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4688 +MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4689 +MODULE_FIRMWARE(IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4690 MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4691 MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4692 MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4693 MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4694 +MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4695 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
4696 index 12fddcf15bab..2e9fd7a30398 100644
4697 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
4698 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
4699 @@ -574,11 +574,18 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
4700 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
4701 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
4702 extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
4703 +extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
4704 +extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
4705 +extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
4706 +extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
4707 +extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
4708 +extern const struct iwl_cfg iwl22000_2ax_cfg_jf;
4709 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
4710 +extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
4711 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
4712 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
4713 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
4714 extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
4715 -#endif /* CONFIG_IWLMVM */
4716 +#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
4717
4718 #endif /* __IWL_CONFIG_H__ */
4719 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4720 index 5d65500a8aa7..0982bd99b1c3 100644
4721 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4722 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4723 @@ -601,6 +601,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4724 {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
4725 {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
4726 {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
4727 + {IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_cfg)},
4728 {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
4729 {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)},
4730 {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
4731 @@ -696,34 +697,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4732 {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
4733 {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
4734 {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
4735 - {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
4736 - {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
4737 - {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)},
4738 - {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)},
4739 - {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)},
4740 - {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)},
4741 - {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)},
4742 - {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)},
4743 - {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)},
4744 - {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)},
4745 - {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)},
4746 - {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)},
4747 - {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)},
4748 - {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)},
4749 - {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)},
4750 - {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
4751 - {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
4752 - {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
4753 - {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
4754 - {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
4755 - {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
4756 - {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
4757 - {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
4758 - {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
4759 - {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)},
4760 - {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)},
4761 - {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)},
4762 - {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)},
4763 +
4764 + {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4765 + {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4766 + {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4767 + {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4768 + {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4769 + {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4770 + {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4771 + {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4772 + {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4773 + {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4774 + {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4775 + {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4776 + {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4777 + {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4778 + {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4779 + {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4780 + {IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
4781 + {IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
4782 + {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4783 + {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4784 + {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4785 + {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4786 + {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4787 + {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4788 + {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4789 +
4790 {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
4791 {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
4792 {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
4793 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
4794 index 6542644bc325..cec31f0c3017 100644
4795 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
4796 +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
4797 @@ -402,7 +402,7 @@ void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
4798 ccmp_pn[6] = pn >> 32;
4799 ccmp_pn[7] = pn >> 40;
4800 txwi->iv = *((__le32 *)&ccmp_pn[0]);
4801 - txwi->eiv = *((__le32 *)&ccmp_pn[1]);
4802 + txwi->eiv = *((__le32 *)&ccmp_pn[4]);
4803 }
4804
4805 spin_lock_bh(&dev->mt76.lock);
4806 diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
4807 index 67dec8860bf3..565bddcfd130 100644
4808 --- a/drivers/nvme/host/fc.c
4809 +++ b/drivers/nvme/host/fc.c
4810 @@ -206,7 +206,7 @@ static LIST_HEAD(nvme_fc_lport_list);
4811 static DEFINE_IDA(nvme_fc_local_port_cnt);
4812 static DEFINE_IDA(nvme_fc_ctrl_cnt);
4813
4814 -
4815 +static struct workqueue_struct *nvme_fc_wq;
4816
4817 /*
4818 * These items are short-term. They will eventually be moved into
4819 @@ -2053,7 +2053,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
4820 */
4821 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
4822 active = atomic_xchg(&ctrl->err_work_active, 1);
4823 - if (!active && !schedule_work(&ctrl->err_work)) {
4824 + if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
4825 atomic_set(&ctrl->err_work_active, 0);
4826 WARN_ON(1);
4827 }
4828 @@ -3321,6 +3321,10 @@ static int __init nvme_fc_init_module(void)
4829 {
4830 int ret;
4831
4832 + nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
4833 + if (!nvme_fc_wq)
4834 + return -ENOMEM;
4835 +
4836 /*
4837 * NOTE:
4838 * It is expected that in the future the kernel will combine
4839 @@ -3338,7 +3342,8 @@ static int __init nvme_fc_init_module(void)
4840 fc_class = class_create(THIS_MODULE, "fc");
4841 if (IS_ERR(fc_class)) {
4842 pr_err("couldn't register class fc\n");
4843 - return PTR_ERR(fc_class);
4844 + ret = PTR_ERR(fc_class);
4845 + goto out_destroy_wq;
4846 }
4847
4848 /*
4849 @@ -3362,6 +3367,9 @@ out_destroy_device:
4850 device_destroy(fc_class, MKDEV(0, 0));
4851 out_destroy_class:
4852 class_destroy(fc_class);
4853 +out_destroy_wq:
4854 + destroy_workqueue(nvme_fc_wq);
4855 +
4856 return ret;
4857 }
4858
4859 @@ -3378,6 +3386,7 @@ static void __exit nvme_fc_exit_module(void)
4860
4861 device_destroy(fc_class, MKDEV(0, 0));
4862 class_destroy(fc_class);
4863 + destroy_workqueue(nvme_fc_wq);
4864 }
4865
4866 module_init(nvme_fc_init_module);
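
Note: the nvme-fc hunks above follow a common pattern -- replace schedule_work() with a driver-private workqueue that is created at module init, used for error recovery, and destroyed (which also drains it) at module exit. A minimal sketch of that lifecycle; the foo_* names are illustrative placeholders, not anything from the driver:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *foo_wq;

    struct foo_ctrl {
            struct work_struct err_work;    /* INIT_WORK()ed at ctrl setup */
    };

    static int __init foo_init(void)
    {
            /*
             * WQ_MEM_RECLAIM reserves a rescuer thread, so recovery work
             * can make forward progress even under memory pressure -- the
             * reason to prefer this over the shared system workqueue.
             */
            foo_wq = alloc_workqueue("foo_wq", WQ_MEM_RECLAIM, 0);
            return foo_wq ? 0 : -ENOMEM;
    }

    static void foo_recover(struct foo_ctrl *ctrl)
    {
            queue_work(foo_wq, &ctrl->err_work);    /* not schedule_work() */
    }

    static void __exit foo_exit(void)
    {
            destroy_workqueue(foo_wq);      /* flushes pending work first */
    }

    module_init(foo_init);
    module_exit(foo_exit);
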
4867 diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
4868 index acd50920c2ff..b57ee79f6d69 100644
4869 --- a/drivers/pci/controller/dwc/pcie-designware-host.c
4870 +++ b/drivers/pci/controller/dwc/pcie-designware-host.c
4871 @@ -356,7 +356,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
4872 dev_err(dev, "Missing *config* reg space\n");
4873 }
4874
4875 - bridge = pci_alloc_host_bridge(0);
4876 + bridge = devm_pci_alloc_host_bridge(dev, 0);
4877 if (!bridge)
4878 return -ENOMEM;
4879
4880 @@ -367,7 +367,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
4881
4882 ret = devm_request_pci_bus_resources(dev, &bridge->windows);
4883 if (ret)
4884 - goto error;
4885 + return ret;
4886
4887 /* Get the I/O and memory ranges from DT */
4888 resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
4889 @@ -411,8 +411,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
4890 resource_size(pp->cfg));
4891 if (!pci->dbi_base) {
4892 dev_err(dev, "Error with ioremap\n");
4893 - ret = -ENOMEM;
4894 - goto error;
4895 + return -ENOMEM;
4896 }
4897 }
4898
4899 @@ -423,8 +422,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
4900 pp->cfg0_base, pp->cfg0_size);
4901 if (!pp->va_cfg0_base) {
4902 dev_err(dev, "Error with ioremap in function\n");
4903 - ret = -ENOMEM;
4904 - goto error;
4905 + return -ENOMEM;
4906 }
4907 }
4908
4909 @@ -434,8 +432,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
4910 pp->cfg1_size);
4911 if (!pp->va_cfg1_base) {
4912 dev_err(dev, "Error with ioremap\n");
4913 - ret = -ENOMEM;
4914 - goto error;
4915 + return -ENOMEM;
4916 }
4917 }
4918
4919 @@ -458,14 +455,14 @@ int dw_pcie_host_init(struct pcie_port *pp)
4920 pp->num_vectors == 0) {
4921 dev_err(dev,
4922 "Invalid number of vectors\n");
4923 - goto error;
4924 + return -EINVAL;
4925 }
4926 }
4927
4928 if (!pp->ops->msi_host_init) {
4929 ret = dw_pcie_allocate_domains(pp);
4930 if (ret)
4931 - goto error;
4932 + return ret;
4933
4934 if (pp->msi_irq)
4935 irq_set_chained_handler_and_data(pp->msi_irq,
4936 @@ -474,7 +471,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
4937 } else {
4938 ret = pp->ops->msi_host_init(pp);
4939 if (ret < 0)
4940 - goto error;
4941 + return ret;
4942 }
4943 }
4944
4945 @@ -514,8 +511,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
4946 err_free_msi:
4947 if (pci_msi_enabled() && !pp->ops->msi_host_init)
4948 dw_pcie_free_msi(pp);
4949 -error:
4950 - pci_free_host_bridge(bridge);
4951 return ret;
4952 }
4953
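Note: the dw_pcie_host_init() rework above is enabled entirely by the first changed line -- devm_pci_alloc_host_bridge() ties the bridge's lifetime to the device, so every later failure can return directly instead of jumping to a free-the-bridge label. A sketch of the idiom; foo_parse_windows() is a hypothetical helper standing in for the real setup steps:

    #include <linux/pci.h>

    int foo_parse_windows(struct pci_host_bridge *bridge);

    static int foo_host_probe(struct device *dev)
    {
            struct pci_host_bridge *bridge;

            /* devres frees this automatically if probe fails later */
            bridge = devm_pci_alloc_host_bridge(dev, 0);
            if (!bridge)
                    return -ENOMEM;

            if (foo_parse_windows(bridge) < 0)
                    return -ENODEV; /* plain return, no goto/free ladder */

            return 0;
    }
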
4954 diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
4955 index 87a8887fd4d3..e292801fff7f 100644
4956 --- a/drivers/pci/controller/dwc/pcie-qcom.c
4957 +++ b/drivers/pci/controller/dwc/pcie-qcom.c
4958 @@ -1091,7 +1091,6 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
4959 struct qcom_pcie *pcie = to_qcom_pcie(pci);
4960 int ret;
4961
4962 - pm_runtime_get_sync(pci->dev);
4963 qcom_ep_reset_assert(pcie);
4964
4965 ret = pcie->ops->init(pcie);
4966 @@ -1128,7 +1127,6 @@ err_disable_phy:
4967 phy_power_off(pcie->phy);
4968 err_deinit:
4969 pcie->ops->deinit(pcie);
4970 - pm_runtime_put(pci->dev);
4971
4972 return ret;
4973 }
4974 @@ -1218,6 +1216,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
4975 return -ENOMEM;
4976
4977 pm_runtime_enable(dev);
4978 + ret = pm_runtime_get_sync(dev);
4979 + if (ret < 0) {
4980 + pm_runtime_disable(dev);
4981 + return ret;
4982 + }
4983 +
4984 pci->dev = dev;
4985 pci->ops = &dw_pcie_ops;
4986 pp = &pci->pp;
4987 @@ -1226,45 +1230,57 @@ static int qcom_pcie_probe(struct platform_device *pdev)
4988
4989 pcie->ops = of_device_get_match_data(dev);
4990
4991 - pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
4992 - if (IS_ERR(pcie->reset))
4993 - return PTR_ERR(pcie->reset);
4994 + pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
4995 + if (IS_ERR(pcie->reset)) {
4996 + ret = PTR_ERR(pcie->reset);
4997 + goto err_pm_runtime_put;
4998 + }
4999
5000 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
5001 pcie->parf = devm_ioremap_resource(dev, res);
5002 - if (IS_ERR(pcie->parf))
5003 - return PTR_ERR(pcie->parf);
5004 + if (IS_ERR(pcie->parf)) {
5005 + ret = PTR_ERR(pcie->parf);
5006 + goto err_pm_runtime_put;
5007 + }
5008
5009 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
5010 pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
5011 - if (IS_ERR(pci->dbi_base))
5012 - return PTR_ERR(pci->dbi_base);
5013 + if (IS_ERR(pci->dbi_base)) {
5014 + ret = PTR_ERR(pci->dbi_base);
5015 + goto err_pm_runtime_put;
5016 + }
5017
5018 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
5019 pcie->elbi = devm_ioremap_resource(dev, res);
5020 - if (IS_ERR(pcie->elbi))
5021 - return PTR_ERR(pcie->elbi);
5022 + if (IS_ERR(pcie->elbi)) {
5023 + ret = PTR_ERR(pcie->elbi);
5024 + goto err_pm_runtime_put;
5025 + }
5026
5027 pcie->phy = devm_phy_optional_get(dev, "pciephy");
5028 - if (IS_ERR(pcie->phy))
5029 - return PTR_ERR(pcie->phy);
5030 + if (IS_ERR(pcie->phy)) {
5031 + ret = PTR_ERR(pcie->phy);
5032 + goto err_pm_runtime_put;
5033 + }
5034
5035 ret = pcie->ops->get_resources(pcie);
5036 if (ret)
5037 - return ret;
5038 + goto err_pm_runtime_put;
5039
5040 pp->ops = &qcom_pcie_dw_ops;
5041
5042 if (IS_ENABLED(CONFIG_PCI_MSI)) {
5043 pp->msi_irq = platform_get_irq_byname(pdev, "msi");
5044 - if (pp->msi_irq < 0)
5045 - return pp->msi_irq;
5046 + if (pp->msi_irq < 0) {
5047 + ret = pp->msi_irq;
5048 + goto err_pm_runtime_put;
5049 + }
5050 }
5051
5052 ret = phy_init(pcie->phy);
5053 if (ret) {
5054 pm_runtime_disable(&pdev->dev);
5055 - return ret;
5056 + goto err_pm_runtime_put;
5057 }
5058
5059 platform_set_drvdata(pdev, pcie);
5060 @@ -1273,10 +1289,16 @@ static int qcom_pcie_probe(struct platform_device *pdev)
5061 if (ret) {
5062 dev_err(dev, "cannot initialize host\n");
5063 pm_runtime_disable(&pdev->dev);
5064 - return ret;
5065 + goto err_pm_runtime_put;
5066 }
5067
5068 return 0;
5069 +
5070 +err_pm_runtime_put:
5071 + pm_runtime_put(dev);
5072 + pm_runtime_disable(dev);
5073 +
5074 + return ret;
5075 }
5076
5077 static const struct of_device_id qcom_pcie_match[] = {
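
Note: the qcom probe rework takes one pm_runtime reference up front and funnels every later failure through a single unwind label, keeping get/put and enable/disable strictly paired. The shape, reduced to its essentials; foo_get_resources() is a placeholder for the resource-gathering steps in the real probe:

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    int foo_get_resources(struct device *dev);

    static int foo_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            int ret;

            pm_runtime_enable(dev);
            ret = pm_runtime_get_sync(dev); /* resume now, put on unwind */
            if (ret < 0) {
                    pm_runtime_disable(dev);
                    return ret;
            }

            ret = foo_get_resources(dev);   /* any step after the get... */
            if (ret)
                    goto err_pm_runtime_put; /* ...unwinds through here */

            return 0;

    err_pm_runtime_put:
            pm_runtime_put(dev);
            pm_runtime_disable(dev);
            return ret;
    }
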
5078 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
5079 index 28c64f84bfe7..06be52912dcd 100644
5080 --- a/drivers/pci/quirks.c
5081 +++ b/drivers/pci/quirks.c
5082 @@ -5082,59 +5082,95 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
5083 pci_iounmap(pdev, mmio);
5084 pci_disable_device(pdev);
5085 }
5086 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8531,
5087 - quirk_switchtec_ntb_dma_alias);
5088 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8532,
5089 - quirk_switchtec_ntb_dma_alias);
5090 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8533,
5091 - quirk_switchtec_ntb_dma_alias);
5092 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8534,
5093 - quirk_switchtec_ntb_dma_alias);
5094 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8535,
5095 - quirk_switchtec_ntb_dma_alias);
5096 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8536,
5097 - quirk_switchtec_ntb_dma_alias);
5098 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8543,
5099 - quirk_switchtec_ntb_dma_alias);
5100 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8544,
5101 - quirk_switchtec_ntb_dma_alias);
5102 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8545,
5103 - quirk_switchtec_ntb_dma_alias);
5104 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8546,
5105 - quirk_switchtec_ntb_dma_alias);
5106 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8551,
5107 - quirk_switchtec_ntb_dma_alias);
5108 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8552,
5109 - quirk_switchtec_ntb_dma_alias);
5110 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8553,
5111 - quirk_switchtec_ntb_dma_alias);
5112 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8554,
5113 - quirk_switchtec_ntb_dma_alias);
5114 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8555,
5115 - quirk_switchtec_ntb_dma_alias);
5116 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8556,
5117 - quirk_switchtec_ntb_dma_alias);
5118 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8561,
5119 - quirk_switchtec_ntb_dma_alias);
5120 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8562,
5121 - quirk_switchtec_ntb_dma_alias);
5122 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8563,
5123 - quirk_switchtec_ntb_dma_alias);
5124 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8564,
5125 - quirk_switchtec_ntb_dma_alias);
5126 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8565,
5127 - quirk_switchtec_ntb_dma_alias);
5128 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8566,
5129 - quirk_switchtec_ntb_dma_alias);
5130 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8571,
5131 - quirk_switchtec_ntb_dma_alias);
5132 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8572,
5133 - quirk_switchtec_ntb_dma_alias);
5134 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8573,
5135 - quirk_switchtec_ntb_dma_alias);
5136 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8574,
5137 - quirk_switchtec_ntb_dma_alias);
5138 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8575,
5139 - quirk_switchtec_ntb_dma_alias);
5140 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8576,
5141 - quirk_switchtec_ntb_dma_alias);
5142 +#define SWITCHTEC_QUIRK(vid) \
5143 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
5144 + quirk_switchtec_ntb_dma_alias)
5145 +
5146 +SWITCHTEC_QUIRK(0x8531); /* PFX 24xG3 */
5147 +SWITCHTEC_QUIRK(0x8532); /* PFX 32xG3 */
5148 +SWITCHTEC_QUIRK(0x8533); /* PFX 48xG3 */
5149 +SWITCHTEC_QUIRK(0x8534); /* PFX 64xG3 */
5150 +SWITCHTEC_QUIRK(0x8535); /* PFX 80xG3 */
5151 +SWITCHTEC_QUIRK(0x8536); /* PFX 96xG3 */
5152 +SWITCHTEC_QUIRK(0x8541); /* PSX 24xG3 */
5153 +SWITCHTEC_QUIRK(0x8542); /* PSX 32xG3 */
5154 +SWITCHTEC_QUIRK(0x8543); /* PSX 48xG3 */
5155 +SWITCHTEC_QUIRK(0x8544); /* PSX 64xG3 */
5156 +SWITCHTEC_QUIRK(0x8545); /* PSX 80xG3 */
5157 +SWITCHTEC_QUIRK(0x8546); /* PSX 96xG3 */
5158 +SWITCHTEC_QUIRK(0x8551); /* PAX 24XG3 */
5159 +SWITCHTEC_QUIRK(0x8552); /* PAX 32XG3 */
5160 +SWITCHTEC_QUIRK(0x8553); /* PAX 48XG3 */
5161 +SWITCHTEC_QUIRK(0x8554); /* PAX 64XG3 */
5162 +SWITCHTEC_QUIRK(0x8555); /* PAX 80XG3 */
5163 +SWITCHTEC_QUIRK(0x8556); /* PAX 96XG3 */
5164 +SWITCHTEC_QUIRK(0x8561); /* PFXL 24XG3 */
5165 +SWITCHTEC_QUIRK(0x8562); /* PFXL 32XG3 */
5166 +SWITCHTEC_QUIRK(0x8563); /* PFXL 48XG3 */
5167 +SWITCHTEC_QUIRK(0x8564); /* PFXL 64XG3 */
5168 +SWITCHTEC_QUIRK(0x8565); /* PFXL 80XG3 */
5169 +SWITCHTEC_QUIRK(0x8566); /* PFXL 96XG3 */
5170 +SWITCHTEC_QUIRK(0x8571); /* PFXI 24XG3 */
5171 +SWITCHTEC_QUIRK(0x8572); /* PFXI 32XG3 */
5172 +SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
5173 +SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
5174 +SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
5175 +SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
5176 +
5177 +/*
5178 + * On Lenovo ThinkPad P50 SKUs with an Nvidia Quadro M1000M, the BIOS does
5179 + * not always reset the secondary Nvidia GPU between reboots if the system
5180 + * is configured to use Hybrid Graphics mode. This results in the GPU
5181 + * being left in whatever state it was in during the *previous* boot, which
5182 + * causes spurious interrupts from the GPU, which in turn causes us to
5183 + * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly,
5184 + * this also completely breaks nouveau.
5185 + *
5186 + * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a
5187 + * clean state and fixes all these issues.
5188 + *
5189 + * When the machine is configured in Dedicated display mode, the issue
5190 + * doesn't occur. Fortunately the GPU advertises NoReset+ when in this
5191 + * mode, so we can detect that and avoid resetting it.
5192 + */
5193 +static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
5194 +{
5195 + void __iomem *map;
5196 + int ret;
5197 +
5198 + if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
5199 + pdev->subsystem_device != 0x222e ||
5200 + !pdev->reset_fn)
5201 + return;
5202 +
5203 + if (pci_enable_device_mem(pdev))
5204 + return;
5205 +
5206 + /*
5207 + * Based on nvkm_device_ctor() in
5208 + * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
5209 + */
5210 + map = pci_iomap(pdev, 0, 0x23000);
5211 + if (!map) {
5212 + pci_err(pdev, "Can't map MMIO space\n");
5213 + goto out_disable;
5214 + }
5215 +
5216 + /*
5217 + * Make sure the GPU looks like it's been POSTed before resetting
5218 + * it.
5219 + */
5220 + if (ioread32(map + 0x2240c) & 0x2) {
5221 + pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
5222 + ret = pci_reset_bus(pdev);
5223 + if (ret < 0)
5224 + pci_err(pdev, "Failed to reset GPU: %d\n", ret);
5225 + }
5226 +
5227 + iounmap(map);
5228 +out_disable:
5229 + pci_disable_device(pdev);
5230 +}
5231 +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
5232 + PCI_CLASS_DISPLAY_VGA, 8,
5233 + quirk_reset_lenovo_thinkpad_p50_nvgpu);
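
Note: besides shrinking twenty-eight identical DECLARE_PCI_FIXUP_FINAL() calls, the SWITCHTEC_QUIRK() rewrite quietly widens coverage -- 0x8541 and 0x8542 (PSX 24xG3/32xG3) were missing from the old list. The wrapper trick applies to any repetitive fixup table; an illustrative sketch with a made-up vendor/device pair:

    #include <linux/pci.h>

    static void foo_fixup(struct pci_dev *pdev)
    {
            pci_info(pdev, "applying foo fixup\n");
    }

    #define FOO_QUIRK(did) \
            DECLARE_PCI_FIXUP_FINAL(0x1234, did, foo_fixup)

    FOO_QUIRK(0x0001);      /* model A */
    FOO_QUIRK(0x0002);      /* model B */
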
5234 diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
5235 index e9ab90c19304..602af839421d 100644
5236 --- a/drivers/remoteproc/qcom_q6v5.c
5237 +++ b/drivers/remoteproc/qcom_q6v5.c
5238 @@ -188,6 +188,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
5239 init_completion(&q6v5->stop_done);
5240
5241 q6v5->wdog_irq = platform_get_irq_byname(pdev, "wdog");
5242 + if (q6v5->wdog_irq < 0) {
5243 + if (q6v5->wdog_irq != -EPROBE_DEFER)
5244 + dev_err(&pdev->dev,
5245 + "failed to retrieve wdog IRQ: %d\n",
5246 + q6v5->wdog_irq);
5247 + return q6v5->wdog_irq;
5248 + }
5249 +
5250 ret = devm_request_threaded_irq(&pdev->dev, q6v5->wdog_irq,
5251 NULL, q6v5_wdog_interrupt,
5252 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
5253 @@ -198,8 +206,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
5254 }
5255
5256 q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal");
5257 - if (q6v5->fatal_irq == -EPROBE_DEFER)
5258 - return -EPROBE_DEFER;
5259 + if (q6v5->fatal_irq < 0) {
5260 + if (q6v5->fatal_irq != -EPROBE_DEFER)
5261 + dev_err(&pdev->dev,
5262 + "failed to retrieve fatal IRQ: %d\n",
5263 + q6v5->fatal_irq);
5264 + return q6v5->fatal_irq;
5265 + }
5266
5267 ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq,
5268 NULL, q6v5_fatal_interrupt,
5269 @@ -211,8 +224,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
5270 }
5271
5272 q6v5->ready_irq = platform_get_irq_byname(pdev, "ready");
5273 - if (q6v5->ready_irq == -EPROBE_DEFER)
5274 - return -EPROBE_DEFER;
5275 + if (q6v5->ready_irq < 0) {
5276 + if (q6v5->ready_irq != -EPROBE_DEFER)
5277 + dev_err(&pdev->dev,
5278 + "failed to retrieve ready IRQ: %d\n",
5279 + q6v5->ready_irq);
5280 + return q6v5->ready_irq;
5281 + }
5282
5283 ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq,
5284 NULL, q6v5_ready_interrupt,
5285 @@ -224,8 +242,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
5286 }
5287
5288 q6v5->handover_irq = platform_get_irq_byname(pdev, "handover");
5289 - if (q6v5->handover_irq == -EPROBE_DEFER)
5290 - return -EPROBE_DEFER;
5291 + if (q6v5->handover_irq < 0) {
5292 + if (q6v5->handover_irq != -EPROBE_DEFER)
5293 + dev_err(&pdev->dev,
5294 + "failed to retrieve handover IRQ: %d\n",
5295 + q6v5->handover_irq);
5296 + return q6v5->handover_irq;
5297 + }
5298
5299 ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq,
5300 NULL, q6v5_handover_interrupt,
5301 @@ -238,8 +261,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
5302 disable_irq(q6v5->handover_irq);
5303
5304 q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack");
5305 - if (q6v5->stop_irq == -EPROBE_DEFER)
5306 - return -EPROBE_DEFER;
5307 + if (q6v5->stop_irq < 0) {
5308 + if (q6v5->stop_irq != -EPROBE_DEFER)
5309 + dev_err(&pdev->dev,
5310 + "failed to retrieve stop-ack IRQ: %d\n",
5311 + q6v5->stop_irq);
5312 + return q6v5->stop_irq;
5313 + }
5314
5315 ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq,
5316 NULL, q6v5_stop_interrupt,
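
Note: all five IRQ lookups in the q6v5 hunks now share one shape -- propagate any negative value, but only log when it is not -EPROBE_DEFER, since a deferred probe will simply run again. Were the driver refactored, a small helper could say it once; this is hypothetical, not part of the patch:

    #include <linux/platform_device.h>

    static int foo_get_irq(struct platform_device *pdev, const char *name)
    {
            int irq = platform_get_irq_byname(pdev, name);

            /* stay quiet on probe deferral; it is not an error yet */
            if (irq < 0 && irq != -EPROBE_DEFER)
                    dev_err(&pdev->dev, "failed to retrieve %s IRQ: %d\n",
                            name, irq);
            return irq;     /* caller propagates negatives unchanged */
    }
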
5317 diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
5318 index d7a4b9eca5d2..6a84b6372897 100644
5319 --- a/drivers/remoteproc/qcom_q6v5_pil.c
5320 +++ b/drivers/remoteproc/qcom_q6v5_pil.c
5321 @@ -1132,6 +1132,9 @@ static int q6v5_probe(struct platform_device *pdev)
5322 if (!desc)
5323 return -EINVAL;
5324
5325 + if (desc->need_mem_protection && !qcom_scm_is_available())
5326 + return -EPROBE_DEFER;
5327 +
5328 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
5329 desc->hexagon_mba_image, sizeof(*qproc));
5330 if (!rproc) {
5331 diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
5332 index a57b969b8973..3be54651698a 100644
5333 --- a/drivers/s390/crypto/ap_bus.c
5334 +++ b/drivers/s390/crypto/ap_bus.c
5335 @@ -777,6 +777,8 @@ static int ap_device_probe(struct device *dev)
5336 drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
5337 if (!!devres != !!drvres)
5338 return -ENODEV;
5339 + /* (re-)init queue's state machine */
5340 + ap_queue_reinit_state(to_ap_queue(dev));
5341 }
5342
5343 /* Add queue/card to list of active queues/cards */
5344 @@ -809,6 +811,8 @@ static int ap_device_remove(struct device *dev)
5345 struct ap_device *ap_dev = to_ap_dev(dev);
5346 struct ap_driver *ap_drv = ap_dev->drv;
5347
5348 + if (is_queue_dev(dev))
5349 + ap_queue_remove(to_ap_queue(dev));
5350 if (ap_drv->remove)
5351 ap_drv->remove(ap_dev);
5352
5353 @@ -1446,10 +1450,6 @@ static void ap_scan_bus(struct work_struct *unused)
5354 aq->ap_dev.device.parent = &ac->ap_dev.device;
5355 dev_set_name(&aq->ap_dev.device,
5356 "%02x.%04x", id, dom);
5357 - /* Start with a device reset */
5358 - spin_lock_bh(&aq->lock);
5359 - ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
5360 - spin_unlock_bh(&aq->lock);
5361 /* Register device */
5362 rc = device_register(&aq->ap_dev.device);
5363 if (rc) {
5364 diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
5365 index 5246cd8c16a6..7e85d238767b 100644
5366 --- a/drivers/s390/crypto/ap_bus.h
5367 +++ b/drivers/s390/crypto/ap_bus.h
5368 @@ -253,6 +253,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
5369 void ap_queue_remove(struct ap_queue *aq);
5370 void ap_queue_suspend(struct ap_device *ap_dev);
5371 void ap_queue_resume(struct ap_device *ap_dev);
5372 +void ap_queue_reinit_state(struct ap_queue *aq);
5373
5374 struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
5375 int comp_device_type, unsigned int functions);
5376 diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
5377 index 66f7334bcb03..0aa4b3ccc948 100644
5378 --- a/drivers/s390/crypto/ap_queue.c
5379 +++ b/drivers/s390/crypto/ap_queue.c
5380 @@ -718,5 +718,20 @@ void ap_queue_remove(struct ap_queue *aq)
5381 {
5382 ap_flush_queue(aq);
5383 del_timer_sync(&aq->timeout);
5384 +
5385 + /* reset with zero, also clears irq registration */
5386 + spin_lock_bh(&aq->lock);
5387 + ap_zapq(aq->qid);
5388 + aq->state = AP_STATE_BORKED;
5389 + spin_unlock_bh(&aq->lock);
5390 }
5391 EXPORT_SYMBOL(ap_queue_remove);
5392 +
5393 +void ap_queue_reinit_state(struct ap_queue *aq)
5394 +{
5395 + spin_lock_bh(&aq->lock);
5396 + aq->state = AP_STATE_RESET_START;
5397 + ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
5398 + spin_unlock_bh(&aq->lock);
5399 +}
5400 +EXPORT_SYMBOL(ap_queue_reinit_state);
5401 diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
5402 index f4ae5fa30ec9..ff17a00273f7 100644
5403 --- a/drivers/s390/crypto/zcrypt_cex2a.c
5404 +++ b/drivers/s390/crypto/zcrypt_cex2a.c
5405 @@ -198,7 +198,6 @@ static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
5406 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
5407 struct zcrypt_queue *zq = aq->private;
5408
5409 - ap_queue_remove(aq);
5410 if (zq)
5411 zcrypt_queue_unregister(zq);
5412 }
5413 diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
5414 index 35d58dbbc4da..2a42e5962317 100644
5415 --- a/drivers/s390/crypto/zcrypt_cex4.c
5416 +++ b/drivers/s390/crypto/zcrypt_cex4.c
5417 @@ -273,7 +273,6 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
5418 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
5419 struct zcrypt_queue *zq = aq->private;
5420
5421 - ap_queue_remove(aq);
5422 if (zq)
5423 zcrypt_queue_unregister(zq);
5424 }
5425 diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
5426 index 94d9f7224aea..baa683c3f5d3 100644
5427 --- a/drivers/s390/crypto/zcrypt_pcixcc.c
5428 +++ b/drivers/s390/crypto/zcrypt_pcixcc.c
5429 @@ -276,7 +276,6 @@ static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
5430 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
5431 struct zcrypt_queue *zq = aq->private;
5432
5433 - ap_queue_remove(aq);
5434 if (zq)
5435 zcrypt_queue_unregister(zq);
5436 }
5437 diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
5438 index 3c86e27f094d..aff073a5b52b 100644
5439 --- a/drivers/s390/scsi/zfcp_fsf.c
5440 +++ b/drivers/s390/scsi/zfcp_fsf.c
5441 @@ -1594,6 +1594,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
5442 {
5443 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
5444 struct zfcp_fsf_req *req;
5445 + unsigned long req_id = 0;
5446 int retval = -EIO;
5447
5448 spin_lock_irq(&qdio->req_q_lock);
5449 @@ -1616,6 +1617,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
5450 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
5451 req->data = wka_port;
5452
5453 + req_id = req->req_id;
5454 +
5455 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
5456 retval = zfcp_fsf_req_send(req);
5457 if (retval)
5458 @@ -1623,7 +1626,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
5459 out:
5460 spin_unlock_irq(&qdio->req_q_lock);
5461 if (!retval)
5462 - zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
5463 + zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
5464 return retval;
5465 }
5466
5467 @@ -1649,6 +1652,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
5468 {
5469 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
5470 struct zfcp_fsf_req *req;
5471 + unsigned long req_id = 0;
5472 int retval = -EIO;
5473
5474 spin_lock_irq(&qdio->req_q_lock);
5475 @@ -1671,6 +1675,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
5476 req->data = wka_port;
5477 req->qtcb->header.port_handle = wka_port->handle;
5478
5479 + req_id = req->req_id;
5480 +
5481 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
5482 retval = zfcp_fsf_req_send(req);
5483 if (retval)
5484 @@ -1678,7 +1684,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
5485 out:
5486 spin_unlock_irq(&qdio->req_q_lock);
5487 if (!retval)
5488 - zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
5489 + zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
5490 return retval;
5491 }
5492
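Note: both zfcp hunks fix the same use-after-free -- once zfcp_fsf_req_send() succeeds, the request can complete and be freed on another CPU, so req->req_id must not be read afterwards. Snapshot first, then hand the object off; sketched with hypothetical foo_* names:

    struct foo_req {
            unsigned long req_id;
            /* ... */
    };

    int foo_send(struct foo_req *req);              /* takes ownership */
    void foo_trace(const char *tag, unsigned long id);

    static int foo_issue(struct foo_req *req)
    {
            /* copy while we still own req; after a successful send the
             * completion path may free it concurrently */
            unsigned long req_id = req->req_id;
            int retval = foo_send(req);

            if (!retval)
                    foo_trace("sent", req_id);      /* no deref of req */
            return retval;
    }
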
5493 diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
5494 index ec54538f7ae1..67efdf25657f 100644
5495 --- a/drivers/s390/virtio/virtio_ccw.c
5496 +++ b/drivers/s390/virtio/virtio_ccw.c
5497 @@ -132,6 +132,7 @@ struct airq_info {
5498 struct airq_iv *aiv;
5499 };
5500 static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
5501 +static DEFINE_MUTEX(airq_areas_lock);
5502
5503 #define CCW_CMD_SET_VQ 0x13
5504 #define CCW_CMD_VDEV_RESET 0x33
5505 @@ -244,9 +245,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
5506 unsigned long bit, flags;
5507
5508 for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
5509 + mutex_lock(&airq_areas_lock);
5510 if (!airq_areas[i])
5511 airq_areas[i] = new_airq_info();
5512 info = airq_areas[i];
5513 + mutex_unlock(&airq_areas_lock);
5514 if (!info)
5515 return 0;
5516 write_lock_irqsave(&info->lock, flags);
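
Note: the new airq_areas_lock closes a device-probe race -- two CPUs could both observe airq_areas[i] == NULL and allocate twice, leaking one structure. Lock-protected lazy initialization in miniature, with types simplified to a hypothetical foo_info:

    #include <linux/mutex.h>

    #define MAX_AREAS 64

    struct foo_info;
    struct foo_info *foo_new_info(void);

    static struct foo_info *areas[MAX_AREAS];
    static DEFINE_MUTEX(areas_lock);

    static struct foo_info *foo_get_area(int i)
    {
            struct foo_info *info;

            mutex_lock(&areas_lock);
            if (!areas[i])                  /* test again under the lock */
                    areas[i] = foo_new_info();
            info = areas[i];
            mutex_unlock(&areas_lock);

            return info;    /* NULL only if the allocation failed */
    }
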
5517 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
5518 index 806ceabcabc3..bc37666f998e 100644
5519 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
5520 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
5521 @@ -5218,7 +5218,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5522 {
5523 u32 max_sectors_1;
5524 u32 max_sectors_2, tmp_sectors, msix_enable;
5525 - u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
5526 + u32 scratch_pad_2, scratch_pad_3, scratch_pad_4, status_reg;
5527 resource_size_t base_addr;
5528 struct megasas_register_set __iomem *reg_set;
5529 struct megasas_ctrl_info *ctrl_info = NULL;
5530 @@ -5226,6 +5226,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5531 int i, j, loop, fw_msix_count = 0;
5532 struct IOV_111 *iovPtr;
5533 struct fusion_context *fusion;
5534 + bool do_adp_reset = true;
5535
5536 fusion = instance->ctrl_context;
5537
5538 @@ -5274,19 +5275,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
5539 }
5540
5541 if (megasas_transition_to_ready(instance, 0)) {
5542 - atomic_set(&instance->fw_reset_no_pci_access, 1);
5543 - instance->instancet->adp_reset
5544 - (instance, instance->reg_set);
5545 - atomic_set(&instance->fw_reset_no_pci_access, 0);
5546 - dev_info(&instance->pdev->dev,
5547 - "FW restarted successfully from %s!\n",
5548 - __func__);
5549 + if (instance->adapter_type >= INVADER_SERIES) {
5550 + status_reg = instance->instancet->read_fw_status_reg(
5551 + instance->reg_set);
5552 + do_adp_reset = status_reg & MFI_RESET_ADAPTER;
5553 + }
5554
5555 - /*waitting for about 30 second before retry*/
5556 - ssleep(30);
5557 + if (do_adp_reset) {
5558 + atomic_set(&instance->fw_reset_no_pci_access, 1);
5559 + instance->instancet->adp_reset
5560 + (instance, instance->reg_set);
5561 + atomic_set(&instance->fw_reset_no_pci_access, 0);
5562 + dev_info(&instance->pdev->dev,
5563 + "FW restarted successfully from %s!\n",
5564 + __func__);
5565
5566 - if (megasas_transition_to_ready(instance, 0))
5567 + /* waiting for about 30 seconds before retry */
5568 + ssleep(30);
5569 +
5570 + if (megasas_transition_to_ready(instance, 0))
5571 + goto fail_ready_state;
5572 + } else {
5573 goto fail_ready_state;
5574 + }
5575 }
5576
5577 megasas_init_ctrl_params(instance);
5578 @@ -5325,12 +5336,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
5579 instance->msix_vectors = (scratch_pad_2
5580 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5581 fw_msix_count = instance->msix_vectors;
5582 - } else { /* Invader series supports more than 8 MSI-x vectors*/
5583 + } else {
5584 instance->msix_vectors = ((scratch_pad_2
5585 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5586 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5587 - if (instance->msix_vectors > 16)
5588 - instance->msix_combined = true;
5589 +
5590 + /*
5591 + * For Invader series, > 8 MSI-x vectors
5592 + * supported by FW/HW implies combined
5593 + * reply queue mode is enabled.
5594 + * For Ventura series, > 16 MSI-x vectors
5595 + * supported by FW/HW implies combined
5596 + * reply queue mode is enabled.
5597 + */
5598 + switch (instance->adapter_type) {
5599 + case INVADER_SERIES:
5600 + if (instance->msix_vectors > 8)
5601 + instance->msix_combined = true;
5602 + break;
5603 + case VENTURA_SERIES:
5604 + if (instance->msix_vectors > 16)
5605 + instance->msix_combined = true;
5606 + break;
5607 + }
5608
5609 if (rdpq_enable)
5610 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5611 @@ -6028,13 +6056,13 @@ static int megasas_io_attach(struct megasas_instance *instance)
5612 * @instance: Adapter soft state
5613 * Description:
5614 *
5615 - * For Ventura, driver/FW will operate in 64bit DMA addresses.
5616 + * For Ventura, driver/FW will operate in 63bit DMA addresses.
5617 *
5618 * For invader-
5619 * By default, driver/FW will operate in 32bit DMA addresses
5620 * for consistent DMA mapping but if 32 bit consistent
5621 - * DMA mask fails, driver will try with 64 bit consistent
5622 - * mask provided FW is true 64bit DMA capable
5623 + * DMA mask fails, driver will try with 63 bit consistent
5624 + * mask provided FW is true 63bit DMA capable
5625 *
5626 * For older controllers(Thunderbolt and MFI based adapters)-
5627 * driver/FW will operate in 32 bit consistent DMA addresses.
5628 @@ -6047,15 +6075,15 @@ megasas_set_dma_mask(struct megasas_instance *instance)
5629 u32 scratch_pad_2;
5630
5631 pdev = instance->pdev;
5632 - consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
5633 - DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
5634 + consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
5635 + DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
5636
5637 if (IS_DMA64) {
5638 - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
5639 + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
5640 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
5641 goto fail_set_dma_mask;
5642
5643 - if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
5644 + if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
5645 (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
5646 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
5647 /*
5648 @@ -6068,7 +6096,7 @@ megasas_set_dma_mask(struct megasas_instance *instance)
5649 if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
5650 goto fail_set_dma_mask;
5651 else if (dma_set_mask_and_coherent(&pdev->dev,
5652 - DMA_BIT_MASK(64)))
5653 + DMA_BIT_MASK(63)))
5654 goto fail_set_dma_mask;
5655 }
5656 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
5657 @@ -6080,8 +6108,8 @@ megasas_set_dma_mask(struct megasas_instance *instance)
5658 instance->consistent_mask_64bit = true;
5659
5660 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
5661 - ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
5662 - (instance->consistent_mask_64bit ? "64" : "32"));
5663 + ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"),
5664 + (instance->consistent_mask_64bit ? "63" : "32"));
5665
5666 return 0;
5667
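Note: the megaraid DMA setup keeps the usual widest-first/32-bit-fallback ladder; only the cap drops from 64 to 63 bits. The motive for the 63-bit limit is not stated in this excerpt -- reading it as the firmware reserving addresses with the top bit set is an assumption to verify against the upstream commit message. The ladder itself:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int foo_set_dma_mask(struct pci_dev *pdev)
    {
            if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(63)))
                    return 0;
            /* fall back to 32-bit addressing before giving up */
            return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    }
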
5668 diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
5669 index 1f1a05a90d3d..fc08e46a93ca 100644
5670 --- a/drivers/scsi/qla2xxx/qla_gs.c
5671 +++ b/drivers/scsi/qla2xxx/qla_gs.c
5672 @@ -3360,15 +3360,15 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
5673 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
5674 sp->done = qla24xx_async_gpsc_sp_done;
5675
5676 - rval = qla2x00_start_sp(sp);
5677 - if (rval != QLA_SUCCESS)
5678 - goto done_free_sp;
5679 -
5680 ql_dbg(ql_dbg_disc, vha, 0x205e,
5681 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
5682 sp->name, fcport->port_name, sp->handle,
5683 fcport->loop_id, fcport->d_id.b.domain,
5684 fcport->d_id.b.area, fcport->d_id.b.al_pa);
5685 +
5686 + rval = qla2x00_start_sp(sp);
5687 + if (rval != QLA_SUCCESS)
5688 + goto done_free_sp;
5689 return rval;
5690
5691 done_free_sp:
5692 @@ -3729,13 +3729,14 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
5693 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
5694 sp->done = qla2x00_async_gpnid_sp_done;
5695
5696 + ql_dbg(ql_dbg_disc, vha, 0x2067,
5697 + "Async-%s hdl=%x ID %3phC.\n", sp->name,
5698 + sp->handle, ct_req->req.port_id.port_id);
5699 +
5700 rval = qla2x00_start_sp(sp);
5701 if (rval != QLA_SUCCESS)
5702 goto done_free_sp;
5703
5704 - ql_dbg(ql_dbg_disc, vha, 0x2067,
5705 - "Async-%s hdl=%x ID %3phC.\n", sp->name,
5706 - sp->handle, ct_req->req.port_id.port_id);
5707 return rval;
5708
5709 done_free_sp:
5710 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
5711 index ddce32fe0513..39a8f4a671aa 100644
5712 --- a/drivers/scsi/qla2xxx/qla_init.c
5713 +++ b/drivers/scsi/qla2xxx/qla_init.c
5714 @@ -247,6 +247,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
5715
5716 }
5717
5718 + ql_dbg(ql_dbg_disc, vha, 0x2072,
5719 + "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
5720 + "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
5721 + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
5722 + fcport->login_retry);
5723 +
5724 rval = qla2x00_start_sp(sp);
5725 if (rval != QLA_SUCCESS) {
5726 fcport->flags |= FCF_LOGIN_NEEDED;
5727 @@ -254,11 +260,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
5728 goto done_free_sp;
5729 }
5730
5731 - ql_dbg(ql_dbg_disc, vha, 0x2072,
5732 - "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
5733 - "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
5734 - fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
5735 - fcport->login_retry);
5736 return rval;
5737
5738 done_free_sp:
5739 @@ -303,15 +304,16 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
5740 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
5741
5742 sp->done = qla2x00_async_logout_sp_done;
5743 - rval = qla2x00_start_sp(sp);
5744 - if (rval != QLA_SUCCESS)
5745 - goto done_free_sp;
5746
5747 ql_dbg(ql_dbg_disc, vha, 0x2070,
5748 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
5749 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
5750 fcport->d_id.b.area, fcport->d_id.b.al_pa,
5751 fcport->port_name);
5752 +
5753 + rval = qla2x00_start_sp(sp);
5754 + if (rval != QLA_SUCCESS)
5755 + goto done_free_sp;
5756 return rval;
5757
5758 done_free_sp:
5759 @@ -489,13 +491,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
5760 sp->done = qla2x00_async_adisc_sp_done;
5761 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
5762 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
5763 - rval = qla2x00_start_sp(sp);
5764 - if (rval != QLA_SUCCESS)
5765 - goto done_free_sp;
5766
5767 ql_dbg(ql_dbg_disc, vha, 0x206f,
5768 "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
5769 sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
5770 +
5771 + rval = qla2x00_start_sp(sp);
5772 + if (rval != QLA_SUCCESS)
5773 + goto done_free_sp;
5774 +
5775 return rval;
5776
5777 done_free_sp:
5778 @@ -1161,14 +1165,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
5779
5780 sp->done = qla24xx_async_gpdb_sp_done;
5781
5782 - rval = qla2x00_start_sp(sp);
5783 - if (rval != QLA_SUCCESS)
5784 - goto done_free_sp;
5785 -
5786 ql_dbg(ql_dbg_disc, vha, 0x20dc,
5787 "Async-%s %8phC hndl %x opt %x\n",
5788 sp->name, fcport->port_name, sp->handle, opt);
5789
5790 + rval = qla2x00_start_sp(sp);
5791 + if (rval != QLA_SUCCESS)
5792 + goto done_free_sp;
5793 return rval;
5794
5795 done_free_sp:
5796 @@ -1698,15 +1701,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
5797 tm_iocb->u.tmf.data = tag;
5798 sp->done = qla2x00_tmf_sp_done;
5799
5800 - rval = qla2x00_start_sp(sp);
5801 - if (rval != QLA_SUCCESS)
5802 - goto done_free_sp;
5803 -
5804 ql_dbg(ql_dbg_taskm, vha, 0x802f,
5805 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
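
Note: the mt76 change is an indexing fix. The 8-byte CCMP PN buffer feeds two 32-bit words -- IV from bytes 0-3 and EIV from bytes 4-7 -- but &ccmp_pn[1] loaded bytes 1-4, a byte offset where the second word's offset was meant. Reduced to the two loads:

    #include <linux/types.h>

    static void foo_fill_iv(const u8 ccmp_pn[8], __le32 *iv, __le32 *eiv)
    {
            *iv  = *((const __le32 *)&ccmp_pn[0]);  /* bytes 0..3 */
            *eiv = *((const __le32 *)&ccmp_pn[4]);  /* bytes 4..7, not [1] */
    }
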
5806 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
5807 fcport->d_id.b.area, fcport->d_id.b.al_pa);
5808
5809 + rval = qla2x00_start_sp(sp);
5810 + if (rval != QLA_SUCCESS)
5811 + goto done_free_sp;
5812 wait_for_completion(&tm_iocb->u.tmf.comp);
5813
5814 rval = tm_iocb->u.tmf.data;
5815 @@ -1790,14 +1792,14 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
5816
5817 sp->done = qla24xx_abort_sp_done;
5818
5819 - rval = qla2x00_start_sp(sp);
5820 - if (rval != QLA_SUCCESS)
5821 - goto done_free_sp;
5822 -
5823 ql_dbg(ql_dbg_async, vha, 0x507c,
5824 "Abort command issued - hdl=%x, target_id=%x\n",
5825 cmd_sp->handle, fcport->tgt_id);
5826
5827 + rval = qla2x00_start_sp(sp);
5828 + if (rval != QLA_SUCCESS)
5829 + goto done_free_sp;
5830 +
5831 if (wait) {
5832 wait_for_completion(&abt_iocb->u.abt.comp);
5833 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
5834 diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
5835 index 088772ebef9b..77838d8fd9bb 100644
5836 --- a/drivers/spi/spi-gpio.c
5837 +++ b/drivers/spi/spi-gpio.c
5838 @@ -410,7 +410,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
5839 return status;
5840
5841 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
5842 - master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL;
5843 + master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL | SPI_CS_HIGH;
5844 master->flags = master_flags;
5845 master->bus_num = pdev->id;
5846 /* The master needs to think there is a chipselect even if not connected */
5847 @@ -437,7 +437,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
5848 spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
5849 }
5850 spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
5851 - spi_gpio->bitbang.flags = SPI_CS_HIGH;
5852
5853 status = spi_bitbang_start(&spi_gpio->bitbang);
5854 if (status)
5855 diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
5856 index 649caae2b603..25798119426b 100644
5857 --- a/drivers/staging/wilc1000/linux_wlan.c
5858 +++ b/drivers/staging/wilc1000/linux_wlan.c
5859 @@ -649,17 +649,17 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
5860 goto fail_locks;
5861 }
5862
5863 - if (wl->gpio_irq && init_irq(dev)) {
5864 - ret = -EIO;
5865 - goto fail_locks;
5866 - }
5867 -
5868 ret = wlan_initialize_threads(dev);
5869 if (ret < 0) {
5870 ret = -EIO;
5871 goto fail_wilc_wlan;
5872 }
5873
5874 + if (wl->gpio_irq && init_irq(dev)) {
5875 + ret = -EIO;
5876 + goto fail_threads;
5877 + }
5878 +
5879 if (!wl->dev_irq_num &&
5880 wl->hif_func->enable_interrupt &&
5881 wl->hif_func->enable_interrupt(wl)) {
5882 @@ -715,7 +715,7 @@ fail_irq_enable:
5883 fail_irq_init:
5884 if (wl->dev_irq_num)
5885 deinit_irq(dev);
5886 -
5887 +fail_threads:
5888 wlan_deinitialize_threads(dev);
5889 fail_wilc_wlan:
5890 wilc_wlan_cleanup(dev);
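
Note: the wilc1000 reorder restores a symmetric unwind ladder -- the IRQ is now requested after the threads are started, and its failure path gained a label that stops them again, so each label undoes exactly the steps completed so far. The canonical shape, with foo_* placeholders:

    struct foo_dev;

    int foo_start_threads(struct foo_dev *dev);
    void foo_stop_threads(struct foo_dev *dev);
    int foo_init_irq(struct foo_dev *dev);
    void foo_cleanup(struct foo_dev *dev);

    static int foo_up(struct foo_dev *dev)
    {
            int ret;

            ret = foo_start_threads(dev);           /* step 1 */
            if (ret)
                    goto fail_cleanup;

            ret = foo_init_irq(dev);                /* step 2, after 1 */
            if (ret)
                    goto fail_threads;              /* must undo step 1 */

            return 0;

    fail_threads:
            foo_stop_threads(dev);
    fail_cleanup:
            foo_cleanup(dev);
            return ret;
    }
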
5891 diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
5892 index ce1321a5cb7b..854b2bcca7c1 100644
5893 --- a/drivers/target/target_core_iblock.c
5894 +++ b/drivers/target/target_core_iblock.c
5895 @@ -514,8 +514,8 @@ iblock_execute_write_same(struct se_cmd *cmd)
5896 }
5897
5898 /* Always in 512 byte units for Linux/Block */
5899 - block_lba += sg->length >> IBLOCK_LBA_SHIFT;
5900 - sectors -= 1;
5901 + block_lba += sg->length >> SECTOR_SHIFT;
5902 + sectors -= sg->length >> SECTOR_SHIFT;
5903 }
5904
5905 iblock_submit_bios(&list);
5906 @@ -757,7 +757,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
5907 }
5908
5909 /* Always in 512 byte units for Linux/Block */
5910 - block_lba += sg->length >> IBLOCK_LBA_SHIFT;
5911 + block_lba += sg->length >> SECTOR_SHIFT;
5912 sg_num--;
5913 }
5914
5915 diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
5916 index 9cc3843404d4..cefc641145b3 100644
5917 --- a/drivers/target/target_core_iblock.h
5918 +++ b/drivers/target/target_core_iblock.h
5919 @@ -9,7 +9,6 @@
5920 #define IBLOCK_VERSION "4.0"
5921
5922 #define IBLOCK_MAX_CDBS 16
5923 -#define IBLOCK_LBA_SHIFT 9
5924
5925 struct iblock_req {
5926 refcount_t pending;
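
Note: the WRITE SAME fix is about keeping two counters in lockstep. Each scatterlist entry advances block_lba by sg->length >> SECTOR_SHIFT sectors, so sectors must shrink by the same amount; with the old "sectors -= 1", a 4 KiB entry advanced the LBA by 8 sectors while the budget dropped by only 1, issuing roughly 8x the requested I/O. A toy model of one loop step:

    #include <linux/blkdev.h>       /* sector_t, SECTOR_SHIFT (9) */

    static sector_t foo_advance(sector_t lba, u32 *sectors, u32 sg_len)
    {
            u32 n = sg_len >> SECTOR_SHIFT; /* 4 KiB entry -> 8 sectors */

            lba += n;
            *sectors -= n;  /* must mirror the lba step, not "-= 1" */
            return lba;
    }
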
5927 diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
5928 index fb20aa974ae1..819ae3b2bd7e 100644
5929 --- a/drivers/usb/typec/tcpm.c
5930 +++ b/drivers/usb/typec/tcpm.c
5931 @@ -37,6 +37,7 @@
5932 S(SRC_ATTACHED), \
5933 S(SRC_STARTUP), \
5934 S(SRC_SEND_CAPABILITIES), \
5935 + S(SRC_SEND_CAPABILITIES_TIMEOUT), \
5936 S(SRC_NEGOTIATE_CAPABILITIES), \
5937 S(SRC_TRANSITION_SUPPLY), \
5938 S(SRC_READY), \
5939 @@ -2987,10 +2988,34 @@ static void run_state_machine(struct tcpm_port *port)
5940 /* port->hard_reset_count = 0; */
5941 port->caps_count = 0;
5942 port->pd_capable = true;
5943 - tcpm_set_state_cond(port, hard_reset_state(port),
5944 + tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
5945 PD_T_SEND_SOURCE_CAP);
5946 }
5947 break;
5948 + case SRC_SEND_CAPABILITIES_TIMEOUT:
5949 + /*
5950 + * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
5951 + *
5952 + * PD 2.0 sinks are supposed to accept src-capabilities with a
5953 + * 3.0 header and simply ignore any src PDOs which the sink does
5954 + * not understand, such as PPS, but some 2.0 sinks instead ignore
5955 + * the entire PD_DATA_SOURCE_CAP message, causing contract
5956 + * negotiation to fail.
5957 + *
5958 + * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
5959 + * sending src-capabilities with a lower PD revision to
5960 + * make these broken sinks work.
5961 + */
5962 + if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
5963 + tcpm_set_state(port, HARD_RESET_SEND, 0);
5964 + } else if (port->negotiated_rev > PD_REV20) {
5965 + port->negotiated_rev--;
5966 + port->hard_reset_count = 0;
5967 + tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
5968 + } else {
5969 + tcpm_set_state(port, hard_reset_state(port), 0);
5970 + }
5971 + break;
5972 case SRC_NEGOTIATE_CAPABILITIES:
5973 ret = tcpm_pd_check_request(port);
5974 if (ret < 0) {
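
Note: the new timeout state encodes a three-step recovery policy -- hard-reset up to PD_N_HARD_RESET_COUNT times, then retry with the advertised PD revision stepped down one notch for the benefit of broken PD 2.0 sinks, and only then give up. The policy isolated from the state machine; a sketch, with the two constants redefined locally for self-containment:

    #define PD_N_HARD_RESET_COUNT   2       /* per the PD spec */
    #define PD_REV20                0x1     /* spec revision encoding */

    enum foo_next { FOO_HARD_RESET, FOO_RETRY_LOWER_REV, FOO_GIVE_UP };

    static enum foo_next foo_src_caps_timeout(unsigned int hard_reset_count,
                                              unsigned int *negotiated_rev)
    {
            if (hard_reset_count < PD_N_HARD_RESET_COUNT)
                    return FOO_HARD_RESET;          /* normal recovery */

            if (*negotiated_rev > PD_REV20) {
                    (*negotiated_rev)--;            /* humor 2.0 sinks */
                    return FOO_RETRY_LOWER_REV;
            }

            return FOO_GIVE_UP;                     /* already at 2.0 */
    }
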
5975 diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
5976 index 40589850eb33..a9be2d8e98df 100644
5977 --- a/drivers/vhost/test.c
5978 +++ b/drivers/vhost/test.c
5979 @@ -23,6 +23,12 @@
5980 * Using this limit prevents one virtqueue from starving others. */
5981 #define VHOST_TEST_WEIGHT 0x80000
5982
5983 +/* Max number of packets transferred before requeueing the job.
5984 + * Using this limit prevents one virtqueue from starving the others
5985 + * with packets.
5986 + */
5987 +#define VHOST_TEST_PKT_WEIGHT 256
5988 +
5989 enum {
5990 VHOST_TEST_VQ = 0,
5991 VHOST_TEST_VQ_MAX = 1,
5992 @@ -81,10 +87,8 @@ static void handle_vq(struct vhost_test *n)
5993 }
5994 vhost_add_used_and_signal(&n->dev, vq, head, 0);
5995 total_len += len;
5996 - if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
5997 - vhost_poll_queue(&vq->poll);
5998 + if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
5999 break;
6000 - }
6001 }
6002
6003 mutex_unlock(&vq->mutex);
6004 @@ -116,7 +120,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
6005 dev = &n->dev;
6006 vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
6007 n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
6008 - vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
6009 + vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
6010 + VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
6011
6012 f->private_data = n;
6013
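Note: replacing the open-coded total_len check with vhost_exceeds_weight() hands both budgets -- bytes and packets -- to the core, which also requeues the poll work itself when one runs out, so the explicit vhost_poll_queue() call disappears. The loop shape, simplified; foo_process_one_buf() is a placeholder for the per-buffer work:

    #include "vhost.h"      /* driver-local header, as in drivers/vhost/ */

    int foo_process_one_buf(struct vhost_virtqueue *vq);

    static void foo_handle_vq(struct vhost_virtqueue *vq)
    {
            int total_len = 0;
            int len;

            for (;;) {
                    len = foo_process_one_buf(vq);
                    if (len <= 0)
                            break;
                    total_len += len;
                    /* core checks both budgets and reschedules itself */
                    if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
                            break;
            }
    }
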
6014 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
6015 index 0752f8dc47b1..98b6eb902df9 100644
6016 --- a/drivers/vhost/vhost.c
6017 +++ b/drivers/vhost/vhost.c
6018 @@ -2073,7 +2073,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
6019 /* If this is an input descriptor, increment that count. */
6020 if (access == VHOST_ACCESS_WO) {
6021 *in_num += ret;
6022 - if (unlikely(log)) {
6023 + if (unlikely(log && ret)) {
6024 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
6025 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
6026 ++*log_num;
6027 @@ -2216,7 +2216,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
6028 /* If this is an input descriptor,
6029 * increment that count. */
6030 *in_num += ret;
6031 - if (unlikely(log)) {
6032 + if (unlikely(log && ret)) {
6033 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
6034 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
6035 ++*log_num;
6036 diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
6037 index 9bfa66592aa7..c71e534ca7ef 100644
6038 --- a/fs/btrfs/compression.c
6039 +++ b/fs/btrfs/compression.c
6040 @@ -42,6 +42,22 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type)
6041 return NULL;
6042 }
6043
6044 +bool btrfs_compress_is_valid_type(const char *str, size_t len)
6045 +{
6046 + int i;
6047 +
6048 + for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
6049 + size_t comp_len = strlen(btrfs_compress_types[i]);
6050 +
6051 + if (len < comp_len)
6052 + continue;
6053 +
6054 + if (!strncmp(btrfs_compress_types[i], str, comp_len))
6055 + return true;
6056 + }
6057 + return false;
6058 +}
6059 +
6060 static int btrfs_decompress_bio(struct compressed_bio *cb);
6061
6062 static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
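
btrfs_compress_is_valid_type() walks the compression-name table from index 1, skipping the table's leading placeholder entry, and accepts any string that begins with a known algorithm name; because the comparison covers only the table entry's length, a value carrying a level suffix such as "zlib:9" still validates. It is used by prop_compression_validate() later in this patch. A small userspace demo of the same table-driven prefix match (the table below mirrors the kernel's zlib/lzo/zstd set):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    static const char *const compress_types[] = { "", "zlib", "lzo", "zstd" };

    static bool compress_is_valid_type(const char *str, size_t len)
    {
        size_t i;

        for (i = 1; i < sizeof(compress_types) / sizeof(compress_types[0]); i++) {
            size_t comp_len = strlen(compress_types[i]);

            if (len < comp_len)
                continue;
            if (!strncmp(compress_types[i], str, comp_len))
                return true;    /* prefix match: "zlib:9" passes */
        }
        return false;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               compress_is_valid_type("zstd", 4),   /* 1 */
               compress_is_valid_type("zlib:9", 6), /* 1 */
               compress_is_valid_type("gzip", 4));  /* 0 */
        return 0;
    }
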
6063 diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
6064 index ddda9b80bf20..f97d90a1fa53 100644
6065 --- a/fs/btrfs/compression.h
6066 +++ b/fs/btrfs/compression.h
6067 @@ -127,6 +127,7 @@ extern const struct btrfs_compress_op btrfs_lzo_compress;
6068 extern const struct btrfs_compress_op btrfs_zstd_compress;
6069
6070 const char* btrfs_compress_type2str(enum btrfs_compression_type type);
6071 +bool btrfs_compress_is_valid_type(const char *str, size_t len);
6072
6073 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
6074
6075 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
6076 index 82682da5a40d..4644f9b629a5 100644
6077 --- a/fs/btrfs/ctree.h
6078 +++ b/fs/btrfs/ctree.h
6079 @@ -3200,6 +3200,9 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
6080 struct btrfs_trans_handle *trans, int mode,
6081 u64 start, u64 num_bytes, u64 min_size,
6082 loff_t actual_len, u64 *alloc_hint);
6083 +int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
6084 + u64 start, u64 end, int *page_started, unsigned long *nr_written,
6085 + struct writeback_control *wbc);
6086 extern const struct dentry_operations btrfs_dentry_operations;
6087 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
6088 void btrfs_test_inode_set_ops(struct inode *inode);
6089 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
6090 index 0cc800d22a08..88c939f7aad9 100644
6091 --- a/fs/btrfs/extent-tree.c
6092 +++ b/fs/btrfs/extent-tree.c
6093 @@ -10478,22 +10478,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
6094 }
6095 spin_unlock(&block_group->lock);
6096
6097 - if (remove_em) {
6098 - struct extent_map_tree *em_tree;
6099 -
6100 - em_tree = &fs_info->mapping_tree.map_tree;
6101 - write_lock(&em_tree->lock);
6102 - /*
6103 - * The em might be in the pending_chunks list, so make sure the
6104 - * chunk mutex is locked, since remove_extent_mapping() will
6105 - * delete us from that list.
6106 - */
6107 - remove_extent_mapping(em_tree, em);
6108 - write_unlock(&em_tree->lock);
6109 - /* once for the tree */
6110 - free_extent_map(em);
6111 - }
6112 -
6113 mutex_unlock(&fs_info->chunk_mutex);
6114
6115 ret = remove_block_group_free_space(trans, block_group);
6116 @@ -10510,6 +10494,24 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
6117 goto out;
6118
6119 ret = btrfs_del_item(trans, root, path);
6120 + if (ret)
6121 + goto out;
6122 +
6123 + if (remove_em) {
6124 + struct extent_map_tree *em_tree;
6125 +
6126 + em_tree = &fs_info->mapping_tree.map_tree;
6127 + write_lock(&em_tree->lock);
6128 + /*
6129 + * The em might be in the pending_chunks list, so make sure the
6130 + * chunk mutex is locked, since remove_extent_mapping() will
6131 + * delete us from that list.
6132 + */
6133 + remove_extent_mapping(em_tree, em);
6134 + write_unlock(&em_tree->lock);
6135 + /* once for the tree */
6136 + free_extent_map(em);
6137 + }
6138 out:
6139 btrfs_free_path(path);
6140 return ret;
6141 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
6142 index 90b0a6eff535..cb598eb4f3bd 100644
6143 --- a/fs/btrfs/extent_io.c
6144 +++ b/fs/btrfs/extent_io.c
6145 @@ -3199,7 +3199,7 @@ static void update_nr_written(struct writeback_control *wbc,
6146 /*
6147 * helper for __extent_writepage, doing all of the delayed allocation setup.
6148 *
6149 - * This returns 1 if our fill_delalloc function did all the work required
6150 + * This returns 1 if the btrfs_run_delalloc_range function did all the work required
6151 * to write the page (copy into inline extent). In this case the IO has
6152 * been started and the page is already unlocked.
6153 *
6154 @@ -3220,7 +3220,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
6155 int ret;
6156 int page_started = 0;
6157
6158 - if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
6159 + if (epd->extent_locked)
6160 return 0;
6161
6162 while (delalloc_end < page_end) {
6163 @@ -3233,18 +3233,16 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
6164 delalloc_start = delalloc_end + 1;
6165 continue;
6166 }
6167 - ret = tree->ops->fill_delalloc(inode, page,
6168 - delalloc_start,
6169 - delalloc_end,
6170 - &page_started,
6171 - nr_written, wbc);
6172 + ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
6173 + delalloc_end, &page_started, nr_written, wbc);
6174 /* File system has been set read-only */
6175 if (ret) {
6176 SetPageError(page);
6177 - /* fill_delalloc should be return < 0 for error
6178 - * but just in case, we use > 0 here meaning the
6179 - * IO is started, so we don't want to return > 0
6180 - * unless things are going well.
6181 + /*
6182 + * btrfs_run_delalloc_range should return < 0 for error
6183 + * but just in case, we use > 0 here meaning the IO is
6184 + * started, so we don't want to return > 0 unless
6185 + * things are going well.
6186 */
6187 ret = ret < 0 ? ret : -EIO;
6188 goto done;
6189 diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
6190 index b4d03e677e1d..ed27becd963c 100644
6191 --- a/fs/btrfs/extent_io.h
6192 +++ b/fs/btrfs/extent_io.h
6193 @@ -106,11 +106,6 @@ struct extent_io_ops {
6194 /*
6195 * Optional hooks, called if the pointer is not NULL
6196 */
6197 - int (*fill_delalloc)(void *private_data, struct page *locked_page,
6198 - u64 start, u64 end, int *page_started,
6199 - unsigned long *nr_written,
6200 - struct writeback_control *wbc);
6201 -
6202 int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
6203 void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
6204 struct extent_state *state, int uptodate);
6205 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
6206 index 355ff08e9d44..98c535ae038d 100644
6207 --- a/fs/btrfs/inode.c
6208 +++ b/fs/btrfs/inode.c
6209 @@ -110,17 +110,17 @@ static void __endio_write_update_ordered(struct inode *inode,
6210 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
6211 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
6212 * to be released, which we want to happen only when finishing the ordered
6213 - * extent (btrfs_finish_ordered_io()). Also note that the caller of the
6214 - * fill_delalloc() callback already does proper cleanup for the first page of
6215 - * the range, that is, it invokes the callback writepage_end_io_hook() for the
6216 - * range of the first page.
6217 + * extent (btrfs_finish_ordered_io()).
6218 */
6219 static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
6220 - const u64 offset,
6221 - const u64 bytes)
6222 + struct page *locked_page,
6223 + u64 offset, u64 bytes)
6224 {
6225 unsigned long index = offset >> PAGE_SHIFT;
6226 unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
6227 + u64 page_start = page_offset(locked_page);
6228 + u64 page_end = page_start + PAGE_SIZE - 1;
6229 +
6230 struct page *page;
6231
6232 while (index <= end_index) {
6233 @@ -131,8 +131,18 @@ static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
6234 ClearPagePrivate2(page);
6235 put_page(page);
6236 }
6237 - return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
6238 - bytes - PAGE_SIZE, false);
6239 +
6240 + /*
6241 + * If this page belongs to the delalloc range being instantiated,
6242 + * skip it, since the first page of a range is going to be
6243 + * properly cleaned up by the caller of run_delalloc_range.
6244 + */
6245 + if (page_start >= offset && page_end <= (offset + bytes - 1)) {
6246 + offset += PAGE_SIZE;
6247 + bytes -= PAGE_SIZE;
6248 + }
6249 +
6250 + return __endio_write_update_ordered(inode, offset, bytes, false);
6251 }
6252
6253 static int btrfs_dirty_inode(struct inode *inode);
6254 @@ -1599,12 +1609,12 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
6255 }
6256
6257 /*
6258 - * extent_io.c call back to do delayed allocation processing
6259 + * Function to process delayed allocation (create CoW) for ranges which are
6260 + * being touched for the first time.
6261 */
6262 -static int run_delalloc_range(void *private_data, struct page *locked_page,
6263 - u64 start, u64 end, int *page_started,
6264 - unsigned long *nr_written,
6265 - struct writeback_control *wbc)
6266 +int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
6267 + u64 start, u64 end, int *page_started, unsigned long *nr_written,
6268 + struct writeback_control *wbc)
6269 {
6270 struct inode *inode = private_data;
6271 int ret;
6272 @@ -1629,7 +1639,8 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
6273 write_flags);
6274 }
6275 if (ret)
6276 - btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
6277 + btrfs_cleanup_ordered_extents(inode, locked_page, start,
6278 + end - start + 1);
6279 return ret;
6280 }
6281
6282 @@ -10598,7 +10609,6 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
6283 .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
6284
6285 /* optional callbacks */
6286 - .fill_delalloc = run_delalloc_range,
6287 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
6288 .writepage_start_hook = btrfs_writepage_start_hook,
6289 .set_bit_hook = btrfs_set_bit_hook,
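
The reworked btrfs_cleanup_ordered_extents() receives the locked page so it can exclude it from the ordered-extent error cleanup: the caller of run_delalloc_range cleans the first page of the range itself, so when the locked page falls inside the failed range the offset/bytes window is advanced one page. The arithmetic in isolation, as a sketch with the kernel's u64 and PAGE_SIZE stubbed out (and assuming, as the hunk does, that a locked page inside the range is its first page):

    #include <stdint.h>

    #define PAGE_SIZE 4096u  /* illustrative; the kernel's is arch-defined */

    static void skip_locked_page(uint64_t page_start, uint64_t *offset,
                                 uint64_t *bytes)
    {
        uint64_t page_end = page_start + PAGE_SIZE - 1;

        /* Locked page inside [offset, offset + bytes)? Then step the
         * cleanup window one page forward so the caller's own cleanup
         * of that page is not repeated here. */
        if (page_start >= *offset && page_end <= *offset + *bytes - 1) {
            *offset += PAGE_SIZE;
            *bytes -= PAGE_SIZE;
        }
    }
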
6290 diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
6291 index 61d22a56c0ba..6980a0e13f18 100644
6292 --- a/fs/btrfs/props.c
6293 +++ b/fs/btrfs/props.c
6294 @@ -366,11 +366,7 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
6295
6296 static int prop_compression_validate(const char *value, size_t len)
6297 {
6298 - if (!strncmp("lzo", value, 3))
6299 - return 0;
6300 - else if (!strncmp("zlib", value, 4))
6301 - return 0;
6302 - else if (!strncmp("zstd", value, 4))
6303 + if (btrfs_compress_is_valid_type(value, len))
6304 return 0;
6305
6306 return -EINVAL;
6307 diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
6308 index 3be1456b5116..916c39770467 100644
6309 --- a/fs/btrfs/scrub.c
6310 +++ b/fs/btrfs/scrub.c
6311 @@ -322,6 +322,7 @@ static struct full_stripe_lock *insert_full_stripe_lock(
6312 struct rb_node *parent = NULL;
6313 struct full_stripe_lock *entry;
6314 struct full_stripe_lock *ret;
6315 + unsigned int nofs_flag;
6316
6317 lockdep_assert_held(&locks_root->lock);
6318
6319 @@ -339,8 +340,17 @@ static struct full_stripe_lock *insert_full_stripe_lock(
6320 }
6321 }
6322
6323 - /* Insert new lock */
6324 + /*
6325 + * Insert new lock.
6326 + *
6327 + * We must use GFP_NOFS because the scrub task might be waiting for a
6328 + * worker task executing this function and in turn a transaction commit
6329 + * might be waiting for the scrub task to pause (which needs to wait for all
6330 + * the worker tasks to complete before pausing).
6331 + */
6332 + nofs_flag = memalloc_nofs_save();
6333 ret = kmalloc(sizeof(*ret), GFP_KERNEL);
6334 + memalloc_nofs_restore(nofs_flag);
6335 if (!ret)
6336 return ERR_PTR(-ENOMEM);
6337 ret->logical = fstripe_logical;
6338 @@ -568,12 +578,11 @@ static void scrub_put_ctx(struct scrub_ctx *sctx)
6339 scrub_free_ctx(sctx);
6340 }
6341
6342 -static noinline_for_stack
6343 -struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
6344 +static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
6345 + struct btrfs_fs_info *fs_info, int is_dev_replace)
6346 {
6347 struct scrub_ctx *sctx;
6348 int i;
6349 - struct btrfs_fs_info *fs_info = dev->fs_info;
6350
6351 sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
6352 if (!sctx)
6353 @@ -582,7 +591,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
6354 sctx->is_dev_replace = is_dev_replace;
6355 sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
6356 sctx->curr = -1;
6357 - sctx->fs_info = dev->fs_info;
6358 + sctx->fs_info = fs_info;
6359 + INIT_LIST_HEAD(&sctx->csum_list);
6360 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
6361 struct scrub_bio *sbio;
6362
6363 @@ -607,7 +617,6 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
6364 atomic_set(&sctx->workers_pending, 0);
6365 atomic_set(&sctx->cancel_req, 0);
6366 sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
6367 - INIT_LIST_HEAD(&sctx->csum_list);
6368
6369 spin_lock_init(&sctx->list_lock);
6370 spin_lock_init(&sctx->stat_lock);
6371 @@ -1622,8 +1631,19 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
6372 mutex_lock(&sctx->wr_lock);
6373 again:
6374 if (!sctx->wr_curr_bio) {
6375 + unsigned int nofs_flag;
6376 +
6377 + /*
6378 + * We must use GFP_NOFS because the scrub task might be waiting
6379 + * for a worker task executing this function and in turn a
6380 + * transaction commit might be waiting for the scrub task to pause
6381 + * (which needs to wait for all the worker tasks to complete
6382 + * before pausing).
6383 + */
6384 + nofs_flag = memalloc_nofs_save();
6385 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
6386 GFP_KERNEL);
6387 + memalloc_nofs_restore(nofs_flag);
6388 if (!sctx->wr_curr_bio) {
6389 mutex_unlock(&sctx->wr_lock);
6390 return -ENOMEM;
6391 @@ -3022,8 +3042,7 @@ out:
6392 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
6393 struct map_lookup *map,
6394 struct btrfs_device *scrub_dev,
6395 - int num, u64 base, u64 length,
6396 - int is_dev_replace)
6397 + int num, u64 base, u64 length)
6398 {
6399 struct btrfs_path *path, *ppath;
6400 struct btrfs_fs_info *fs_info = sctx->fs_info;
6401 @@ -3299,7 +3318,7 @@ again:
6402 extent_physical = extent_logical - logical + physical;
6403 extent_dev = scrub_dev;
6404 extent_mirror_num = mirror_num;
6405 - if (is_dev_replace)
6406 + if (sctx->is_dev_replace)
6407 scrub_remap_extent(fs_info, extent_logical,
6408 extent_len, &extent_physical,
6409 &extent_dev,
6410 @@ -3397,8 +3416,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
6411 struct btrfs_device *scrub_dev,
6412 u64 chunk_offset, u64 length,
6413 u64 dev_offset,
6414 - struct btrfs_block_group_cache *cache,
6415 - int is_dev_replace)
6416 + struct btrfs_block_group_cache *cache)
6417 {
6418 struct btrfs_fs_info *fs_info = sctx->fs_info;
6419 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
6420 @@ -3435,8 +3453,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
6421 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
6422 map->stripes[i].physical == dev_offset) {
6423 ret = scrub_stripe(sctx, map, scrub_dev, i,
6424 - chunk_offset, length,
6425 - is_dev_replace);
6426 + chunk_offset, length);
6427 if (ret)
6428 goto out;
6429 }
6430 @@ -3449,8 +3466,7 @@ out:
6431
6432 static noinline_for_stack
6433 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
6434 - struct btrfs_device *scrub_dev, u64 start, u64 end,
6435 - int is_dev_replace)
6436 + struct btrfs_device *scrub_dev, u64 start, u64 end)
6437 {
6438 struct btrfs_dev_extent *dev_extent = NULL;
6439 struct btrfs_path *path;
6440 @@ -3544,7 +3560,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
6441 */
6442 scrub_pause_on(fs_info);
6443 ret = btrfs_inc_block_group_ro(cache);
6444 - if (!ret && is_dev_replace) {
6445 + if (!ret && sctx->is_dev_replace) {
6446 /*
6447 + * If we are doing a device replace, wait for any tasks
6448 + * that started delalloc right before we set the block
6449 @@ -3609,7 +3625,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
6450 dev_replace->item_needs_writeback = 1;
6451 btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
6452 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
6453 - found_key.offset, cache, is_dev_replace);
6454 + found_key.offset, cache);
6455
6456 /*
6457 * flush, submit all pending read and write bios, afterwards
6458 @@ -3670,7 +3686,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
6459 btrfs_put_block_group(cache);
6460 if (ret)
6461 break;
6462 - if (is_dev_replace &&
6463 + if (sctx->is_dev_replace &&
6464 atomic64_read(&dev_replace->num_write_errors) > 0) {
6465 ret = -EIO;
6466 break;
6467 @@ -3762,16 +3778,6 @@ fail_scrub_workers:
6468 return -ENOMEM;
6469 }
6470
6471 -static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
6472 -{
6473 - if (--fs_info->scrub_workers_refcnt == 0) {
6474 - btrfs_destroy_workqueue(fs_info->scrub_workers);
6475 - btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
6476 - btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
6477 - }
6478 - WARN_ON(fs_info->scrub_workers_refcnt < 0);
6479 -}
6480 -
6481 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6482 u64 end, struct btrfs_scrub_progress *progress,
6483 int readonly, int is_dev_replace)
6484 @@ -3779,6 +3785,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6485 struct scrub_ctx *sctx;
6486 int ret;
6487 struct btrfs_device *dev;
6488 + unsigned int nofs_flag;
6489 + struct btrfs_workqueue *scrub_workers = NULL;
6490 + struct btrfs_workqueue *scrub_wr_comp = NULL;
6491 + struct btrfs_workqueue *scrub_parity = NULL;
6492
6493 if (btrfs_fs_closing(fs_info))
6494 return -EINVAL;
6495 @@ -3820,13 +3830,18 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6496 return -EINVAL;
6497 }
6498
6499 + /* Allocate outside of device_list_mutex */
6500 + sctx = scrub_setup_ctx(fs_info, is_dev_replace);
6501 + if (IS_ERR(sctx))
6502 + return PTR_ERR(sctx);
6503
6504 mutex_lock(&fs_info->fs_devices->device_list_mutex);
6505 dev = btrfs_find_device(fs_info, devid, NULL, NULL);
6506 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
6507 !is_dev_replace)) {
6508 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6509 - return -ENODEV;
6510 + ret = -ENODEV;
6511 + goto out_free_ctx;
6512 }
6513
6514 if (!is_dev_replace && !readonly &&
6515 @@ -3834,7 +3849,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6516 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6517 btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
6518 rcu_str_deref(dev->name));
6519 - return -EROFS;
6520 + ret = -EROFS;
6521 + goto out_free_ctx;
6522 }
6523
6524 mutex_lock(&fs_info->scrub_lock);
6525 @@ -3842,7 +3858,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6526 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
6527 mutex_unlock(&fs_info->scrub_lock);
6528 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6529 - return -EIO;
6530 + ret = -EIO;
6531 + goto out_free_ctx;
6532 }
6533
6534 btrfs_dev_replace_read_lock(&fs_info->dev_replace);
6535 @@ -3852,7 +3869,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6536 btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
6537 mutex_unlock(&fs_info->scrub_lock);
6538 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6539 - return -EINPROGRESS;
6540 + ret = -EINPROGRESS;
6541 + goto out_free_ctx;
6542 }
6543 btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
6544
6545 @@ -3860,16 +3878,9 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6546 if (ret) {
6547 mutex_unlock(&fs_info->scrub_lock);
6548 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6549 - return ret;
6550 + goto out_free_ctx;
6551 }
6552
6553 - sctx = scrub_setup_ctx(dev, is_dev_replace);
6554 - if (IS_ERR(sctx)) {
6555 - mutex_unlock(&fs_info->scrub_lock);
6556 - mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6557 - scrub_workers_put(fs_info);
6558 - return PTR_ERR(sctx);
6559 - }
6560 sctx->readonly = readonly;
6561 dev->scrub_ctx = sctx;
6562 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6563 @@ -3882,6 +3893,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6564 atomic_inc(&fs_info->scrubs_running);
6565 mutex_unlock(&fs_info->scrub_lock);
6566
6567 + /*
6568 + * In order to avoid deadlock with reclaim when there is a transaction
6569 + * trying to pause scrub, make sure we use GFP_NOFS for all the
6570 + * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
6571 + * invoked by our callees. The pausing request is done when the
6572 + * transaction commit starts, and it blocks the transaction until scrub
6573 + * is paused (done at specific points in scrub_stripe() or right above
6574 + * before incrementing fs_info->scrubs_running).
6575 + */
6576 + nofs_flag = memalloc_nofs_save();
6577 if (!is_dev_replace) {
6578 /*
6579 * by holding device list mutex, we can
6580 @@ -3893,8 +3914,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6581 }
6582
6583 if (!ret)
6584 - ret = scrub_enumerate_chunks(sctx, dev, start, end,
6585 - is_dev_replace);
6586 + ret = scrub_enumerate_chunks(sctx, dev, start, end);
6587 + memalloc_nofs_restore(nofs_flag);
6588
6589 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
6590 atomic_dec(&fs_info->scrubs_running);
6591 @@ -3907,11 +3928,23 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6592
6593 mutex_lock(&fs_info->scrub_lock);
6594 dev->scrub_ctx = NULL;
6595 - scrub_workers_put(fs_info);
6596 + if (--fs_info->scrub_workers_refcnt == 0) {
6597 + scrub_workers = fs_info->scrub_workers;
6598 + scrub_wr_comp = fs_info->scrub_wr_completion_workers;
6599 + scrub_parity = fs_info->scrub_parity_workers;
6600 + }
6601 mutex_unlock(&fs_info->scrub_lock);
6602
6603 + btrfs_destroy_workqueue(scrub_workers);
6604 + btrfs_destroy_workqueue(scrub_wr_comp);
6605 + btrfs_destroy_workqueue(scrub_parity);
6606 scrub_put_ctx(sctx);
6607
6608 + return ret;
6609 +
6610 +out_free_ctx:
6611 + scrub_free_ctx(sctx);
6612 +
6613 return ret;
6614 }
6615
6616 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
6617 index 6e008bd5c8cd..a8297e7489d9 100644
6618 --- a/fs/btrfs/volumes.c
6619 +++ b/fs/btrfs/volumes.c
6620 @@ -7411,6 +7411,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
6621 struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
6622 struct extent_map *em;
6623 struct map_lookup *map;
6624 + struct btrfs_device *dev;
6625 u64 stripe_len;
6626 bool found = false;
6627 int ret = 0;
6628 @@ -7460,6 +7461,34 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
6629 physical_offset, devid);
6630 ret = -EUCLEAN;
6631 }
6632 +
6633 + /* Make sure no dev extent is beyond device boundary */
6634 + dev = btrfs_find_device(fs_info, devid, NULL, NULL);
6635 + if (!dev) {
6636 + btrfs_err(fs_info, "failed to find devid %llu", devid);
6637 + ret = -EUCLEAN;
6638 + goto out;
6639 + }
6640 +
6641 + /* It's possible this device is a dummy for a seed device */
6642 + if (dev->disk_total_bytes == 0) {
6643 + dev = find_device(fs_info->fs_devices->seed, devid, NULL);
6644 + if (!dev) {
6645 + btrfs_err(fs_info, "failed to find seed devid %llu",
6646 + devid);
6647 + ret = -EUCLEAN;
6648 + goto out;
6649 + }
6650 + }
6651 +
6652 + if (physical_offset + physical_len > dev->disk_total_bytes) {
6653 + btrfs_err(fs_info,
6654 +"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
6655 + devid, physical_offset, physical_len,
6656 + dev->disk_total_bytes);
6657 + ret = -EUCLEAN;
6658 + goto out;
6659 + }
6660 out:
6661 free_extent_map(em);
6662 return ret;
6663 diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
6664 index 11f19432a74c..665a86f83f4b 100644
6665 --- a/fs/ceph/inode.c
6666 +++ b/fs/ceph/inode.c
6667 @@ -528,13 +528,16 @@ static void ceph_i_callback(struct rcu_head *head)
6668 kmem_cache_free(ceph_inode_cachep, ci);
6669 }
6670
6671 -void ceph_destroy_inode(struct inode *inode)
6672 +void ceph_evict_inode(struct inode *inode)
6673 {
6674 struct ceph_inode_info *ci = ceph_inode(inode);
6675 struct ceph_inode_frag *frag;
6676 struct rb_node *n;
6677
6678 - dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
6679 + dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
6680 +
6681 + truncate_inode_pages_final(&inode->i_data);
6682 + clear_inode(inode);
6683
6684 ceph_fscache_unregister_inode_cookie(ci);
6685
6686 diff --git a/fs/ceph/super.c b/fs/ceph/super.c
6687 index c5cf46e43f2e..02528e11bf33 100644
6688 --- a/fs/ceph/super.c
6689 +++ b/fs/ceph/super.c
6690 @@ -827,9 +827,9 @@ static int ceph_remount(struct super_block *sb, int *flags, char *data)
6691
6692 static const struct super_operations ceph_super_ops = {
6693 .alloc_inode = ceph_alloc_inode,
6694 - .destroy_inode = ceph_destroy_inode,
6695 .write_inode = ceph_write_inode,
6696 .drop_inode = ceph_drop_inode,
6697 + .evict_inode = ceph_evict_inode,
6698 .sync_fs = ceph_sync_fs,
6699 .put_super = ceph_put_super,
6700 .remount_fs = ceph_remount,
6701 diff --git a/fs/ceph/super.h b/fs/ceph/super.h
6702 index 018019309790..6e968e48e5e4 100644
6703 --- a/fs/ceph/super.h
6704 +++ b/fs/ceph/super.h
6705 @@ -854,7 +854,7 @@ static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
6706 extern const struct inode_operations ceph_file_iops;
6707
6708 extern struct inode *ceph_alloc_inode(struct super_block *sb);
6709 -extern void ceph_destroy_inode(struct inode *inode);
6710 +extern void ceph_evict_inode(struct inode *inode);
6711 extern int ceph_drop_inode(struct inode *inode);
6712
6713 extern struct inode *ceph_get_inode(struct super_block *sb,
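
Moving ceph from ->destroy_inode to ->evict_inode follows the usual VFS shape: evict_inode runs while the inode is still live, so the handler flushes the page cache and calls clear_inode() before tearing down filesystem-private state. A minimal sketch of that contract (not ceph's full handler):

    #include <linux/fs.h>
    #include <linux/mm.h>

    static void example_evict_inode(struct inode *inode)
    {
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
        /* fs-private teardown that used to live in ->destroy_inode
         * can run here, with the inode still fully valid */
    }
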
6714 diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
6715 index 9731d0d891e7..aba2b48d4da1 100644
6716 --- a/fs/cifs/cifs_fs_sb.h
6717 +++ b/fs/cifs/cifs_fs_sb.h
6718 @@ -72,5 +72,10 @@ struct cifs_sb_info {
6719 struct delayed_work prune_tlinks;
6720 struct rcu_head rcu;
6721 char *prepath;
6722 + /*
6723 + * Indicates whether the serverino option was turned off later
6724 + * (cifs_autodisable_serverino) in order to match new mounts.
6725 + */
6726 + bool mnt_cifs_serverino_autodisabled;
6727 };
6728 #endif /* _CIFS_FS_SB_H */
6729 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
6730 index fb32f3d6925e..64e3888f30e6 100644
6731 --- a/fs/cifs/cifsfs.c
6732 +++ b/fs/cifs/cifsfs.c
6733 @@ -292,6 +292,7 @@ cifs_alloc_inode(struct super_block *sb)
6734 cifs_inode->uniqueid = 0;
6735 cifs_inode->createtime = 0;
6736 cifs_inode->epoch = 0;
6737 + spin_lock_init(&cifs_inode->open_file_lock);
6738 generate_random_uuid(cifs_inode->lease_key);
6739
6740 /*
6741 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
6742 index 6f227cc781e5..57af9bac0045 100644
6743 --- a/fs/cifs/cifsglob.h
6744 +++ b/fs/cifs/cifsglob.h
6745 @@ -1287,6 +1287,7 @@ struct cifsInodeInfo {
6746 struct rw_semaphore lock_sem; /* protect the fields above */
6747 /* BB add in lists for dirty pages i.e. write caching info for oplock */
6748 struct list_head openFileList;
6749 + spinlock_t open_file_lock; /* protects openFileList */
6750 __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
6751 unsigned int oplock; /* oplock/lease level we have */
6752 unsigned int epoch; /* used to track lease state changes */
6753 @@ -1563,6 +1564,25 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
6754 kfree(param);
6755 }
6756
6757 +static inline bool is_interrupt_error(int error)
6758 +{
6759 + switch (error) {
6760 + case -EINTR:
6761 + case -ERESTARTSYS:
6762 + case -ERESTARTNOHAND:
6763 + case -ERESTARTNOINTR:
6764 + return true;
6765 + }
6766 + return false;
6767 +}
6768 +
6769 +static inline bool is_retryable_error(int error)
6770 +{
6771 + if (is_interrupt_error(error) || error == -EAGAIN)
6772 + return true;
6773 + return false;
6774 +}
6775 +
6776 #define MID_FREE 0
6777 #define MID_REQUEST_ALLOCATED 1
6778 #define MID_REQUEST_SUBMITTED 2
6779 @@ -1668,10 +1688,14 @@ require use of the stronger protocol */
6780 * tcp_ses_lock protects:
6781 * list operations on tcp and SMB session lists
6782 * tcon->open_file_lock protects the list of open files hanging off the tcon
6783 + * inode->open_file_lock protects the openFileList hanging off the inode
6784 * cfile->file_info_lock protects counters and fields in cifs file struct
6785 * f_owner.lock protects certain per file struct operations
6786 * mapping->page_lock protects certain per page operations
6787 *
6788 + * Note that the cifs_tcon.open_file_lock should be taken before,
6789 + * not after, the cifsInodeInfo.open_file_lock.
6790 + *
6791 * Semaphores
6792 * ----------
6793 * sesSem operations on smb session
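
The two new predicates split transient failures into signals (which should abort back to userspace) and genuinely retryable conditions (the signal errors plus -EAGAIN). A sketch of how a writeback path might branch on them; the return conventions below are chosen for illustration, and the sketch assumes the is_interrupt_error()/is_retryable_error() helpers above:

    #include <linux/errno.h>

    static int classify_write_result(int rc)
    {
        if (rc == 0)
            return 0;
        if (is_interrupt_error(rc))
            return -ERESTARTSYS;    /* signal: bail out, and don't mark
                                     * the mapping as failed */
        if (is_retryable_error(rc))
            return 1;               /* redirty the pages and retry */
        return rc;                  /* hard error: report it */
    }
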
6794 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
6795 index 269471c8f42b..86a54b809c48 100644
6796 --- a/fs/cifs/cifssmb.c
6797 +++ b/fs/cifs/cifssmb.c
6798 @@ -2033,16 +2033,17 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
6799
6800 wdata2->cfile = find_writable_file(CIFS_I(inode), false);
6801 if (!wdata2->cfile) {
6802 - cifs_dbg(VFS, "No writable handles for inode\n");
6803 + cifs_dbg(VFS, "No writable handle to retry writepages\n");
6804 rc = -EBADF;
6805 - break;
6806 + } else {
6807 + wdata2->pid = wdata2->cfile->pid;
6808 + rc = server->ops->async_writev(wdata2,
6809 + cifs_writedata_release);
6810 }
6811 - wdata2->pid = wdata2->cfile->pid;
6812 - rc = server->ops->async_writev(wdata2, cifs_writedata_release);
6813
6814 for (j = 0; j < nr_pages; j++) {
6815 unlock_page(wdata2->pages[j]);
6816 - if (rc != 0 && rc != -EAGAIN) {
6817 + if (rc != 0 && !is_retryable_error(rc)) {
6818 SetPageError(wdata2->pages[j]);
6819 end_page_writeback(wdata2->pages[j]);
6820 put_page(wdata2->pages[j]);
6821 @@ -2051,8 +2052,9 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
6822
6823 if (rc) {
6824 kref_put(&wdata2->refcount, cifs_writedata_release);
6825 - if (rc == -EAGAIN)
6826 + if (is_retryable_error(rc))
6827 continue;
6828 + i += nr_pages;
6829 break;
6830 }
6831
6832 @@ -2060,7 +2062,15 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
6833 i += nr_pages;
6834 } while (i < wdata->nr_pages);
6835
6836 - mapping_set_error(inode->i_mapping, rc);
6837 + /* cleanup remaining pages from the original wdata */
6838 + for (; i < wdata->nr_pages; i++) {
6839 + SetPageError(wdata->pages[i]);
6840 + end_page_writeback(wdata->pages[i]);
6841 + put_page(wdata->pages[i]);
6842 + }
6843 +
6844 + if (rc != 0 && !is_retryable_error(rc))
6845 + mapping_set_error(inode->i_mapping, rc);
6846 kref_put(&wdata->refcount, cifs_writedata_release);
6847 }
6848
6849 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
6850 index c53a2e86ed54..208430bb66fc 100644
6851 --- a/fs/cifs/connect.c
6852 +++ b/fs/cifs/connect.c
6853 @@ -3247,12 +3247,16 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
6854 {
6855 struct cifs_sb_info *old = CIFS_SB(sb);
6856 struct cifs_sb_info *new = mnt_data->cifs_sb;
6857 + unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
6858 + unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
6859
6860 if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
6861 return 0;
6862
6863 - if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
6864 - (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
6865 + if (old->mnt_cifs_serverino_autodisabled)
6866 + newflags &= ~CIFS_MOUNT_SERVER_INUM;
6867 +
6868 + if (oldflags != newflags)
6869 return 0;
6870
6871 /*
6872 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
6873 index 23cee91ed442..8703b5f26f45 100644
6874 --- a/fs/cifs/file.c
6875 +++ b/fs/cifs/file.c
6876 @@ -336,10 +336,12 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
6877 list_add(&cfile->tlist, &tcon->openFileList);
6878
6879 /* if readable file instance, put it first in the list */
6880 + spin_lock(&cinode->open_file_lock);
6881 if (file->f_mode & FMODE_READ)
6882 list_add(&cfile->flist, &cinode->openFileList);
6883 else
6884 list_add_tail(&cfile->flist, &cinode->openFileList);
6885 + spin_unlock(&cinode->open_file_lock);
6886 spin_unlock(&tcon->open_file_lock);
6887
6888 if (fid->purge_cache)
6889 @@ -411,7 +413,9 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
6890 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
6891
6892 /* remove it from the lists */
6893 + spin_lock(&cifsi->open_file_lock);
6894 list_del(&cifs_file->flist);
6895 + spin_unlock(&cifsi->open_file_lock);
6896 list_del(&cifs_file->tlist);
6897
6898 if (list_empty(&cifsi->openFileList)) {
6899 @@ -749,7 +753,8 @@ reopen_success:
6900
6901 if (can_flush) {
6902 rc = filemap_write_and_wait(inode->i_mapping);
6903 - mapping_set_error(inode->i_mapping, rc);
6904 + if (!is_interrupt_error(rc))
6905 + mapping_set_error(inode->i_mapping, rc);
6906
6907 if (tcon->unix_ext)
6908 rc = cifs_get_inode_info_unix(&inode, full_path,
6909 @@ -1928,10 +1933,10 @@ refind_writable:
6910 if (!rc)
6911 return inv_file;
6912 else {
6913 - spin_lock(&tcon->open_file_lock);
6914 + spin_lock(&cifs_inode->open_file_lock);
6915 list_move_tail(&inv_file->flist,
6916 &cifs_inode->openFileList);
6917 - spin_unlock(&tcon->open_file_lock);
6918 + spin_unlock(&cifs_inode->open_file_lock);
6919 cifsFileInfo_put(inv_file);
6920 ++refind;
6921 inv_file = NULL;
6922 @@ -2137,6 +2142,7 @@ static int cifs_writepages(struct address_space *mapping,
6923 pgoff_t end, index;
6924 struct cifs_writedata *wdata;
6925 int rc = 0;
6926 + int saved_rc = 0;
6927
6928 /*
6929 * If wsize is smaller than the page cache size, default to writing
6930 @@ -2163,8 +2169,10 @@ retry:
6931
6932 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
6933 &wsize, &credits);
6934 - if (rc)
6935 + if (rc != 0) {
6936 + done = true;
6937 break;
6938 + }
6939
6940 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
6941
6942 @@ -2172,6 +2180,7 @@ retry:
6943 &found_pages);
6944 if (!wdata) {
6945 rc = -ENOMEM;
6946 + done = true;
6947 add_credits_and_wake_if(server, credits, 0);
6948 break;
6949 }
6950 @@ -2200,7 +2209,7 @@ retry:
6951 if (rc != 0) {
6952 add_credits_and_wake_if(server, wdata->credits, 0);
6953 for (i = 0; i < nr_pages; ++i) {
6954 - if (rc == -EAGAIN)
6955 + if (is_retryable_error(rc))
6956 redirty_page_for_writepage(wbc,
6957 wdata->pages[i]);
6958 else
6959 @@ -2208,7 +2217,7 @@ retry:
6960 end_page_writeback(wdata->pages[i]);
6961 put_page(wdata->pages[i]);
6962 }
6963 - if (rc != -EAGAIN)
6964 + if (!is_retryable_error(rc))
6965 mapping_set_error(mapping, rc);
6966 }
6967 kref_put(&wdata->refcount, cifs_writedata_release);
6968 @@ -2218,6 +2227,15 @@ retry:
6969 continue;
6970 }
6971
6972 + /* Return immediately if we received a signal during writing */
6973 + if (is_interrupt_error(rc)) {
6974 + done = true;
6975 + break;
6976 + }
6977 +
6978 + if (rc != 0 && saved_rc == 0)
6979 + saved_rc = rc;
6980 +
6981 wbc->nr_to_write -= nr_pages;
6982 if (wbc->nr_to_write <= 0)
6983 done = true;
6984 @@ -2235,6 +2253,9 @@ retry:
6985 goto retry;
6986 }
6987
6988 + if (saved_rc != 0)
6989 + rc = saved_rc;
6990 +
6991 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
6992 mapping->writeback_index = index;
6993
6994 @@ -2266,8 +2287,8 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
6995 set_page_writeback(page);
6996 retry_write:
6997 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
6998 - if (rc == -EAGAIN) {
6999 - if (wbc->sync_mode == WB_SYNC_ALL)
7000 + if (is_retryable_error(rc)) {
7001 + if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
7002 goto retry_write;
7003 redirty_page_for_writepage(wbc, page);
7004 } else if (rc != 0) {
7005 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
7006 index 1fadd314ae7f..53f3d08898af 100644
7007 --- a/fs/cifs/inode.c
7008 +++ b/fs/cifs/inode.c
7009 @@ -2261,6 +2261,11 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
7010 * the flush returns error?
7011 */
7012 rc = filemap_write_and_wait(inode->i_mapping);
7013 + if (is_interrupt_error(rc)) {
7014 + rc = -ERESTARTSYS;
7015 + goto out;
7016 + }
7017 +
7018 mapping_set_error(inode->i_mapping, rc);
7019 rc = 0;
7020
7021 @@ -2404,6 +2409,11 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
7022 * the flush returns error?
7023 */
7024 rc = filemap_write_and_wait(inode->i_mapping);
7025 + if (is_interrupt_error(rc)) {
7026 + rc = -ERESTARTSYS;
7027 + goto cifs_setattr_exit;
7028 + }
7029 +
7030 mapping_set_error(inode->i_mapping, rc);
7031 rc = 0;
7032
7033 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
7034 index facc94e159a1..e45f8e321371 100644
7035 --- a/fs/cifs/misc.c
7036 +++ b/fs/cifs/misc.c
7037 @@ -523,6 +523,7 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
7038 {
7039 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
7040 cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
7041 + cifs_sb->mnt_cifs_serverino_autodisabled = true;
7042 cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
7043 cifs_sb_master_tcon(cifs_sb)->treeName);
7044 }
7045 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
7046 index 2bc47eb6215e..cbe633f1840a 100644
7047 --- a/fs/cifs/smb2pdu.c
7048 +++ b/fs/cifs/smb2pdu.c
7049 @@ -712,6 +712,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
7050 } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
7051 /* ops set to 3.0 by default, so update */
7052 ses->server->ops = &smb21_operations;
7053 + ses->server->vals = &smb21_values;
7054 }
7055 } else if (le16_to_cpu(rsp->DialectRevision) !=
7056 ses->server->vals->protocol_id) {
7057 diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
7058 index 5fdb9a509a97..1959931e14c1 100644
7059 --- a/fs/cifs/smbdirect.c
7060 +++ b/fs/cifs/smbdirect.c
7061 @@ -2090,7 +2090,8 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
7062 * rqst: the data to write
7063 * return value: 0 if successfully write, otherwise error code
7064 */
7065 -int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
7066 +int smbd_send(struct TCP_Server_Info *server,
7067 + int num_rqst, struct smb_rqst *rqst_array)
7068 {
7069 struct smbd_connection *info = server->smbd_conn;
7070 struct kvec vec;
7071 @@ -2102,6 +2103,8 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
7072 info->max_send_size - sizeof(struct smbd_data_transfer);
7073 struct kvec *iov;
7074 int rc;
7075 + struct smb_rqst *rqst;
7076 + int rqst_idx;
7077
7078 info->smbd_send_pending++;
7079 if (info->transport_status != SMBD_CONNECTED) {
7080 @@ -2109,47 +2112,41 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
7081 goto done;
7082 }
7083
7084 - /*
7085 - * Skip the RFC1002 length defined in MS-SMB2 section 2.1
7086 - * It is used only for TCP transport in the iov[0]
7087 - * In future we may want to add a transport layer under protocol
7088 - * layer so this will only be issued to TCP transport
7089 - */
7090 -
7091 - if (rqst->rq_iov[0].iov_len != 4) {
7092 - log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
7093 - return -EINVAL;
7094 - }
7095 -
7096 /*
7097 * Add in the page array if there is one. The caller needs to set
7098 * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
7099 * ends at page boundary
7100 */
7101 - buflen = smb_rqst_len(server, rqst);
7102 + remaining_data_length = 0;
7103 + for (i = 0; i < num_rqst; i++)
7104 + remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
7105
7106 - if (buflen + sizeof(struct smbd_data_transfer) >
7107 + if (remaining_data_length + sizeof(struct smbd_data_transfer) >
7108 info->max_fragmented_send_size) {
7109 log_write(ERR, "payload size %d > max size %d\n",
7110 - buflen, info->max_fragmented_send_size);
7111 + remaining_data_length, info->max_fragmented_send_size);
7112 rc = -EINVAL;
7113 goto done;
7114 }
7115
7116 - iov = &rqst->rq_iov[1];
7117 + rqst_idx = 0;
7118 +
7119 +next_rqst:
7120 + rqst = &rqst_array[rqst_idx];
7121 + iov = rqst->rq_iov;
7122
7123 - cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
7124 - for (i = 0; i < rqst->rq_nvec-1; i++)
7125 + cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
7126 + rqst_idx, smb_rqst_len(server, rqst));
7127 + for (i = 0; i < rqst->rq_nvec; i++)
7128 dump_smb(iov[i].iov_base, iov[i].iov_len);
7129
7130 - remaining_data_length = buflen;
7131
7132 - log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
7133 - "rq_tailsz=%d buflen=%d\n",
7134 - rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
7135 - rqst->rq_tailsz, buflen);
7136 + log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
7137 + "rq_tailsz=%d buflen=%lu\n",
7138 + rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
7139 + rqst->rq_tailsz, smb_rqst_len(server, rqst));
7140
7141 - start = i = iov[0].iov_len ? 0 : 1;
7142 + start = i = 0;
7143 buflen = 0;
7144 while (true) {
7145 buflen += iov[i].iov_len;
7146 @@ -2197,14 +2194,14 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
7147 goto done;
7148 }
7149 i++;
7150 - if (i == rqst->rq_nvec-1)
7151 + if (i == rqst->rq_nvec)
7152 break;
7153 }
7154 start = i;
7155 buflen = 0;
7156 } else {
7157 i++;
7158 - if (i == rqst->rq_nvec-1) {
7159 + if (i == rqst->rq_nvec) {
7160 /* send out all remaining vecs */
7161 remaining_data_length -= buflen;
7162 log_write(INFO,
7163 @@ -2248,6 +2245,10 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
7164 }
7165 }
7166
7167 + rqst_idx++;
7168 + if (rqst_idx < num_rqst)
7169 + goto next_rqst;
7170 +
7171 done:
7172 /*
7173 * As an optimization, we don't wait for individual I/O to finish
7174 diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h
7175 index a11096254f29..b5c240ff2191 100644
7176 --- a/fs/cifs/smbdirect.h
7177 +++ b/fs/cifs/smbdirect.h
7178 @@ -292,7 +292,8 @@ void smbd_destroy(struct smbd_connection *info);
7179
7180 /* Interface for carrying upper layer I/O through send/recv */
7181 int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
7182 -int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
7183 +int smbd_send(struct TCP_Server_Info *server,
7184 + int num_rqst, struct smb_rqst *rqst);
7185
7186 enum mr_state {
7187 MR_READY,
7188 @@ -332,7 +333,7 @@ static inline void *smbd_get_connection(
7189 static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
7190 static inline void smbd_destroy(struct smbd_connection *info) {}
7191 static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
7192 -static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; }
7193 +static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
7194 #endif
7195
7196 #endif
7197 diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
7198 index f2938bd95c40..fe77f41bff9f 100644
7199 --- a/fs/cifs/transport.c
7200 +++ b/fs/cifs/transport.c
7201 @@ -287,7 +287,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
7202 __be32 rfc1002_marker;
7203
7204 if (cifs_rdma_enabled(server) && server->smbd_conn) {
7205 - rc = smbd_send(server, rqst);
7206 + rc = smbd_send(server, num_rqst, rqst);
7207 goto smbd_done;
7208 }
7209 if (ssocket == NULL)
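
smbd_send() now takes the whole compound-request array, matching what __smb_send_rqst() already has in hand; internally the rqst_idx/next_rqst pair walks the array one request at a time. The equivalent control flow written as a plain loop, with send_one() standing in for the per-request body (a hypothetical helper, for illustration only):

    static int send_all(struct TCP_Server_Info *server,
                        int num_rqst, struct smb_rqst *rqst_array)
    {
        int i, rc = 0;

        for (i = 0; i < num_rqst; i++) {
            /* send_one() is a stand-in for the body between the
             * next_rqst: label and the rqst_idx++ above */
            rc = send_one(server, &rqst_array[i]);
            if (rc)
                break;
        }
        return rc;
    }
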
7210 diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
7211 index 913061c0de1b..e8e27cdc2f67 100644
7212 --- a/fs/ext4/block_validity.c
7213 +++ b/fs/ext4/block_validity.c
7214 @@ -137,6 +137,49 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
7215 printk(KERN_CONT "\n");
7216 }
7217
7218 +static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
7219 +{
7220 + struct inode *inode;
7221 + struct ext4_sb_info *sbi = EXT4_SB(sb);
7222 + struct ext4_map_blocks map;
7223 + u32 i = 0, num;
7224 + int err = 0, n;
7225 +
7226 + if ((ino < EXT4_ROOT_INO) ||
7227 + (ino > le32_to_cpu(sbi->s_es->s_inodes_count)))
7228 + return -EINVAL;
7229 + inode = ext4_iget(sb, ino, EXT4_IGET_SPECIAL);
7230 + if (IS_ERR(inode))
7231 + return PTR_ERR(inode);
7232 + num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
7233 + while (i < num) {
7234 + map.m_lblk = i;
7235 + map.m_len = num - i;
7236 + n = ext4_map_blocks(NULL, inode, &map, 0);
7237 + if (n < 0) {
7238 + err = n;
7239 + break;
7240 + }
7241 + if (n == 0) {
7242 + i++;
7243 + } else {
7244 + if (!ext4_data_block_valid(sbi, map.m_pblk, n)) {
7245 + ext4_error(sb, "blocks %llu-%llu from inode %u "
7246 + "overlap system zone", map.m_pblk,
7247 + map.m_pblk + map.m_len - 1, ino);
7248 + err = -EFSCORRUPTED;
7249 + break;
7250 + }
7251 + err = add_system_zone(sbi, map.m_pblk, n);
7252 + if (err < 0)
7253 + break;
7254 + i += n;
7255 + }
7256 + }
7257 + iput(inode);
7258 + return err;
7259 +}
7260 +
7261 int ext4_setup_system_zone(struct super_block *sb)
7262 {
7263 ext4_group_t ngroups = ext4_get_groups_count(sb);
7264 @@ -171,6 +214,12 @@ int ext4_setup_system_zone(struct super_block *sb)
7265 if (ret)
7266 return ret;
7267 }
7268 + if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
7269 + ret = ext4_protect_reserved_inode(sb,
7270 + le32_to_cpu(sbi->s_es->s_journal_inum));
7271 + if (ret)
7272 + return ret;
7273 + }
7274
7275 if (test_opt(sb, DEBUG))
7276 debug_print_tree(sbi);
7277 @@ -227,6 +276,11 @@ int ext4_check_blockref(const char *function, unsigned int line,
7278 __le32 *bref = p;
7279 unsigned int blk;
7280
7281 + if (ext4_has_feature_journal(inode->i_sb) &&
7282 + (inode->i_ino ==
7283 + le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
7284 + return 0;
7285 +
7286 while (bref < p+max) {
7287 blk = le32_to_cpu(*bref++);
7288 if (blk &&
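
ext4_protect_reserved_inode() walks the journal inode's logical blocks with ext4_map_blocks(), whose return convention drives the loop: a negative value is an error, 0 means a hole (advance one block), and a positive n means an extent of n blocks starting at map.m_pblk, which is then validated and added to the system zone. The shape of that walk, annotated (a sketch reusing the names above, not a replacement for the function):

    while (i < num) {
        map.m_lblk = i;
        map.m_len = num - i;
        n = ext4_map_blocks(NULL, inode, &map, 0);  /* NULL handle: lookup only */
        if (n < 0)
            return n;   /* I/O error or corruption */
        if (n == 0)
            i++;        /* hole: step one logical block */
        else
            i += n;     /* extent of n blocks at map.m_pblk: validate,
                           then add_system_zone() */
    }
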
7289 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
7290 index 45aea792d22a..00bf0b67aae8 100644
7291 --- a/fs/ext4/extents.c
7292 +++ b/fs/ext4/extents.c
7293 @@ -518,10 +518,14 @@ __read_extent_tree_block(const char *function, unsigned int line,
7294 }
7295 if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
7296 return bh;
7297 - err = __ext4_ext_check(function, line, inode,
7298 - ext_block_hdr(bh), depth, pblk);
7299 - if (err)
7300 - goto errout;
7301 + if (!ext4_has_feature_journal(inode->i_sb) ||
7302 + (inode->i_ino !=
7303 + le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
7304 + err = __ext4_ext_check(function, line, inode,
7305 + ext_block_hdr(bh), depth, pblk);
7306 + if (err)
7307 + goto errout;
7308 + }
7309 set_buffer_verified(bh);
7310 /*
7311 * If this is a leaf block, cache all of its entries
7312 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
7313 index e65559bf7728..cff6277f7a9f 100644
7314 --- a/fs/ext4/inode.c
7315 +++ b/fs/ext4/inode.c
7316 @@ -399,6 +399,10 @@ static int __check_block_validity(struct inode *inode, const char *func,
7317 unsigned int line,
7318 struct ext4_map_blocks *map)
7319 {
7320 + if (ext4_has_feature_journal(inode->i_sb) &&
7321 + (inode->i_ino ==
7322 + le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
7323 + return 0;
7324 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
7325 map->m_len)) {
7326 ext4_error_inode(inode, func, line, map->m_pblk,
7327 diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
7328 index 75fe92eaa681..1624618c2bc7 100644
7329 --- a/fs/nfs/delegation.c
7330 +++ b/fs/nfs/delegation.c
7331 @@ -153,7 +153,7 @@ again:
7332 /* Block nfs4_proc_unlck */
7333 mutex_lock(&sp->so_delegreturn_mutex);
7334 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
7335 - err = nfs4_open_delegation_recall(ctx, state, stateid, type);
7336 + err = nfs4_open_delegation_recall(ctx, state, stateid);
7337 if (!err)
7338 err = nfs_delegation_claim_locks(ctx, state, stateid);
7339 if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
7340 diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
7341 index bb1ef8c37af4..c95477823fa6 100644
7342 --- a/fs/nfs/delegation.h
7343 +++ b/fs/nfs/delegation.h
7344 @@ -61,7 +61,7 @@ void nfs_reap_expired_delegations(struct nfs_client *clp);
7345
7346 /* NFSv4 delegation-related procedures */
7347 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
7348 -int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
7349 +int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
7350 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
7351 bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, struct rpc_cred **cred);
7352 bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
7353 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
7354 index 31ae3bd5d9d2..621e3cf90f4e 100644
7355 --- a/fs/nfs/nfs4proc.c
7356 +++ b/fs/nfs/nfs4proc.c
7357 @@ -2113,12 +2113,10 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
7358 case -NFS4ERR_BAD_HIGH_SLOT:
7359 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
7360 case -NFS4ERR_DEADSESSION:
7361 - set_bit(NFS_DELEGATED_STATE, &state->flags);
7362 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
7363 return -EAGAIN;
7364 case -NFS4ERR_STALE_CLIENTID:
7365 case -NFS4ERR_STALE_STATEID:
7366 - set_bit(NFS_DELEGATED_STATE, &state->flags);
7367 /* Don't recall a delegation if it was lost */
7368 nfs4_schedule_lease_recovery(server->nfs_client);
7369 return -EAGAIN;
7370 @@ -2139,7 +2137,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
7371 return -EAGAIN;
7372 case -NFS4ERR_DELAY:
7373 case -NFS4ERR_GRACE:
7374 - set_bit(NFS_DELEGATED_STATE, &state->flags);
7375 ssleep(1);
7376 return -EAGAIN;
7377 case -ENOMEM:
7378 @@ -2155,8 +2152,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
7379 }
7380
7381 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
7382 - struct nfs4_state *state, const nfs4_stateid *stateid,
7383 - fmode_t type)
7384 + struct nfs4_state *state, const nfs4_stateid *stateid)
7385 {
7386 struct nfs_server *server = NFS_SERVER(state->inode);
7387 struct nfs4_opendata *opendata;
7388 @@ -2167,20 +2163,23 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
7389 if (IS_ERR(opendata))
7390 return PTR_ERR(opendata);
7391 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
7392 - nfs_state_clear_delegation(state);
7393 - switch (type & (FMODE_READ|FMODE_WRITE)) {
7394 - case FMODE_READ|FMODE_WRITE:
7395 - case FMODE_WRITE:
7396 + if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
7397 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
7398 if (err)
7399 - break;
7400 + goto out;
7401 + }
7402 + if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
7403 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
7404 if (err)
7405 - break;
7406 - /* Fall through */
7407 - case FMODE_READ:
7408 + goto out;
7409 + }
7410 + if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
7411 err = nfs4_open_recover_helper(opendata, FMODE_READ);
7412 + if (err)
7413 + goto out;
7414 }
7415 + nfs_state_clear_delegation(state);
7416 +out:
7417 nfs4_opendata_put(opendata);
7418 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
7419 }
7420 diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
7421 index 8cf2218b46a7..6f90d91a8733 100644
7422 --- a/fs/pstore/inode.c
7423 +++ b/fs/pstore/inode.c
7424 @@ -330,10 +330,6 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
7425 goto fail;
7426 inode->i_mode = S_IFREG | 0444;
7427 inode->i_fop = &pstore_file_operations;
7428 - private = kzalloc(sizeof(*private), GFP_KERNEL);
7429 - if (!private)
7430 - goto fail_alloc;
7431 - private->record = record;
7432
7433 switch (record->type) {
7434 case PSTORE_TYPE_DMESG:
7435 @@ -383,12 +379,16 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
7436 break;
7437 }
7438
7439 + private = kzalloc(sizeof(*private), GFP_KERNEL);
7440 + if (!private)
7441 + goto fail_inode;
7442 +
7443 dentry = d_alloc_name(root, name);
7444 if (!dentry)
7445 goto fail_private;
7446
7447 + private->record = record;
7448 inode->i_size = private->total_size = size;
7449 -
7450 inode->i_private = private;
7451
7452 if (record->time.tv_sec)
7453 @@ -404,7 +404,7 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
7454
7455 fail_private:
7456 free_pstore_private(private);
7457 -fail_alloc:
7458 +fail_inode:
7459 iput(inode);
7460
7461 fail:
7462 diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
7463 index f9c6e0e3aec7..fa117e11458a 100644
7464 --- a/include/drm/drm_device.h
7465 +++ b/include/drm/drm_device.h
7466 @@ -174,7 +174,13 @@ struct drm_device {
7467 * races and imprecision over longer time periods, hence exposing a
7468 * hardware vblank counter is always recommended.
7469 *
7470 - * If non-zeor, &drm_crtc_funcs.get_vblank_counter must be set.
7471 + * This is the statically configured device wide maximum. The driver
7472 + * can instead choose to use a runtime configurable per-crtc value
7473 + * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count
7474 + * must be left at zero. See drm_crtc_set_max_vblank_count() on how
7475 + * to use the per-crtc value.
7476 + *
7477 + * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
7478 */
7479 u32 max_vblank_count; /**< size of vblank counter register */
7480
7481 diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
7482 index d25a9603ab57..e9c676381fd4 100644
7483 --- a/include/drm/drm_vblank.h
7484 +++ b/include/drm/drm_vblank.h
7485 @@ -128,6 +128,26 @@ struct drm_vblank_crtc {
7486 * @last: Protected by &drm_device.vbl_lock, used for wraparound handling.
7487 */
7488 u32 last;
7489 + /**
7490 + * @max_vblank_count:
7491 + *
7492 + * Maximum value of the vblank registers for this crtc. This value +1
7493 + * will result in a wrap-around of the vblank register. It is used
7494 + * by the vblank core to handle wrap-arounds.
7495 + *
7496 + * If set to zero the vblank core will try to guess the elapsed vblanks
7497 + * between times when the vblank interrupt is disabled through
7498 + * high-precision timestamps. That approach is suffering from small
7499 + * races and imprecision over longer time periods, hence exposing a
7500 + * hardware vblank counter is always recommended.
7501 + *
7502 + * This is the runtime configurable per-crtc maximum set through
7503 + * drm_crtc_set_max_vblank_count(). If this is used the driver
7504 + * must leave the device wide &drm_device.max_vblank_count at zero.
7505 + *
7506 + * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
7507 + */
7508 + u32 max_vblank_count;
7509 /**
7510 * @inmodeset: Tracks whether the vblank is disabled due to a modeset.
7511 * For legacy driver bit 2 additionally tracks whether an additional
7512 @@ -206,4 +226,6 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
7513 void drm_calc_timestamping_constants(struct drm_crtc *crtc,
7514 const struct drm_display_mode *mode);
7515 wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc);
7516 +void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
7517 + u32 max_vblank_count);
7518 #endif
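
A driver that wants the new per-crtc limit calls drm_crtc_set_max_vblank_count() once it knows the hardware counter width, and leaves drm_device.max_vblank_count at zero. Sketch of an opt-in for a hypothetical display with a 24-bit frame counter:

    #include <drm/drm_vblank.h>

    static void example_enable_hw_counter(struct drm_crtc *crtc)
    {
        /* hypothetical hardware: 24-bit vblank/frame counter */
        drm_crtc_set_max_vblank_count(crtc, 0xffffff);
    }
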
7519 diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
7520 index bef2e36c01b4..91f9f95ad506 100644
7521 --- a/include/linux/device-mapper.h
7522 +++ b/include/linux/device-mapper.h
7523 @@ -62,7 +62,8 @@ typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
7524 struct request *rq,
7525 union map_info *map_context,
7526 struct request **clone);
7527 -typedef void (*dm_release_clone_request_fn) (struct request *clone);
7528 +typedef void (*dm_release_clone_request_fn) (struct request *clone,
7529 + union map_info *map_context);
7530
7531 /*
7532 * Returns:
7533 diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
7534 index acc4279ad5e3..412098b24f58 100644
7535 --- a/include/linux/gpio/consumer.h
7536 +++ b/include/linux/gpio/consumer.h
7537 @@ -222,7 +222,7 @@ static inline void gpiod_put(struct gpio_desc *desc)
7538 might_sleep();
7539
7540 /* GPIO can never have been requested */
7541 - WARN_ON(1);
7542 + WARN_ON(desc);
7543 }
7544
7545 static inline void gpiod_put_array(struct gpio_descs *descs)
7546 @@ -230,7 +230,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
7547 might_sleep();
7548
7549 /* GPIO can never have been requested */
7550 - WARN_ON(1);
7551 + WARN_ON(descs);
7552 }
7553
7554 static inline struct gpio_desc *__must_check
7555 @@ -283,7 +283,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
7556 might_sleep();
7557
7558 /* GPIO can never have been requested */
7559 - WARN_ON(1);
7560 + WARN_ON(desc);
7561 }
7562
7563 static inline void devm_gpiod_put_array(struct device *dev,
7564 @@ -292,32 +292,32 @@ static inline void devm_gpiod_put_array(struct device *dev,
7565 might_sleep();
7566
7567 /* GPIO can never have been requested */
7568 - WARN_ON(1);
7569 + WARN_ON(descs);
7570 }
7571
7572
7573 static inline int gpiod_get_direction(const struct gpio_desc *desc)
7574 {
7575 /* GPIO can never have been requested */
7576 - WARN_ON(1);
7577 + WARN_ON(desc);
7578 return -ENOSYS;
7579 }
7580 static inline int gpiod_direction_input(struct gpio_desc *desc)
7581 {
7582 /* GPIO can never have been requested */
7583 - WARN_ON(1);
7584 + WARN_ON(desc);
7585 return -ENOSYS;
7586 }
7587 static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
7588 {
7589 /* GPIO can never have been requested */
7590 - WARN_ON(1);
7591 + WARN_ON(desc);
7592 return -ENOSYS;
7593 }
7594 static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
7595 {
7596 /* GPIO can never have been requested */
7597 - WARN_ON(1);
7598 + WARN_ON(desc);
7599 return -ENOSYS;
7600 }
7601
7602 @@ -325,7 +325,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
7603 static inline int gpiod_get_value(const struct gpio_desc *desc)
7604 {
7605 /* GPIO can never have been requested */
7606 - WARN_ON(1);
7607 + WARN_ON(desc);
7608 return 0;
7609 }
7610 static inline int gpiod_get_array_value(unsigned int array_size,
7611 @@ -333,25 +333,25 @@ static inline int gpiod_get_array_value(unsigned int array_size,
7612 int *value_array)
7613 {
7614 /* GPIO can never have been requested */
7615 - WARN_ON(1);
7616 + WARN_ON(desc_array);
7617 return 0;
7618 }
7619 static inline void gpiod_set_value(struct gpio_desc *desc, int value)
7620 {
7621 /* GPIO can never have been requested */
7622 - WARN_ON(1);
7623 + WARN_ON(desc);
7624 }
7625 static inline void gpiod_set_array_value(unsigned int array_size,
7626 struct gpio_desc **desc_array,
7627 int *value_array)
7628 {
7629 /* GPIO can never have been requested */
7630 - WARN_ON(1);
7631 + WARN_ON(desc_array);
7632 }
7633 static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
7634 {
7635 /* GPIO can never have been requested */
7636 - WARN_ON(1);
7637 + WARN_ON(desc);
7638 return 0;
7639 }
7640 static inline int gpiod_get_raw_array_value(unsigned int array_size,
7641 @@ -359,27 +359,27 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
7642 int *value_array)
7643 {
7644 /* GPIO can never have been requested */
7645 - WARN_ON(1);
7646 + WARN_ON(desc_array);
7647 return 0;
7648 }
7649 static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
7650 {
7651 /* GPIO can never have been requested */
7652 - WARN_ON(1);
7653 + WARN_ON(desc);
7654 }
7655 static inline int gpiod_set_raw_array_value(unsigned int array_size,
7656 struct gpio_desc **desc_array,
7657 int *value_array)
7658 {
7659 /* GPIO can never have been requested */
7660 - WARN_ON(1);
7661 + WARN_ON(desc_array);
7662 return 0;
7663 }
7664
7665 static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
7666 {
7667 /* GPIO can never have been requested */
7668 - WARN_ON(1);
7669 + WARN_ON(desc);
7670 return 0;
7671 }
7672 static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
7673 @@ -387,25 +387,25 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
7674 int *value_array)
7675 {
7676 /* GPIO can never have been requested */
7677 - WARN_ON(1);
7678 + WARN_ON(desc_array);
7679 return 0;
7680 }
7681 static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
7682 {
7683 /* GPIO can never have been requested */
7684 - WARN_ON(1);
7685 + WARN_ON(desc);
7686 }
7687 static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
7688 struct gpio_desc **desc_array,
7689 int *value_array)
7690 {
7691 /* GPIO can never have been requested */
7692 - WARN_ON(1);
7693 + WARN_ON(desc_array);
7694 }
7695 static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
7696 {
7697 /* GPIO can never have been requested */
7698 - WARN_ON(1);
7699 + WARN_ON(desc);
7700 return 0;
7701 }
7702 static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
7703 @@ -413,55 +413,55 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
7704 int *value_array)
7705 {
7706 /* GPIO can never have been requested */
7707 - WARN_ON(1);
7708 + WARN_ON(desc_array);
7709 return 0;
7710 }
7711 static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
7712 int value)
7713 {
7714 /* GPIO can never have been requested */
7715 - WARN_ON(1);
7716 + WARN_ON(desc);
7717 }
7718 static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
7719 struct gpio_desc **desc_array,
7720 int *value_array)
7721 {
7722 /* GPIO can never have been requested */
7723 - WARN_ON(1);
7724 + WARN_ON(desc_array);
7725 return 0;
7726 }
7727
7728 static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
7729 {
7730 /* GPIO can never have been requested */
7731 - WARN_ON(1);
7732 + WARN_ON(desc);
7733 return -ENOSYS;
7734 }
7735
7736 static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
7737 {
7738 /* GPIO can never have been requested */
7739 - WARN_ON(1);
7740 + WARN_ON(desc);
7741 return -ENOSYS;
7742 }
7743
7744 static inline int gpiod_is_active_low(const struct gpio_desc *desc)
7745 {
7746 /* GPIO can never have been requested */
7747 - WARN_ON(1);
7748 + WARN_ON(desc);
7749 return 0;
7750 }
7751 static inline int gpiod_cansleep(const struct gpio_desc *desc)
7752 {
7753 /* GPIO can never have been requested */
7754 - WARN_ON(1);
7755 + WARN_ON(desc);
7756 return 0;
7757 }
7758
7759 static inline int gpiod_to_irq(const struct gpio_desc *desc)
7760 {
7761 /* GPIO can never have been requested */
7762 - WARN_ON(1);
7763 + WARN_ON(desc);
7764 return -EINVAL;
7765 }
7766
7767 @@ -469,7 +469,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
7768 const char *name)
7769 {
7770 /* GPIO can never have been requested */
7771 - WARN_ON(1);
7772 + WARN_ON(desc);
7773 return -EINVAL;
7774 }
7775
7776 @@ -481,7 +481,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
7777 static inline int desc_to_gpio(const struct gpio_desc *desc)
7778 {
7779 /* GPIO can never have been requested */
7780 - WARN_ON(1);
7781 + WARN_ON(desc);
7782 return -EINVAL;
7783 }
7784
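
As an illustration (not part of the patch), the optional-GPIO pattern these relaxed stubs are meant to support; foo_reset() and the "reset" line name are hypothetical:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int foo_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* returns NULL (not an error) when the line is absent */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* with CONFIG_GPIOLIB=n these are now silent no-ops for a
	 * NULL desc instead of unconditional WARN_ON(1) splats
	 */
	gpiod_set_value(reset, 1);
	gpiod_set_value(reset, 0);
	return 0;
}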
7785 diff --git a/include/media/cec.h b/include/media/cec.h
7786 index dc4b412e8fa1..59bf280e9715 100644
7787 --- a/include/media/cec.h
7788 +++ b/include/media/cec.h
7789 @@ -333,67 +333,6 @@ void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
7790 u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
7791 unsigned int *offset);
7792
7793 -/**
7794 - * cec_set_edid_phys_addr() - find and set the physical address
7795 - *
7796 - * @edid: pointer to the EDID data
7797 - * @size: size in bytes of the EDID data
7798 - * @phys_addr: the new physical address
7799 - *
7800 - * This function finds the location of the physical address in the EDID
7801 - * and fills in the given physical address and updates the checksum
7802 - * at the end of the EDID block. It does nothing if the EDID doesn't
7803 - * contain a physical address.
7804 - */
7805 -void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
7806 -
7807 -/**
7808 - * cec_phys_addr_for_input() - calculate the PA for an input
7809 - *
7810 - * @phys_addr: the physical address of the parent
7811 - * @input: the number of the input port, must be between 1 and 15
7812 - *
7813 - * This function calculates a new physical address based on the input
7814 - * port number. For example:
7815 - *
7816 - * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
7817 - *
7818 - * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
7819 - *
7820 - * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
7821 - *
7822 - * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
7823 - *
7824 - * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
7825 - */
7826 -u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
7827 -
7828 -/**
7829 - * cec_phys_addr_validate() - validate a physical address from an EDID
7830 - *
7831 - * @phys_addr: the physical address to validate
7832 - * @parent: if not %NULL, then this is filled with the parents PA.
7833 - * @port: if not %NULL, then this is filled with the input port.
7834 - *
7835 - * This validates a physical address as read from an EDID. If the
7836 - * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
7837 - * then it will return -EINVAL.
7838 - *
7839 - * The parent PA is passed into %parent and the input port is passed into
7840 - * %port. For example:
7841 - *
7842 - * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
7843 - *
7844 - * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
7845 - *
7846 - * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
7847 - *
7848 - * PA = f.f.f.f: has parent f.f.f.f and input port 0.
7849 - *
7850 - * Return: 0 if the PA is valid, -EINVAL if not.
7851 - */
7852 -int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
7853 -
7854 #else
7855
7856 static inline int cec_register_adapter(struct cec_adapter *adap,
7857 @@ -428,25 +367,6 @@ static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
7858 return CEC_PHYS_ADDR_INVALID;
7859 }
7860
7861 -static inline void cec_set_edid_phys_addr(u8 *edid, unsigned int size,
7862 - u16 phys_addr)
7863 -{
7864 -}
7865 -
7866 -static inline u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
7867 -{
7868 - return CEC_PHYS_ADDR_INVALID;
7869 -}
7870 -
7871 -static inline int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
7872 -{
7873 - if (parent)
7874 - *parent = phys_addr;
7875 - if (port)
7876 - *port = 0;
7877 - return 0;
7878 -}
7879 -
7880 #endif
7881
7882 /**
7883 diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
7884 index 17cb27df1b81..4e7732d3908c 100644
7885 --- a/include/media/v4l2-dv-timings.h
7886 +++ b/include/media/v4l2-dv-timings.h
7887 @@ -234,4 +234,10 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
7888 const struct hdmi_vendor_infoframe *hdmi,
7889 unsigned int height);
7890
7891 +u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
7892 + unsigned int *offset);
7893 +void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
7894 +u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input);
7895 +int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
7896 +
7897 #endif
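
As an illustration (not part of the patch), how a driver might use the V4L2 replacements declared above; foo_downstream_pa() and the input port number are hypothetical:

#include <linux/cec.h>	/* CEC_PHYS_ADDR_INVALID */
#include <media/v4l2-dv-timings.h>

static u16 foo_downstream_pa(const u8 *edid, unsigned int size)
{
	unsigned int offset;
	u16 pa = v4l2_get_edid_phys_addr(edid, size, &offset);

	if (v4l2_phys_addr_validate(pa, NULL, NULL))
		return CEC_PHYS_ADDR_INVALID;

	/* e.g. PA 3.0.0.0 on input port 2 becomes 3.2.0.0 */
	return v4l2_phys_addr_for_input(pa, 2);
}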
7898 diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
7899 index 67e0a990144a..468deae5d603 100644
7900 --- a/include/net/cfg80211.h
7901 +++ b/include/net/cfg80211.h
7902 @@ -6562,6 +6562,21 @@ int cfg80211_external_auth_request(struct net_device *netdev,
7903 struct cfg80211_external_auth_params *params,
7904 gfp_t gfp);
7905
7906 +/**
7907 + * cfg80211_iftype_allowed - check whether the interface can be allowed
7908 + * @wiphy: the wiphy
7909 + * @iftype: interface type
7910 + * @is_4addr: use_4addr flag, must be '0' when check_swif is '1'
7911 + * @check_swif: check iftype against software interfaces
7912 + *
7913 + * Check whether the interface is allowed to operate; additionally, this API
7914 + * can be used to check iftype against the software interfaces when
7915 + * check_swif is '1'.
7916 + */
7917 +bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
7918 + bool is_4addr, u8 check_swif);
7919 +
7920 +
7921 /* Logging, debugging and troubleshooting/diagnostic helpers. */
7922
7923 /* wiphy_printk helpers, similar to dev_printk */
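
As an illustration (not part of the patch), the two call patterns this helper consolidates, modelled on the mac80211/nl80211 conversions later in this patch; foo_check_new_iface() is hypothetical:

#include <linux/errno.h>
#include <net/cfg80211.h>

static int foo_check_new_iface(struct wiphy *wiphy,
			       enum nl80211_iftype type, bool use_4addr)
{
	/* check_swif == 0: may this iftype be created at all? */
	if (!cfg80211_iftype_allowed(wiphy, type, use_4addr, 0))
		return -EOPNOTSUPP;
	/* check_swif == 1: software iftypes need no combination check */
	if (cfg80211_iftype_allowed(wiphy, type, 0, 1))
		return 0;
	/* ... interface combination accounting would go here ... */
	return 0;
}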
7924 diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
7925 index 7b8c9e19bad1..0f3cb13db8e9 100644
7926 --- a/include/uapi/linux/keyctl.h
7927 +++ b/include/uapi/linux/keyctl.h
7928 @@ -65,7 +65,12 @@
7929
7930 /* keyctl structures */
7931 struct keyctl_dh_params {
7932 - __s32 private;
7933 + union {
7934 +#ifndef __cplusplus
7935 + __s32 private;
7936 +#endif
7937 + __s32 priv;
7938 + };
7939 __s32 prime;
7940 __s32 base;
7941 };
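
As an illustration (not part of the patch), userspace filling in the structure: C code may keep using .private, while C++ (where "private" is a keyword) uses the new .priv alias. The key serial numbers below are made up:

#include <linux/keyctl.h>

static const struct keyctl_dh_params foo_dh_params = {
	.priv  = 12345678,	/* key holding the DH private value */
	.prime = 23456789,	/* key holding the prime p */
	.base  = 34567890,	/* key holding the generator g */
};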
7942 diff --git a/kernel/module.c b/kernel/module.c
7943 index 3fda10c549a2..0d86fc73d63d 100644
7944 --- a/kernel/module.c
7945 +++ b/kernel/module.c
7946 @@ -76,14 +76,9 @@
7947
7948 /*
7949 * Modules' sections will be aligned on page boundaries
7950 - * to ensure complete separation of code and data, but
7951 - * only when CONFIG_STRICT_MODULE_RWX=y
7952 + * to ensure complete separation of code and data
7953 */
7954 -#ifdef CONFIG_STRICT_MODULE_RWX
7955 # define debug_align(X) ALIGN(X, PAGE_SIZE)
7956 -#else
7957 -# define debug_align(X) (X)
7958 -#endif
7959
7960 /* If this is set, the section belongs in the init part of the module */
7961 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
7962 @@ -1699,6 +1694,8 @@ static int add_usage_links(struct module *mod)
7963 return ret;
7964 }
7965
7966 +static void module_remove_modinfo_attrs(struct module *mod, int end);
7967 +
7968 static int module_add_modinfo_attrs(struct module *mod)
7969 {
7970 struct module_attribute *attr;
7971 @@ -1713,24 +1710,34 @@ static int module_add_modinfo_attrs(struct module *mod)
7972 return -ENOMEM;
7973
7974 temp_attr = mod->modinfo_attrs;
7975 - for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
7976 + for (i = 0; (attr = modinfo_attrs[i]); i++) {
7977 if (!attr->test || attr->test(mod)) {
7978 memcpy(temp_attr, attr, sizeof(*temp_attr));
7979 sysfs_attr_init(&temp_attr->attr);
7980 error = sysfs_create_file(&mod->mkobj.kobj,
7981 &temp_attr->attr);
7982 + if (error)
7983 + goto error_out;
7984 ++temp_attr;
7985 }
7986 }
7987 +
7988 + return 0;
7989 +
7990 +error_out:
7991 + if (i > 0)
7992 + module_remove_modinfo_attrs(mod, --i);
7993 return error;
7994 }
7995
7996 -static void module_remove_modinfo_attrs(struct module *mod)
7997 +static void module_remove_modinfo_attrs(struct module *mod, int end)
7998 {
7999 struct module_attribute *attr;
8000 int i;
8001
8002 for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
8003 + if (end >= 0 && i > end)
8004 + break;
8005 /* pick a field to test for end of list */
8006 if (!attr->attr.name)
8007 break;
8008 @@ -1818,7 +1825,7 @@ static int mod_sysfs_setup(struct module *mod,
8009 return 0;
8010
8011 out_unreg_modinfo_attrs:
8012 - module_remove_modinfo_attrs(mod);
8013 + module_remove_modinfo_attrs(mod, -1);
8014 out_unreg_param:
8015 module_param_sysfs_remove(mod);
8016 out_unreg_holders:
8017 @@ -1854,7 +1861,7 @@ static void mod_sysfs_fini(struct module *mod)
8018 {
8019 }
8020
8021 -static void module_remove_modinfo_attrs(struct module *mod)
8022 +static void module_remove_modinfo_attrs(struct module *mod, int end)
8023 {
8024 }
8025
8026 @@ -1870,7 +1877,7 @@ static void init_param_lock(struct module *mod)
8027 static void mod_sysfs_teardown(struct module *mod)
8028 {
8029 del_usage_links(mod);
8030 - module_remove_modinfo_attrs(mod);
8031 + module_remove_modinfo_attrs(mod, -1);
8032 module_param_sysfs_remove(mod);
8033 kobject_put(mod->mkobj.drivers_dir);
8034 kobject_put(mod->holders_dir);
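
As an illustration (not part of the patch), the unwind pattern the fix introduces, reduced to its core: stop at the first sysfs_create_file() failure and remove only the files actually created. foo_add_attrs() is hypothetical:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static int foo_add_attrs(struct kobject *kobj,
			 const struct attribute * const *attrs, int n)
{
	int i, error;

	for (i = 0; i < n; i++) {
		error = sysfs_create_file(kobj, attrs[i]);
		if (error)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)	/* remove only attrs[0..i-1] */
		sysfs_remove_file(kobj, attrs[i]);
	return error;
}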
8035 diff --git a/kernel/resource.c b/kernel/resource.c
8036 index 30e1bc68503b..bce773cc5e41 100644
8037 --- a/kernel/resource.c
8038 +++ b/kernel/resource.c
8039 @@ -318,24 +318,27 @@ int release_resource(struct resource *old)
8040
8041 EXPORT_SYMBOL(release_resource);
8042
8043 -/*
8044 - * Finds the lowest iomem resource existing within [res->start.res->end).
8045 - * The caller must specify res->start, res->end, res->flags, and optionally
8046 - * desc. If found, returns 0, res is overwritten, if not found, returns -1.
8047 - * This function walks the whole tree and not just first level children until
8048 - * and unless first_level_children_only is true.
8049 +/**
8050 + * Finds the lowest iomem resource that covers part of [start..end]. The
8051 + * caller must specify start, end, flags, and desc (which may be
8052 + * IORES_DESC_NONE).
8053 + *
8054 + * If a resource is found, returns 0 and *res is overwritten with the part
8055 + * of the resource that's within [start..end]; if none is found, returns
8056 + * -ENODEV. Returns -EINVAL for invalid parameters.
8057 + *
8058 + * This function walks the whole tree and not just first level children
8059 + * unless @first_level_children_only is true.
8060 */
8061 -static int find_next_iomem_res(struct resource *res, unsigned long desc,
8062 - bool first_level_children_only)
8063 +static int find_next_iomem_res(resource_size_t start, resource_size_t end,
8064 + unsigned long flags, unsigned long desc,
8065 + bool first_level_children_only,
8066 + struct resource *res)
8067 {
8068 - resource_size_t start, end;
8069 struct resource *p;
8070 bool sibling_only = false;
8071
8072 BUG_ON(!res);
8073 -
8074 - start = res->start;
8075 - end = res->end;
8076 BUG_ON(start >= end);
8077
8078 if (first_level_children_only)
8079 @@ -344,7 +347,7 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
8080 read_lock(&resource_lock);
8081
8082 for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
8083 - if ((p->flags & res->flags) != res->flags)
8084 + if ((p->flags & flags) != flags)
8085 continue;
8086 if ((desc != IORES_DESC_NONE) && (desc != p->desc))
8087 continue;
8088 @@ -352,39 +355,38 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
8089 p = NULL;
8090 break;
8091 }
8092 - if ((p->end >= start) && (p->start < end))
8093 + if ((p->end >= start) && (p->start <= end))
8094 break;
8095 }
8096
8097 + if (p) {
8098 + /* copy data */
8099 + res->start = max(start, p->start);
8100 + res->end = min(end, p->end);
8101 + res->flags = p->flags;
8102 + res->desc = p->desc;
8103 + }
8104 +
8105 read_unlock(&resource_lock);
8106 - if (!p)
8107 - return -1;
8108 - /* copy data */
8109 - if (res->start < p->start)
8110 - res->start = p->start;
8111 - if (res->end > p->end)
8112 - res->end = p->end;
8113 - res->flags = p->flags;
8114 - res->desc = p->desc;
8115 - return 0;
8116 + return p ? 0 : -ENODEV;
8117 }
8118
8119 -static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
8120 - bool first_level_children_only,
8121 - void *arg,
8122 +static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
8123 + unsigned long flags, unsigned long desc,
8124 + bool first_level_children_only, void *arg,
8125 int (*func)(struct resource *, void *))
8126 {
8127 - u64 orig_end = res->end;
8128 + struct resource res;
8129 int ret = -1;
8130
8131 - while ((res->start < res->end) &&
8132 - !find_next_iomem_res(res, desc, first_level_children_only)) {
8133 - ret = (*func)(res, arg);
8134 + while (start < end &&
8135 + !find_next_iomem_res(start, end, flags, desc,
8136 + first_level_children_only, &res)) {
8137 + ret = (*func)(&res, arg);
8138 if (ret)
8139 break;
8140
8141 - res->start = res->end + 1;
8142 - res->end = orig_end;
8143 + start = res.end + 1;
8144 }
8145
8146 return ret;
8147 @@ -407,13 +409,7 @@ static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
8148 int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
8149 u64 end, void *arg, int (*func)(struct resource *, void *))
8150 {
8151 - struct resource res;
8152 -
8153 - res.start = start;
8154 - res.end = end;
8155 - res.flags = flags;
8156 -
8157 - return __walk_iomem_res_desc(&res, desc, false, arg, func);
8158 + return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func);
8159 }
8160 EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
8161
8162 @@ -427,13 +423,9 @@ EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
8163 int walk_system_ram_res(u64 start, u64 end, void *arg,
8164 int (*func)(struct resource *, void *))
8165 {
8166 - struct resource res;
8167 + unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
8168
8169 - res.start = start;
8170 - res.end = end;
8171 - res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
8172 -
8173 - return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
8174 + return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
8175 arg, func);
8176 }
8177
8178 @@ -444,13 +436,9 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
8179 int walk_mem_res(u64 start, u64 end, void *arg,
8180 int (*func)(struct resource *, void *))
8181 {
8182 - struct resource res;
8183 + unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
8184
8185 - res.start = start;
8186 - res.end = end;
8187 - res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
8188 -
8189 - return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
8190 + return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
8191 arg, func);
8192 }
8193
8194 @@ -464,25 +452,25 @@ int walk_mem_res(u64 start, u64 end, void *arg,
8195 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
8196 void *arg, int (*func)(unsigned long, unsigned long, void *))
8197 {
8198 + resource_size_t start, end;
8199 + unsigned long flags;
8200 struct resource res;
8201 unsigned long pfn, end_pfn;
8202 - u64 orig_end;
8203 int ret = -1;
8204
8205 - res.start = (u64) start_pfn << PAGE_SHIFT;
8206 - res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
8207 - res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
8208 - orig_end = res.end;
8209 - while ((res.start < res.end) &&
8210 - (find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
8211 + start = (u64) start_pfn << PAGE_SHIFT;
8212 + end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
8213 + flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
8214 + while (start < end &&
8215 + !find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
8216 + true, &res)) {
8217 pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
8218 end_pfn = (res.end + 1) >> PAGE_SHIFT;
8219 if (end_pfn > pfn)
8220 ret = (*func)(pfn, end_pfn - pfn, arg);
8221 if (ret)
8222 break;
8223 - res.start = res.end + 1;
8224 - res.end = orig_end;
8225 + start = res.end + 1;
8226 }
8227 return ret;
8228 }
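
As an illustration (not part of the patch), an external caller of the refactored walker; the exported API is unchanged, so existing users such as this hypothetical page counter are unaffected:

#include <linux/ioport.h>

static int foo_count_pages(unsigned long pfn, unsigned long nr_pages,
			   void *arg)
{
	*(unsigned long *)arg += nr_pages;
	return 0;	/* non-zero would abort the walk */
}

static unsigned long foo_ram_pages(unsigned long start_pfn,
				   unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_system_ram_range(start_pfn, nr_pages, &total,
			      foo_count_pages);
	return total;
}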
8229 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
8230 index 75f322603d44..49ed38914669 100644
8231 --- a/kernel/sched/fair.c
8232 +++ b/kernel/sched/fair.c
8233 @@ -4420,6 +4420,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
8234 if (likely(cfs_rq->runtime_remaining > 0))
8235 return;
8236
8237 + if (cfs_rq->throttled)
8238 + return;
8239 /*
8240 * if we're unable to extend our runtime we resched so that the active
8241 * hierarchy can be throttled
8242 @@ -4615,6 +4617,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
8243 if (!cfs_rq_throttled(cfs_rq))
8244 goto next;
8245
8246 + /* By the above check, this should never be true */
8247 + SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
8248 +
8249 runtime = -cfs_rq->runtime_remaining + 1;
8250 if (runtime > remaining)
8251 runtime = remaining;
8252 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
8253 index 443edcddac8a..c2708e1f0c69 100644
8254 --- a/kernel/time/timekeeping.c
8255 +++ b/kernel/time/timekeeping.c
8256 @@ -823,7 +823,7 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
8257
8258 } while (read_seqcount_retry(&tk_core.seq, seq));
8259
8260 - return base + nsecs;
8261 + return ktime_add_ns(base, nsecs);
8262 }
8263 EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
8264
8265 diff --git a/mm/migrate.c b/mm/migrate.c
8266 index b2ea7d1e6f24..0c48191a9036 100644
8267 --- a/mm/migrate.c
8268 +++ b/mm/migrate.c
8269 @@ -2328,16 +2328,13 @@ next:
8270 */
8271 static void migrate_vma_collect(struct migrate_vma *migrate)
8272 {
8273 - struct mm_walk mm_walk;
8274 -
8275 - mm_walk.pmd_entry = migrate_vma_collect_pmd;
8276 - mm_walk.pte_entry = NULL;
8277 - mm_walk.pte_hole = migrate_vma_collect_hole;
8278 - mm_walk.hugetlb_entry = NULL;
8279 - mm_walk.test_walk = NULL;
8280 - mm_walk.vma = migrate->vma;
8281 - mm_walk.mm = migrate->vma->vm_mm;
8282 - mm_walk.private = migrate;
8283 + struct mm_walk mm_walk = {
8284 + .pmd_entry = migrate_vma_collect_pmd,
8285 + .pte_hole = migrate_vma_collect_hole,
8286 + .vma = migrate->vma,
8287 + .mm = migrate->vma->vm_mm,
8288 + .private = migrate,
8289 + };
8290
8291 mmu_notifier_invalidate_range_start(mm_walk.mm,
8292 migrate->start,
8293 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
8294 index 0b7b36fa0d5c..36f244125d24 100644
8295 --- a/net/batman-adv/bat_iv_ogm.c
8296 +++ b/net/batman-adv/bat_iv_ogm.c
8297 @@ -463,17 +463,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
8298 * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
8299 * @buff_pos: current position in the skb
8300 * @packet_len: total length of the skb
8301 - * @tvlv_len: tvlv length of the previously considered OGM
8302 + * @ogm_packet: potential OGM in buffer
8303 *
8304 * Return: true if there is enough space for another OGM, false otherwise.
8305 */
8306 -static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
8307 - __be16 tvlv_len)
8308 +static bool
8309 +batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
8310 + const struct batadv_ogm_packet *ogm_packet)
8311 {
8312 int next_buff_pos = 0;
8313
8314 - next_buff_pos += buff_pos + BATADV_OGM_HLEN;
8315 - next_buff_pos += ntohs(tvlv_len);
8316 + /* check if there is enough space for the header */
8317 + next_buff_pos += buff_pos + sizeof(*ogm_packet);
8318 + if (next_buff_pos > packet_len)
8319 + return false;
8320 +
8321 + /* check if there is enough space for the optional TVLV */
8322 + next_buff_pos += ntohs(ogm_packet->tvlv_len);
8323
8324 return (next_buff_pos <= packet_len) &&
8325 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
8326 @@ -501,7 +507,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
8327
8328 /* adjust all flags and log packets */
8329 while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
8330 - batadv_ogm_packet->tvlv_len)) {
8331 + batadv_ogm_packet)) {
8332 /* we might have aggregated direct link packets with an
8333 * ordinary base packet
8334 */
8335 @@ -1852,7 +1858,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
8336
8337 /* unpack the aggregated packets and process them one by one */
8338 while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
8339 - ogm_packet->tvlv_len)) {
8340 + ogm_packet)) {
8341 batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
8342
8343 ogm_offset += BATADV_OGM_HLEN;
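
As an illustration (not part of the patch), the general rule behind the fix: bounds-check the fixed header before reading a length field out of it. The foo_hdr record type is hypothetical:

#include <linux/types.h>
#include <asm/byteorder.h>

struct foo_hdr {
	u8     type;
	u8     flags;
	__be16 payload_len;
};

static bool foo_record_fits(const u8 *buf, int pos, int buf_len)
{
	const struct foo_hdr *hdr;

	/* the header itself must fit before payload_len is read */
	if (pos + (int)sizeof(*hdr) > buf_len)
		return false;

	hdr = (const struct foo_hdr *)(buf + pos);
	return pos + (int)sizeof(*hdr) + ntohs(hdr->payload_len) <=
	       buf_len;
}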
8344 diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
8345 index 0d9459b69bdb..c32820963b8e 100644
8346 --- a/net/batman-adv/netlink.c
8347 +++ b/net/batman-adv/netlink.c
8348 @@ -118,7 +118,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
8349 {
8350 struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
8351
8352 - return attr ? nla_get_u32(attr) : 0;
8353 + return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
8354 }
8355
8356 /**
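
As an illustration (not part of the patch), the same defensive pattern for any u32 attribute looked up via nlmsg_find_attr(), which performs no policy validation; foo_get_u32_attr() is hypothetical:

#include <net/genetlink.h>
#include <net/netlink.h>

static u32 foo_get_u32_attr(const struct nlmsghdr *nlh, int attrtype)
{
	struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);

	/* reject missing or short/long payloads before nla_get_u32() */
	if (!attr || nla_len(attr) != sizeof(u32))
		return 0;
	return nla_get_u32(attr);
}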
8357 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
8358 index c59638574cf8..f101a6460b44 100644
8359 --- a/net/mac80211/util.c
8360 +++ b/net/mac80211/util.c
8361 @@ -3527,9 +3527,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
8362 }
8363
8364 /* Always allow software iftypes */
8365 - if (local->hw.wiphy->software_iftypes & BIT(iftype) ||
8366 - (iftype == NL80211_IFTYPE_AP_VLAN &&
8367 - local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
8368 + if (cfg80211_iftype_allowed(local->hw.wiphy, iftype, 0, 1)) {
8369 if (radar_detect)
8370 return -EINVAL;
8371 return 0;
8372 @@ -3564,7 +3562,8 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
8373
8374 if (sdata_iter == sdata ||
8375 !ieee80211_sdata_running(sdata_iter) ||
8376 - local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
8377 + cfg80211_iftype_allowed(local->hw.wiphy,
8378 + wdev_iter->iftype, 0, 1))
8379 continue;
8380
8381 params.iftype_num[wdev_iter->iftype]++;
8382 diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
8383 index 9c7da811d130..98f193fd5315 100644
8384 --- a/net/vmw_vsock/hyperv_transport.c
8385 +++ b/net/vmw_vsock/hyperv_transport.c
8386 @@ -320,6 +320,11 @@ static void hvs_close_connection(struct vmbus_channel *chan)
8387 lock_sock(sk);
8388 hvs_do_close_lock_held(vsock_sk(sk), true);
8389 release_sock(sk);
8390 +
8391 + /* Release the refcnt for the channel that's opened in
8392 + * hvs_open_connection().
8393 + */
8394 + sock_put(sk);
8395 }
8396
8397 static void hvs_open_connection(struct vmbus_channel *chan)
8398 @@ -388,6 +393,9 @@ static void hvs_open_connection(struct vmbus_channel *chan)
8399 }
8400
8401 set_per_channel_state(chan, conn_from_host ? new : sk);
8402 +
8403 + /* This reference will be dropped by hvs_close_connection(). */
8404 + sock_hold(conn_from_host ? new : sk);
8405 vmbus_set_chn_rescind_callback(chan, hvs_close_connection);
8406
8407 /* Set the pending send size to max packet size to always get
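
As an illustration (not part of the patch), the lifetime rule the two hunks above establish: whoever stashes a struct sock for a later callback takes a reference, and the callback that finishes with it drops it. The foo_* functions are hypothetical:

#include <net/sock.h>

static void foo_register(struct sock *sk)
{
	sock_hold(sk);	/* keep sk alive until the callback runs */
	/* ... stash sk as per-channel state ... */
}

static void foo_rescind_callback(struct sock *sk)
{
	/* ... final use of sk ... */
	sock_put(sk);	/* drop the reference taken in foo_register() */
}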
8408 diff --git a/net/wireless/core.c b/net/wireless/core.c
8409 index 2a46ec3cb72c..68660781aa51 100644
8410 --- a/net/wireless/core.c
8411 +++ b/net/wireless/core.c
8412 @@ -1335,10 +1335,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
8413 }
8414 break;
8415 case NETDEV_PRE_UP:
8416 - if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) &&
8417 - !(wdev->iftype == NL80211_IFTYPE_AP_VLAN &&
8418 - rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
8419 - wdev->use_4addr))
8420 + if (!cfg80211_iftype_allowed(wdev->wiphy, wdev->iftype,
8421 + wdev->use_4addr, 0))
8422 return notifier_from_errno(-EOPNOTSUPP);
8423
8424 if (rfkill_blocked(rdev->rfkill))
8425 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
8426 index 8e2f03ab4cc9..2a85bff6a8f3 100644
8427 --- a/net/wireless/nl80211.c
8428 +++ b/net/wireless/nl80211.c
8429 @@ -3210,9 +3210,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
8430 return err;
8431 }
8432
8433 - if (!(rdev->wiphy.interface_modes & (1 << type)) &&
8434 - !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
8435 - rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
8436 + if (!cfg80211_iftype_allowed(&rdev->wiphy, type, params.use_4addr, 0))
8437 return -EOPNOTSUPP;
8438
8439 err = nl80211_parse_mon_options(rdev, type, info, &params);
8440 diff --git a/net/wireless/util.c b/net/wireless/util.c
8441 index d57e2f679a3e..c14e8f6e5e19 100644
8442 --- a/net/wireless/util.c
8443 +++ b/net/wireless/util.c
8444 @@ -1670,7 +1670,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
8445 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
8446 num_interfaces += params->iftype_num[iftype];
8447 if (params->iftype_num[iftype] > 0 &&
8448 - !(wiphy->software_iftypes & BIT(iftype)))
8449 + !cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
8450 used_iftypes |= BIT(iftype);
8451 }
8452
8453 @@ -1692,7 +1692,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
8454 return -ENOMEM;
8455
8456 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
8457 - if (wiphy->software_iftypes & BIT(iftype))
8458 + if (cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
8459 continue;
8460 for (j = 0; j < c->n_limits; j++) {
8461 all_iftypes |= limits[j].types;
8462 @@ -1895,3 +1895,26 @@ EXPORT_SYMBOL(rfc1042_header);
8463 const unsigned char bridge_tunnel_header[] __aligned(2) =
8464 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
8465 EXPORT_SYMBOL(bridge_tunnel_header);
8466 +
8467 +bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
8468 + bool is_4addr, u8 check_swif)
8469 +
8470 +{
8471 + bool is_vlan = iftype == NL80211_IFTYPE_AP_VLAN;
8472 +
8473 + switch (check_swif) {
8474 + case 0:
8475 + if (is_vlan && is_4addr)
8476 + return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
8477 + return wiphy->interface_modes & BIT(iftype);
8478 + case 1:
8479 + if (!(wiphy->software_iftypes & BIT(iftype)) && is_vlan)
8480 + return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
8481 + return wiphy->software_iftypes & BIT(iftype);
8482 + default:
8483 + break;
8484 + }
8485 +
8486 + return false;
8487 +}
8488 +EXPORT_SYMBOL(cfg80211_iftype_allowed);
8489 diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
8490 index c4a9ddb174bc..5aa75a0a1ced 100755
8491 --- a/scripts/decode_stacktrace.sh
8492 +++ b/scripts/decode_stacktrace.sh
8493 @@ -78,7 +78,7 @@ parse_symbol() {
8494 fi
8495
8496 # Strip out the base of the path
8497 - code=${code//^$basepath/""}
8498 + code=${code#$basepath/}
8499
8500 # In the case of inlines, move everything to same line
8501 code=${code//$'\n'/' '}
8502 diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
8503 index 088ea2ac8570..612f737cee83 100644
8504 --- a/security/apparmor/policy_unpack.c
8505 +++ b/security/apparmor/policy_unpack.c
8506 @@ -223,16 +223,21 @@ static void *kvmemdup(const void *src, size_t len)
8507 static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
8508 {
8509 size_t size = 0;
8510 + void *pos = e->pos;
8511
8512 if (!inbounds(e, sizeof(u16)))
8513 - return 0;
8514 + goto fail;
8515 size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
8516 e->pos += sizeof(__le16);
8517 if (!inbounds(e, size))
8518 - return 0;
8519 + goto fail;
8520 *chunk = e->pos;
8521 e->pos += size;
8522 return size;
8523 +
8524 +fail:
8525 + e->pos = pos;
8526 + return 0;
8527 }
8528
8529 /* unpack control byte */
8530 @@ -294,49 +299,66 @@ fail:
8531
8532 static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
8533 {
8534 + void *pos = e->pos;
8535 +
8536 if (unpack_nameX(e, AA_U32, name)) {
8537 if (!inbounds(e, sizeof(u32)))
8538 - return 0;
8539 + goto fail;
8540 if (data)
8541 *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
8542 e->pos += sizeof(u32);
8543 return 1;
8544 }
8545 +
8546 +fail:
8547 + e->pos = pos;
8548 return 0;
8549 }
8550
8551 static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
8552 {
8553 + void *pos = e->pos;
8554 +
8555 if (unpack_nameX(e, AA_U64, name)) {
8556 if (!inbounds(e, sizeof(u64)))
8557 - return 0;
8558 + goto fail;
8559 if (data)
8560 *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
8561 e->pos += sizeof(u64);
8562 return 1;
8563 }
8564 +
8565 +fail:
8566 + e->pos = pos;
8567 return 0;
8568 }
8569
8570 static size_t unpack_array(struct aa_ext *e, const char *name)
8571 {
8572 + void *pos = e->pos;
8573 +
8574 if (unpack_nameX(e, AA_ARRAY, name)) {
8575 int size;
8576 if (!inbounds(e, sizeof(u16)))
8577 - return 0;
8578 + goto fail;
8579 size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
8580 e->pos += sizeof(u16);
8581 return size;
8582 }
8583 +
8584 +fail:
8585 + e->pos = pos;
8586 return 0;
8587 }
8588
8589 static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
8590 {
8591 + void *pos = e->pos;
8592 +
8593 if (unpack_nameX(e, AA_BLOB, name)) {
8594 u32 size;
8595 if (!inbounds(e, sizeof(u32)))
8596 - return 0;
8597 + goto fail;
8598 size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
8599 e->pos += sizeof(u32);
8600 if (inbounds(e, (size_t) size)) {
8601 @@ -345,6 +367,9 @@ static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
8602 return size;
8603 }
8604 }
8605 +
8606 +fail:
8607 + e->pos = pos;
8608 return 0;
8609 }
8610
8611 @@ -361,9 +386,10 @@ static int unpack_str(struct aa_ext *e, const char **string, const char *name)
8612 if (src_str[size - 1] != 0)
8613 goto fail;
8614 *string = src_str;
8615 +
8616 + return size;
8617 }
8618 }
8619 - return size;
8620
8621 fail:
8622 e->pos = pos;
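
As an illustration (not part of the patch), the transactional-unpack pattern these hunks apply throughout: snapshot the cursor on entry and rewind it on any failure, so a failed optional element leaves the stream untouched. The foo_ext cursor type is hypothetical:

#include <linux/types.h>

struct foo_ext {
	u8 *pos;
	u8 *end;
};

static size_t foo_unpack_chunk(struct foo_ext *e, u8 **chunk)
{
	u8 *pos = e->pos;	/* snapshot for rollback */
	size_t size;

	if (e->end - e->pos < 2)
		goto fail;
	size = e->pos[0] | ((size_t)e->pos[1] << 8);	/* LE u16 */
	e->pos += 2;
	if ((size_t)(e->end - e->pos) < size)
		goto fail;	/* the size field was already consumed ... */
	*chunk = e->pos;
	e->pos += size;
	return size;

fail:
	e->pos = pos;	/* ... so rewind to the entry position */
	return 0;
}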
8623 diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
8624 index b9a6b66aeb0e..d8ba3a6d5042 100644
8625 --- a/sound/pci/hda/hda_auto_parser.c
8626 +++ b/sound/pci/hda/hda_auto_parser.c
8627 @@ -828,6 +828,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
8628 while (id >= 0) {
8629 const struct hda_fixup *fix = codec->fixup_list + id;
8630
8631 + if (++depth > 10)
8632 + break;
8633 if (fix->chained_before)
8634 apply_fixup(codec, fix->chain_id, action, depth + 1);
8635
8636 @@ -867,8 +869,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
8637 }
8638 if (!fix->chained || fix->chained_before)
8639 break;
8640 - if (++depth > 10)
8641 - break;
8642 id = fix->chain_id;
8643 }
8644 }
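
As an illustration (not part of the patch), why the depth guard has to move: the loop can both recurse (chained_before) and iterate (chain_id), so the guard must run before either step or a chained_before cycle recurses without bound. The foo_fix type is hypothetical:

#include <linux/types.h>

struct foo_fix {
	int  chain_id;
	bool chained;
	bool chained_before;
};

static void foo_apply(const struct foo_fix *list, int id, int depth)
{
	while (id >= 0) {
		const struct foo_fix *fix = &list[id];

		if (++depth > 10)	/* now guards the recursion too */
			break;
		if (fix->chained_before)
			foo_apply(list, fix->chain_id, depth + 1);

		/* ... apply this fixup ... */

		if (!fix->chained || fix->chained_before)
			break;
		id = fix->chain_id;
	}
}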
8645 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
8646 index a6233775e779..82b0dc9f528f 100644
8647 --- a/sound/pci/hda/hda_codec.c
8648 +++ b/sound/pci/hda/hda_codec.c
8649 @@ -2947,15 +2947,19 @@ static int hda_codec_runtime_resume(struct device *dev)
8650 #ifdef CONFIG_PM_SLEEP
8651 static int hda_codec_force_resume(struct device *dev)
8652 {
8653 + struct hda_codec *codec = dev_to_hda_codec(dev);
8654 + bool forced_resume = !codec->relaxed_resume;
8655 int ret;
8656
8657 /* The get/put pair below enforces the runtime resume even if the
8658 * device hasn't been used at suspend time. This trick is needed to
8659 * update the jack state change during the sleep.
8660 */
8661 - pm_runtime_get_noresume(dev);
8662 + if (forced_resume)
8663 + pm_runtime_get_noresume(dev);
8664 ret = pm_runtime_force_resume(dev);
8665 - pm_runtime_put(dev);
8666 + if (forced_resume)
8667 + pm_runtime_put(dev);
8668 return ret;
8669 }
8670
8671 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
8672 index acacc1900265..2003403ce1c8 100644
8673 --- a/sound/pci/hda/hda_codec.h
8674 +++ b/sound/pci/hda/hda_codec.h
8675 @@ -261,6 +261,8 @@ struct hda_codec {
8676 unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
8677 unsigned int force_pin_prefix:1; /* Add location prefix */
8678 unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
8679 + unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
8680 +
8681 #ifdef CONFIG_PM
8682 unsigned long power_on_acct;
8683 unsigned long power_off_acct;
8684 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
8685 index bb2bd33b00ec..2609161707a4 100644
8686 --- a/sound/pci/hda/hda_generic.c
8687 +++ b/sound/pci/hda/hda_generic.c
8688 @@ -5991,7 +5991,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
8689 if (spec->init_hook)
8690 spec->init_hook(codec);
8691
8692 - snd_hda_apply_verbs(codec);
8693 + if (!spec->skip_verbs)
8694 + snd_hda_apply_verbs(codec);
8695
8696 init_multi_out(codec);
8697 init_extra_out(codec);
8698 diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
8699 index ce9c293717b9..8933c0f64cc4 100644
8700 --- a/sound/pci/hda/hda_generic.h
8701 +++ b/sound/pci/hda/hda_generic.h
8702 @@ -247,6 +247,7 @@ struct hda_gen_spec {
8703 unsigned int indep_hp_enabled:1; /* independent HP enabled */
8704 unsigned int have_aamix_ctl:1;
8705 unsigned int hp_mic_jack_modes:1;
8706 + unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
8707
8708 /* additional mute flags (only effective with auto_mute_via_amp=1) */
8709 u64 mute_bits;
8710 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
8711 index 7a3e34b120b3..c3e3d80ff720 100644
8712 --- a/sound/pci/hda/hda_intel.c
8713 +++ b/sound/pci/hda/hda_intel.c
8714 @@ -329,13 +329,11 @@ enum {
8715
8716 #define AZX_DCAPS_INTEL_SKYLAKE \
8717 (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
8718 + AZX_DCAPS_SYNC_WRITE |\
8719 AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT |\
8720 AZX_DCAPS_I915_POWERWELL)
8721
8722 -#define AZX_DCAPS_INTEL_BROXTON \
8723 - (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
8724 - AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT |\
8725 - AZX_DCAPS_I915_POWERWELL)
8726 +#define AZX_DCAPS_INTEL_BROXTON AZX_DCAPS_INTEL_SKYLAKE
8727
8728 /* quirks for ATI SB / AMD Hudson */
8729 #define AZX_DCAPS_PRESET_ATI_SB \
8730 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
8731 index 35931a18418f..e4fbfb5557ab 100644
8732 --- a/sound/pci/hda/patch_hdmi.c
8733 +++ b/sound/pci/hda/patch_hdmi.c
8734 @@ -2293,8 +2293,10 @@ static void generic_hdmi_free(struct hda_codec *codec)
8735 struct hdmi_spec *spec = codec->spec;
8736 int pin_idx, pcm_idx;
8737
8738 - if (codec_has_acomp(codec))
8739 + if (codec_has_acomp(codec)) {
8740 snd_hdac_acomp_register_notifier(&codec->bus->core, NULL);
8741 + codec->relaxed_resume = 0;
8742 + }
8743
8744 for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
8745 struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
8746 @@ -2550,6 +2552,8 @@ static void register_i915_notifier(struct hda_codec *codec)
8747 spec->drm_audio_ops.pin_eld_notify = intel_pin_eld_notify;
8748 snd_hdac_acomp_register_notifier(&codec->bus->core,
8749 &spec->drm_audio_ops);
8750 + /* the notifier reports jack changes, so no forcible resume is needed */
8751 + codec->relaxed_resume = 1;
8752 }
8753
8754 /* setup_stream ops override for HSW+ */
8755 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
8756 index 9b5caf099bfb..7f74ebee8c2d 100644
8757 --- a/sound/pci/hda/patch_realtek.c
8758 +++ b/sound/pci/hda/patch_realtek.c
8759 @@ -836,9 +836,11 @@ static int alc_init(struct hda_codec *codec)
8760 if (spec->init_hook)
8761 spec->init_hook(codec);
8762
8763 + spec->gen.skip_verbs = 1; /* applied below */
8764 snd_hda_gen_init(codec);
8765 alc_fix_pll(codec);
8766 alc_auto_init_amp(codec, spec->init_amp);
8767 + snd_hda_apply_verbs(codec); /* apply verbs after the codec's own init */
8768
8769 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
8770
8771 @@ -5673,6 +5675,7 @@ enum {
8772 ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
8773 ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
8774 ALC299_FIXUP_PREDATOR_SPK,
8775 + ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
8776 };
8777
8778 static const struct hda_fixup alc269_fixups[] = {
8779 @@ -6701,6 +6704,16 @@ static const struct hda_fixup alc269_fixups[] = {
8780 { }
8781 }
8782 },
8783 + [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
8784 + .type = HDA_FIXUP_PINS,
8785 + .v.pins = (const struct hda_pintbl[]) {
8786 + { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
8787 + { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
8788 + { }
8789 + },
8790 + .chained = true,
8791 + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
8792 + },
8793 };
8794
8795 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
8796 @@ -6843,6 +6856,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
8797 SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
8798 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
8799 SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
8800 + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
8801 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
8802 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
8803 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
8804 @@ -6859,6 +6873,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
8805 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
8806 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
8807 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
8808 + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
8809 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
8810 SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
8811 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
8812 @@ -6936,6 +6951,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
8813 SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
8814 SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
8815 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
8816 + SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
8817 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
8818 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
8819 SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
8820 @@ -8798,6 +8814,7 @@ static int patch_alc680(struct hda_codec *codec)
8821 static const struct hda_device_id snd_hda_id_realtek[] = {
8822 HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
8823 HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
8824 + HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
8825 HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
8826 HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
8827 HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
8828 diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
8829 index 1ba069967fa2..ba2d9fab28d0 100755
8830 --- a/tools/testing/selftests/net/fib_rule_tests.sh
8831 +++ b/tools/testing/selftests/net/fib_rule_tests.sh
8832 @@ -15,6 +15,7 @@ GW_IP6=2001:db8:1::2
8833 SRC_IP6=2001:db8:1::3
8834
8835 DEV_ADDR=192.51.100.1
8836 +DEV_ADDR6=2001:db8:1::1
8837 DEV=dummy0
8838
8839 log_test()
8840 @@ -55,8 +56,8 @@ setup()
8841
8842 $IP link add dummy0 type dummy
8843 $IP link set dev dummy0 up
8844 - $IP address add 192.51.100.1/24 dev dummy0
8845 - $IP -6 address add 2001:db8:1::1/64 dev dummy0
8846 + $IP address add $DEV_ADDR/24 dev dummy0
8847 + $IP -6 address add $DEV_ADDR6/64 dev dummy0
8848
8849 set +e
8850 }
8851 diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
8852 index b20b751286fc..757a17f5ebde 100644
8853 --- a/virt/kvm/eventfd.c
8854 +++ b/virt/kvm/eventfd.c
8855 @@ -44,6 +44,12 @@
8856
8857 static struct workqueue_struct *irqfd_cleanup_wq;
8858
8859 +bool __attribute__((weak))
8860 +kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
8861 +{
8862 + return true;
8863 +}
8864 +
8865 static void
8866 irqfd_inject(struct work_struct *work)
8867 {
8868 @@ -297,6 +303,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
8869 if (!kvm_arch_intc_initialized(kvm))
8870 return -EAGAIN;
8871
8872 + if (!kvm_arch_irqfd_allowed(kvm, args))
8873 + return -EINVAL;
8874 +
8875 irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
8876 if (!irqfd)
8877 return -ENOMEM;
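
As an illustration (not part of the patch), the weak-symbol pattern used above: common code supplies a permissive default, and an architecture overrides it just by defining a strong symbol with the same signature. The body shown is a hypothetical override; irqchip_in_kernel() merely stands in for whatever predicate the arch actually needs:

#include <linux/kvm_host.h>

/* a strong definition in arch code overrides the weak default */
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
	/* e.g. refuse irqfds until the in-kernel irqchip exists */
	return irqchip_in_kernel(kvm);
}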