Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0130-4.19.31-all-fixes.patch



Revision 3409
Fri Aug 2 11:47:41 2019 UTC by niro
File size: 392684 bytes
-linux-4.19.31
1 diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
2 index 3b2f2dd82225..3c6fc2e08d04 100644
3 --- a/Documentation/arm64/silicon-errata.txt
4 +++ b/Documentation/arm64/silicon-errata.txt
5 @@ -44,6 +44,8 @@ stable kernels.
6
7 | Implementor | Component | Erratum ID | Kconfig |
8 +----------------+-----------------+-----------------+-----------------------------+
9 +| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 |
10 +| | | | |
11 | ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
12 | ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
13 | ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 |
14 diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
15 index 0de6f6145cc6..7ba8cd567f84 100644
16 --- a/Documentation/process/stable-kernel-rules.rst
17 +++ b/Documentation/process/stable-kernel-rules.rst
18 @@ -38,6 +38,9 @@ Procedure for submitting patches to the -stable tree
19 - If the patch covers files in net/ or drivers/net please follow netdev stable
20 submission guidelines as described in
21 :ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
22 + after first checking the stable networking queue at
23 + https://patchwork.ozlabs.org/bundle/davem/stable/?series=&submitter=&state=*&q=&archive=
24 + to ensure the requested patch is not already queued up.
25 - Security patches should not be handled (solely) by the -stable review
26 process but should follow the procedures in
27 :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
28 diff --git a/Makefile b/Makefile
29 index 72e27c379eaf..3b1c6cff6700 100644
30 --- a/Makefile
31 +++ b/Makefile
32 @@ -1,7 +1,7 @@
33 # SPDX-License-Identifier: GPL-2.0
34 VERSION = 4
35 PATCHLEVEL = 19
36 -SUBLEVEL = 30
37 +SUBLEVEL = 31
38 EXTRAVERSION =
39 NAME = "People's Front"
40
41 diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
42 index ac69f307dcfe..74953e76a57d 100644
43 --- a/arch/arc/Kconfig
44 +++ b/arch/arc/Kconfig
45 @@ -420,6 +420,14 @@ config ARC_HAS_ACCL_REGS
46 (also referred to as r58:r59). These can also be used by gcc as GPR so
47 kernel needs to save/restore per process
48
49 +config ARC_IRQ_NO_AUTOSAVE
50 + bool "Disable hardware autosave regfile on interrupts"
51 + default n
52 + help
53 + On HS cores, taken interrupt auto saves the regfile on stack.
54 + This is programmable and can be optionally disabled in which case
55 + software INTERRUPT_PROLOGUE/EPILGUE do the needed work
56 +
57 endif # ISA_ARCV2
58
59 endmenu # "ARC CPU Configuration"
60 diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
61 index 49bfbd879caa..bdbdaef902eb 100644
62 --- a/arch/arc/include/asm/arcregs.h
63 +++ b/arch/arc/include/asm/arcregs.h
64 @@ -151,6 +151,14 @@ struct bcr_isa_arcv2 {
65 #endif
66 };
67
68 +struct bcr_uarch_build_arcv2 {
69 +#ifdef CONFIG_CPU_BIG_ENDIAN
70 + unsigned int pad:8, prod:8, maj:8, min:8;
71 +#else
72 + unsigned int min:8, maj:8, prod:8, pad:8;
73 +#endif
74 +};
75 +
76 struct bcr_mpy {
77 #ifdef CONFIG_CPU_BIG_ENDIAN
78 unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
79 diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
80 index 309f4e6721b3..225e7df2d8ed 100644
81 --- a/arch/arc/include/asm/entry-arcv2.h
82 +++ b/arch/arc/include/asm/entry-arcv2.h
83 @@ -17,6 +17,33 @@
84 ;
85 ; Now manually save: r12, sp, fp, gp, r25
86
87 +#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
88 +.ifnc \called_from, exception
89 + st.as r9, [sp, -10] ; save r9 in it's final stack slot
90 + sub sp, sp, 12 ; skip JLI, LDI, EI
91 +
92 + PUSH lp_count
93 + PUSHAX lp_start
94 + PUSHAX lp_end
95 + PUSH blink
96 +
97 + PUSH r11
98 + PUSH r10
99 +
100 + sub sp, sp, 4 ; skip r9
101 +
102 + PUSH r8
103 + PUSH r7
104 + PUSH r6
105 + PUSH r5
106 + PUSH r4
107 + PUSH r3
108 + PUSH r2
109 + PUSH r1
110 + PUSH r0
111 +.endif
112 +#endif
113 +
114 #ifdef CONFIG_ARC_HAS_ACCL_REGS
115 PUSH r59
116 PUSH r58
117 @@ -86,6 +113,33 @@
118 POP r59
119 #endif
120
121 +#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
122 +.ifnc \called_from, exception
123 + POP r0
124 + POP r1
125 + POP r2
126 + POP r3
127 + POP r4
128 + POP r5
129 + POP r6
130 + POP r7
131 + POP r8
132 + POP r9
133 + POP r10
134 + POP r11
135 +
136 + POP blink
137 + POPAX lp_end
138 + POPAX lp_start
139 +
140 + POP r9
141 + mov lp_count, r9
142 +
143 + add sp, sp, 12 ; skip JLI, LDI, EI
144 + ld.as r9, [sp, -10] ; reload r9 which got clobbered
145 +.endif
146 +#endif
147 +
148 .endm
149
150 /*------------------------------------------------------------------------*/
151 diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
152 index c9173c02081c..eabc3efa6c6d 100644
153 --- a/arch/arc/include/asm/uaccess.h
154 +++ b/arch/arc/include/asm/uaccess.h
155 @@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
156 */
157 "=&r" (tmp), "+r" (to), "+r" (from)
158 :
159 - : "lp_count", "lp_start", "lp_end", "memory");
160 + : "lp_count", "memory");
161
162 return n;
163 }
164 @@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
165 */
166 "=&r" (tmp), "+r" (to), "+r" (from)
167 :
168 - : "lp_count", "lp_start", "lp_end", "memory");
169 + : "lp_count", "memory");
170
171 return n;
172 }
173 @@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
174 " .previous \n"
175 : "+r"(d_char), "+r"(res)
176 : "i"(0)
177 - : "lp_count", "lp_start", "lp_end", "memory");
178 + : "lp_count", "memory");
179
180 return res;
181 }
182 @@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
183 " .previous \n"
184 : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
185 : "g"(-EFAULT), "r"(count)
186 - : "lp_count", "lp_start", "lp_end", "memory");
187 + : "lp_count", "memory");
188
189 return res;
190 }
191 diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
192 index cc558a25b8fa..562089d62d9d 100644
193 --- a/arch/arc/kernel/entry-arcv2.S
194 +++ b/arch/arc/kernel/entry-arcv2.S
195 @@ -209,7 +209,9 @@ restore_regs:
196 ;####### Return from Intr #######
197
198 debug_marker_l1:
199 - bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
200 + ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
201 + btst r0, STATUS_DE_BIT ; Z flag set if bit clear
202 + bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set
203
204 .Lisr_ret_fast_path:
205 ; Handle special case #1: (Entry via Exception, Return via IRQ)
206 diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
207 index 067ea362fb3e..cf18b3e5a934 100644
208 --- a/arch/arc/kernel/intc-arcv2.c
209 +++ b/arch/arc/kernel/intc-arcv2.c
210 @@ -49,11 +49,13 @@ void arc_init_IRQ(void)
211
212 *(unsigned int *)&ictrl = 0;
213
214 +#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
215 ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */
216 ictrl.save_blink = 1;
217 ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */
218 ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */
219 ictrl.save_idx_regs = 1; /* JLI, LDI, EI */
220 +#endif
221
222 WRITE_AUX(AUX_IRQ_CTRL, ictrl);
223
224 diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
225 index 62a30e58441c..3320ca2fe20f 100644
226 --- a/arch/arc/kernel/setup.c
227 +++ b/arch/arc/kernel/setup.c
228 @@ -196,13 +196,29 @@ static void read_arc_build_cfg_regs(void)
229 cpu->bpu.num_pred = 2048 << bpu.pte;
230
231 if (cpu->core.family >= 0x54) {
232 - unsigned int exec_ctrl;
233
234 - READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
235 - cpu->extn.dual_enb = !(exec_ctrl & 1);
236 + struct bcr_uarch_build_arcv2 uarch;
237
238 - /* dual issue always present for this core */
239 - cpu->extn.dual = 1;
240 + /*
241 + * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
242 + * dual issue only (HS4x). But next uarch rev (1:0)
243 + * allows it be configured for single issue (HS3x)
244 + * Ensure we fiddle with dual issue only on HS4x
245 + */
246 + READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
247 +
248 + if (uarch.prod == 4) {
249 + unsigned int exec_ctrl;
250 +
251 + /* dual issue hardware always present */
252 + cpu->extn.dual = 1;
253 +
254 + READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
255 +
256 + /* dual issue hardware enabled ? */
257 + cpu->extn.dual_enb = !(exec_ctrl & 1);
258 +
259 + }
260 }
261 }
262
263 diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
264 index d61044dd8b58..ea14b0bf3116 100644
265 --- a/arch/arc/lib/memcpy-archs.S
266 +++ b/arch/arc/lib/memcpy-archs.S
267 @@ -25,15 +25,11 @@
268 #endif
269
270 #ifdef CONFIG_ARC_HAS_LL64
271 -# define PREFETCH_READ(RX) prefetch [RX, 56]
272 -# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
273 # define LOADX(DST,RX) ldd.ab DST, [RX, 8]
274 # define STOREX(SRC,RX) std.ab SRC, [RX, 8]
275 # define ZOLSHFT 5
276 # define ZOLAND 0x1F
277 #else
278 -# define PREFETCH_READ(RX) prefetch [RX, 28]
279 -# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
280 # define LOADX(DST,RX) ld.ab DST, [RX, 4]
281 # define STOREX(SRC,RX) st.ab SRC, [RX, 4]
282 # define ZOLSHFT 4
283 @@ -41,8 +37,6 @@
284 #endif
285
286 ENTRY_CFI(memcpy)
287 - prefetch [r1] ; Prefetch the read location
288 - prefetchw [r0] ; Prefetch the write location
289 mov.f 0, r2
290 ;;; if size is zero
291 jz.d [blink]
292 @@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
293 lpnz @.Lcopy32_64bytes
294 ;; LOOP START
295 LOADX (r6, r1)
296 - PREFETCH_READ (r1)
297 - PREFETCH_WRITE (r3)
298 LOADX (r8, r1)
299 LOADX (r10, r1)
300 LOADX (r4, r1)
301 @@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
302 lpnz @.Lcopy8bytes_1
303 ;; LOOP START
304 ld.ab r6, [r1, 4]
305 - prefetch [r1, 28] ;Prefetch the next read location
306 ld.ab r8, [r1,4]
307 - prefetchw [r3, 32] ;Prefetch the next write location
308
309 SHIFT_1 (r7, r6, 24)
310 or r7, r7, r5
311 @@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
312 lpnz @.Lcopy8bytes_2
313 ;; LOOP START
314 ld.ab r6, [r1, 4]
315 - prefetch [r1, 28] ;Prefetch the next read location
316 ld.ab r8, [r1,4]
317 - prefetchw [r3, 32] ;Prefetch the next write location
318
319 SHIFT_1 (r7, r6, 16)
320 or r7, r7, r5
321 @@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
322 lpnz @.Lcopy8bytes_3
323 ;; LOOP START
324 ld.ab r6, [r1, 4]
325 - prefetch [r1, 28] ;Prefetch the next read location
326 ld.ab r8, [r1,4]
327 - prefetchw [r3, 32] ;Prefetch the next write location
328
329 SHIFT_1 (r7, r6, 8)
330 or r7, r7, r5
331 diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
332 index 9356753c2ed8..c285a83cbf08 100644
333 --- a/arch/arc/plat-hsdk/Kconfig
334 +++ b/arch/arc/plat-hsdk/Kconfig
335 @@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK
336 bool "ARC HS Development Kit SOC"
337 depends on ISA_ARCV2
338 select ARC_HAS_ACCL_REGS
339 + select ARC_IRQ_NO_AUTOSAVE
340 select CLK_HSDK
341 select RESET_HSDK
342 select MIGHT_HAVE_PCI
343 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
344 index e8cd55a5b04c..cd4c74daf71e 100644
345 --- a/arch/arm/Kconfig
346 +++ b/arch/arm/Kconfig
347 @@ -1444,6 +1444,7 @@ config NR_CPUS
348 config HOTPLUG_CPU
349 bool "Support for hot-pluggable CPUs"
350 depends on SMP
351 + select GENERIC_IRQ_MIGRATION
352 help
353 Say Y here to experiment with turning CPUs off and on. CPUs
354 can be controlled through /sys/devices/system/cpu.
355 diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
356 index f3ac7483afed..5d04dc68cf57 100644
357 --- a/arch/arm/boot/dts/armada-xp-db.dts
358 +++ b/arch/arm/boot/dts/armada-xp-db.dts
359 @@ -144,30 +144,32 @@
360 status = "okay";
361 };
362
363 - nand@d0000 {
364 + nand-controller@d0000 {
365 status = "okay";
366 - label = "pxa3xx_nand-0";
367 - num-cs = <1>;
368 - marvell,nand-keep-config;
369 - nand-on-flash-bbt;
370 -
371 - partitions {
372 - compatible = "fixed-partitions";
373 - #address-cells = <1>;
374 - #size-cells = <1>;
375 -
376 - partition@0 {
377 - label = "U-Boot";
378 - reg = <0 0x800000>;
379 - };
380 - partition@800000 {
381 - label = "Linux";
382 - reg = <0x800000 0x800000>;
383 - };
384 - partition@1000000 {
385 - label = "Filesystem";
386 - reg = <0x1000000 0x3f000000>;
387
388 + nand@0 {
389 + reg = <0>;
390 + label = "pxa3xx_nand-0";
391 + nand-rb = <0>;
392 + nand-on-flash-bbt;
393 +
394 + partitions {
395 + compatible = "fixed-partitions";
396 + #address-cells = <1>;
397 + #size-cells = <1>;
398 +
399 + partition@0 {
400 + label = "U-Boot";
401 + reg = <0 0x800000>;
402 + };
403 + partition@800000 {
404 + label = "Linux";
405 + reg = <0x800000 0x800000>;
406 + };
407 + partition@1000000 {
408 + label = "Filesystem";
409 + reg = <0x1000000 0x3f000000>;
410 + };
411 };
412 };
413 };
414 diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
415 index 1139e9469a83..b4cca507cf13 100644
416 --- a/arch/arm/boot/dts/armada-xp-gp.dts
417 +++ b/arch/arm/boot/dts/armada-xp-gp.dts
418 @@ -160,12 +160,15 @@
419 status = "okay";
420 };
421
422 - nand@d0000 {
423 + nand-controller@d0000 {
424 status = "okay";
425 - label = "pxa3xx_nand-0";
426 - num-cs = <1>;
427 - marvell,nand-keep-config;
428 - nand-on-flash-bbt;
429 +
430 + nand@0 {
431 + reg = <0>;
432 + label = "pxa3xx_nand-0";
433 + nand-rb = <0>;
434 + nand-on-flash-bbt;
435 + };
436 };
437 };
438
439 diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
440 index bbbb38888bb8..87dcb502f72d 100644
441 --- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
442 +++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
443 @@ -81,49 +81,52 @@
444
445 };
446
447 - nand@d0000 {
448 + nand-controller@d0000 {
449 status = "okay";
450 - label = "pxa3xx_nand-0";
451 - num-cs = <1>;
452 - marvell,nand-keep-config;
453 - nand-on-flash-bbt;
454 -
455 - partitions {
456 - compatible = "fixed-partitions";
457 - #address-cells = <1>;
458 - #size-cells = <1>;
459 -
460 - partition@0 {
461 - label = "u-boot";
462 - reg = <0x00000000 0x000e0000>;
463 - read-only;
464 - };
465 -
466 - partition@e0000 {
467 - label = "u-boot-env";
468 - reg = <0x000e0000 0x00020000>;
469 - read-only;
470 - };
471 -
472 - partition@100000 {
473 - label = "u-boot-env2";
474 - reg = <0x00100000 0x00020000>;
475 - read-only;
476 - };
477 -
478 - partition@120000 {
479 - label = "zImage";
480 - reg = <0x00120000 0x00400000>;
481 - };
482 -
483 - partition@520000 {
484 - label = "initrd";
485 - reg = <0x00520000 0x00400000>;
486 - };
487
488 - partition@e00000 {
489 - label = "boot";
490 - reg = <0x00e00000 0x3f200000>;
491 + nand@0 {
492 + reg = <0>;
493 + label = "pxa3xx_nand-0";
494 + nand-rb = <0>;
495 + nand-on-flash-bbt;
496 +
497 + partitions {
498 + compatible = "fixed-partitions";
499 + #address-cells = <1>;
500 + #size-cells = <1>;
501 +
502 + partition@0 {
503 + label = "u-boot";
504 + reg = <0x00000000 0x000e0000>;
505 + read-only;
506 + };
507 +
508 + partition@e0000 {
509 + label = "u-boot-env";
510 + reg = <0x000e0000 0x00020000>;
511 + read-only;
512 + };
513 +
514 + partition@100000 {
515 + label = "u-boot-env2";
516 + reg = <0x00100000 0x00020000>;
517 + read-only;
518 + };
519 +
520 + partition@120000 {
521 + label = "zImage";
522 + reg = <0x00120000 0x00400000>;
523 + };
524 +
525 + partition@520000 {
526 + label = "initrd";
527 + reg = <0x00520000 0x00400000>;
528 + };
529 +
530 + partition@e00000 {
531 + label = "boot";
532 + reg = <0x00e00000 0x3f200000>;
533 + };
534 };
535 };
536 };
537 diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
538 index 04758a2a87f0..67d77eee9433 100644
539 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
540 +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
541 @@ -644,6 +644,17 @@
542 };
543 };
544
545 +/* Configure pwm clock source for timers 8 & 9 */
546 +&timer8 {
547 + assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
548 + assigned-clock-parents = <&sys_clkin_ck>;
549 +};
550 +
551 +&timer9 {
552 + assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
553 + assigned-clock-parents = <&sys_clkin_ck>;
554 +};
555 +
556 /*
557 * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
558 * uart1 wakeirq.
559 diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
560 index d5f11d6d987e..bc85b6a166c7 100644
561 --- a/arch/arm/boot/dts/tegra124-nyan.dtsi
562 +++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
563 @@ -13,10 +13,25 @@
564 stdout-path = "serial0:115200n8";
565 };
566
567 - memory@80000000 {
568 + /*
569 + * Note that recent version of the device tree compiler (starting with
570 + * version 1.4.2) warn about this node containing a reg property, but
571 + * missing a unit-address. However, the bootloader on these Chromebook
572 + * devices relies on the full name of this node to be exactly /memory.
573 + * Adding the unit-address causes the bootloader to create a /memory
574 + * node and write the memory bank configuration to that node, which in
575 + * turn leads the kernel to believe that the device has 2 GiB of
576 + * memory instead of the amount detected by the bootloader.
577 + *
578 + * The name of this node is effectively ABI and must not be changed.
579 + */
580 + memory {
581 + device_type = "memory";
582 reg = <0x0 0x80000000 0x0 0x80000000>;
583 };
584
585 + /delete-node/ memory@80000000;
586 +
587 host1x@50000000 {
588 hdmi@54280000 {
589 status = "okay";
590 diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S
591 index ce45ba0c0687..16019b5961e7 100644
592 --- a/arch/arm/crypto/crct10dif-ce-core.S
593 +++ b/arch/arm/crypto/crct10dif-ce-core.S
594 @@ -124,10 +124,10 @@ ENTRY(crc_t10dif_pmull)
595 vext.8 q10, qzr, q0, #4
596
597 // receive the initial 64B data, xor the initial crc value
598 - vld1.64 {q0-q1}, [arg2, :128]!
599 - vld1.64 {q2-q3}, [arg2, :128]!
600 - vld1.64 {q4-q5}, [arg2, :128]!
601 - vld1.64 {q6-q7}, [arg2, :128]!
602 + vld1.64 {q0-q1}, [arg2]!
603 + vld1.64 {q2-q3}, [arg2]!
604 + vld1.64 {q4-q5}, [arg2]!
605 + vld1.64 {q6-q7}, [arg2]!
606 CPU_LE( vrev64.8 q0, q0 )
607 CPU_LE( vrev64.8 q1, q1 )
608 CPU_LE( vrev64.8 q2, q2 )
609 @@ -167,7 +167,7 @@ CPU_LE( vrev64.8 q7, q7 )
610 _fold_64_B_loop:
611
612 .macro fold64, reg1, reg2
613 - vld1.64 {q11-q12}, [arg2, :128]!
614 + vld1.64 {q11-q12}, [arg2]!
615
616 vmull.p64 q8, \reg1\()h, d21
617 vmull.p64 \reg1, \reg1\()l, d20
618 @@ -238,7 +238,7 @@ _16B_reduction_loop:
619 vmull.p64 q7, d15, d21
620 veor.8 q7, q7, q8
621
622 - vld1.64 {q0}, [arg2, :128]!
623 + vld1.64 {q0}, [arg2]!
624 CPU_LE( vrev64.8 q0, q0 )
625 vswp d0, d1
626 veor.8 q7, q7, q0
627 @@ -335,7 +335,7 @@ _less_than_128:
628 vmov.i8 q0, #0
629 vmov s3, arg1_low32 // get the initial crc value
630
631 - vld1.64 {q7}, [arg2, :128]!
632 + vld1.64 {q7}, [arg2]!
633 CPU_LE( vrev64.8 q7, q7 )
634 vswp d14, d15
635 veor.8 q7, q7, q0
636 diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
637 index d428355cf38d..14c19c70a841 100644
638 --- a/arch/arm/crypto/crct10dif-ce-glue.c
639 +++ b/arch/arm/crypto/crct10dif-ce-glue.c
640 @@ -35,26 +35,15 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
641 unsigned int length)
642 {
643 u16 *crc = shash_desc_ctx(desc);
644 - unsigned int l;
645
646 - if (!may_use_simd()) {
647 - *crc = crc_t10dif_generic(*crc, data, length);
648 + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
649 + kernel_neon_begin();
650 + *crc = crc_t10dif_pmull(*crc, data, length);
651 + kernel_neon_end();
652 } else {
653 - if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
654 - l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
655 - ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
656 -
657 - *crc = crc_t10dif_generic(*crc, data, l);
658 -
659 - length -= l;
660 - data += l;
661 - }
662 - if (length > 0) {
663 - kernel_neon_begin();
664 - *crc = crc_t10dif_pmull(*crc, data, length);
665 - kernel_neon_end();
666 - }
667 + *crc = crc_t10dif_generic(*crc, data, length);
668 }
669 +
670 return 0;
671 }
672
673 diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
674 index c883fcbe93b6..46d41140df27 100644
675 --- a/arch/arm/include/asm/irq.h
676 +++ b/arch/arm/include/asm/irq.h
677 @@ -25,7 +25,6 @@
678 #ifndef __ASSEMBLY__
679 struct irqaction;
680 struct pt_regs;
681 -extern void migrate_irqs(void);
682
683 extern void asm_do_IRQ(unsigned int, struct pt_regs *);
684 void handle_IRQ(unsigned int, struct pt_regs *);
685 diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
686 index 3ad482d2f1eb..d0d0227fc70d 100644
687 --- a/arch/arm/include/asm/kvm_host.h
688 +++ b/arch/arm/include/asm/kvm_host.h
689 @@ -48,6 +48,7 @@
690 #define KVM_REQ_SLEEP \
691 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
692 #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
693 +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
694
695 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
696
697 @@ -147,6 +148,13 @@ struct kvm_cpu_context {
698
699 typedef struct kvm_cpu_context kvm_cpu_context_t;
700
701 +struct vcpu_reset_state {
702 + unsigned long pc;
703 + unsigned long r0;
704 + bool be;
705 + bool reset;
706 +};
707 +
708 struct kvm_vcpu_arch {
709 struct kvm_cpu_context ctxt;
710
711 @@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
712 /* Cache some mmu pages needed inside spinlock regions */
713 struct kvm_mmu_memory_cache mmu_page_cache;
714
715 + struct vcpu_reset_state reset_state;
716 +
717 /* Detect first run of a vcpu */
718 bool has_run_once;
719 };
720 diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
721 index 9908dacf9229..844861368cd5 100644
722 --- a/arch/arm/kernel/irq.c
723 +++ b/arch/arm/kernel/irq.c
724 @@ -31,7 +31,6 @@
725 #include <linux/smp.h>
726 #include <linux/init.h>
727 #include <linux/seq_file.h>
728 -#include <linux/ratelimit.h>
729 #include <linux/errno.h>
730 #include <linux/list.h>
731 #include <linux/kallsyms.h>
732 @@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
733 return nr_irqs;
734 }
735 #endif
736 -
737 -#ifdef CONFIG_HOTPLUG_CPU
738 -static bool migrate_one_irq(struct irq_desc *desc)
739 -{
740 - struct irq_data *d = irq_desc_get_irq_data(desc);
741 - const struct cpumask *affinity = irq_data_get_affinity_mask(d);
742 - struct irq_chip *c;
743 - bool ret = false;
744 -
745 - /*
746 - * If this is a per-CPU interrupt, or the affinity does not
747 - * include this CPU, then we have nothing to do.
748 - */
749 - if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
750 - return false;
751 -
752 - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
753 - affinity = cpu_online_mask;
754 - ret = true;
755 - }
756 -
757 - c = irq_data_get_irq_chip(d);
758 - if (!c->irq_set_affinity)
759 - pr_debug("IRQ%u: unable to set affinity\n", d->irq);
760 - else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
761 - cpumask_copy(irq_data_get_affinity_mask(d), affinity);
762 -
763 - return ret;
764 -}
765 -
766 -/*
767 - * The current CPU has been marked offline. Migrate IRQs off this CPU.
768 - * If the affinity settings do not allow other CPUs, force them onto any
769 - * available CPU.
770 - *
771 - * Note: we must iterate over all IRQs, whether they have an attached
772 - * action structure or not, as we need to get chained interrupts too.
773 - */
774 -void migrate_irqs(void)
775 -{
776 - unsigned int i;
777 - struct irq_desc *desc;
778 - unsigned long flags;
779 -
780 - local_irq_save(flags);
781 -
782 - for_each_irq_desc(i, desc) {
783 - bool affinity_broken;
784 -
785 - raw_spin_lock(&desc->lock);
786 - affinity_broken = migrate_one_irq(desc);
787 - raw_spin_unlock(&desc->lock);
788 -
789 - if (affinity_broken)
790 - pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
791 - i, smp_processor_id());
792 - }
793 -
794 - local_irq_restore(flags);
795 -}
796 -#endif /* CONFIG_HOTPLUG_CPU */
797 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
798 index 3bf82232b1be..1d6f5ea522f4 100644
799 --- a/arch/arm/kernel/smp.c
800 +++ b/arch/arm/kernel/smp.c
801 @@ -254,7 +254,7 @@ int __cpu_disable(void)
802 /*
803 * OK - migrate IRQs away from this CPU
804 */
805 - migrate_irqs();
806 + irq_migrate_all_off_this_cpu();
807
808 /*
809 * Flush user cache and TLB mappings, and then remove this CPU
810 diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
811 index cb094e55dc5f..fd6cde23bb5d 100644
812 --- a/arch/arm/kvm/coproc.c
813 +++ b/arch/arm/kvm/coproc.c
814 @@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
815 reset_coproc_regs(vcpu, table, num);
816
817 for (num = 1; num < NR_CP15_REGS; num++)
818 - if (vcpu_cp15(vcpu, num) == 0x42424242)
819 - panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
820 + WARN(vcpu_cp15(vcpu, num) == 0x42424242,
821 + "Didn't reset vcpu_cp15(vcpu, %zi)", num);
822 }
823 diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
824 index 5ed0c3ee33d6..e53327912adc 100644
825 --- a/arch/arm/kvm/reset.c
826 +++ b/arch/arm/kvm/reset.c
827 @@ -26,6 +26,7 @@
828 #include <asm/cputype.h>
829 #include <asm/kvm_arm.h>
830 #include <asm/kvm_coproc.h>
831 +#include <asm/kvm_emulate.h>
832
833 #include <kvm/arm_arch_timer.h>
834
835 @@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
836 /* Reset CP15 registers */
837 kvm_reset_coprocs(vcpu);
838
839 + /*
840 + * Additional reset state handling that PSCI may have imposed on us.
841 + * Must be done after all the sys_reg reset.
842 + */
843 + if (READ_ONCE(vcpu->arch.reset_state.reset)) {
844 + unsigned long target_pc = vcpu->arch.reset_state.pc;
845 +
846 + /* Gracefully handle Thumb2 entry point */
847 + if (target_pc & 1) {
848 + target_pc &= ~1UL;
849 + vcpu_set_thumb(vcpu);
850 + }
851 +
852 + /* Propagate caller endianness */
853 + if (vcpu->arch.reset_state.be)
854 + kvm_vcpu_set_be(vcpu);
855 +
856 + *vcpu_pc(vcpu) = target_pc;
857 + vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
858 +
859 + vcpu->arch.reset_state.reset = false;
860 + }
861 +
862 /* Reset arch_timer context */
863 return kvm_timer_vcpu_reset(vcpu);
864 }
865 diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
866 index a8b291f00109..dae514c8276a 100644
867 --- a/arch/arm/mach-omap2/cpuidle44xx.c
868 +++ b/arch/arm/mach-omap2/cpuidle44xx.c
869 @@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
870 mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
871 (cx->mpu_logic_state == PWRDM_POWER_OFF);
872
873 + /* Enter broadcast mode for periodic timers */
874 + tick_broadcast_enable();
875 +
876 + /* Enter broadcast mode for one-shot timers */
877 tick_broadcast_enter();
878
879 /*
880 @@ -218,15 +222,6 @@ fail:
881 return index;
882 }
883
884 -/*
885 - * For each cpu, setup the broadcast timer because local timers
886 - * stops for the states above C1.
887 - */
888 -static void omap_setup_broadcast_timer(void *arg)
889 -{
890 - tick_broadcast_enable();
891 -}
892 -
893 static struct cpuidle_driver omap4_idle_driver = {
894 .name = "omap4_idle",
895 .owner = THIS_MODULE,
896 @@ -319,8 +314,5 @@ int __init omap4_idle_init(void)
897 if (!cpu_clkdm[0] || !cpu_clkdm[1])
898 return -ENODEV;
899
900 - /* Configure the broadcast timer on each cpu */
901 - on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
902 -
903 return cpuidle_register(idle_driver, cpu_online_mask);
904 }
905 diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
906 index 9500b6e27380..5d73f2c0b117 100644
907 --- a/arch/arm/mach-omap2/display.c
908 +++ b/arch/arm/mach-omap2/display.c
909 @@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
910 u32 enable_mask, enable_shift;
911 u32 pipd_mask, pipd_shift;
912 u32 reg;
913 + int ret;
914
915 if (dsi_id == 0) {
916 enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
917 @@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
918 return -ENODEV;
919 }
920
921 - regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg);
922 + ret = regmap_read(omap4_dsi_mux_syscon,
923 + OMAP4_DSIPHY_SYSCON_OFFSET,
924 + &reg);
925 + if (ret)
926 + return ret;
927
928 reg &= ~enable_mask;
929 reg &= ~pipd_mask;
930 diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
931 index 058ce73137e8..5d819b6ea428 100644
932 --- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
933 +++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
934 @@ -65,16 +65,16 @@ static int osiris_dvs_notify(struct notifier_block *nb,
935
936 switch (val) {
937 case CPUFREQ_PRECHANGE:
938 - if (old_dvs & !new_dvs ||
939 - cur_dvs & !new_dvs) {
940 + if ((old_dvs && !new_dvs) ||
941 + (cur_dvs && !new_dvs)) {
942 pr_debug("%s: exiting dvs\n", __func__);
943 cur_dvs = false;
944 gpio_set_value(OSIRIS_GPIO_DVS, 1);
945 }
946 break;
947 case CPUFREQ_POSTCHANGE:
948 - if (!old_dvs & new_dvs ||
949 - !cur_dvs & new_dvs) {
950 + if ((!old_dvs && new_dvs) ||
951 + (!cur_dvs && new_dvs)) {
952 pr_debug("entering dvs\n");
953 cur_dvs = true;
954 gpio_set_value(OSIRIS_GPIO_DVS, 0);
955 diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
956 index 1cb9c0f9b5d6..8211cf45ece1 100644
957 --- a/arch/arm/mm/dma-mapping.c
958 +++ b/arch/arm/mm/dma-mapping.c
959 @@ -2400,4 +2400,6 @@ void arch_teardown_dma_ops(struct device *dev)
960 return;
961
962 arm_teardown_iommu_dma_ops(dev);
963 + /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
964 + set_dma_ops(dev, NULL);
965 }
966 diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
967 index 1ee0dc0d9f10..d1cf404b8708 100644
968 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
969 +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
970 @@ -22,7 +22,7 @@
971 backlight = <&backlight>;
972 power-supply = <&pp3300_disp>;
973
974 - ports {
975 + port {
976 panel_in_edp: endpoint {
977 remote-endpoint = <&edp_out_panel>;
978 };
979 diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
980 index 2cc7c47d6a85..65637a5a4b21 100644
981 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
982 +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
983 @@ -43,7 +43,7 @@
984 backlight = <&backlight>;
985 power-supply = <&pp3300_disp>;
986
987 - ports {
988 + port {
989 panel_in_edp: endpoint {
990 remote-endpoint = <&edp_out_panel>;
991 };
992 diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
993 index fef2c0608999..b14d83919f14 100644
994 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
995 +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
996 @@ -50,7 +50,7 @@
997 pinctrl-0 = <&lcd_panel_reset>;
998 power-supply = <&vcc3v3_s0>;
999
1000 - ports {
1001 + port {
1002 panel_in_edp: endpoint {
1003 remote-endpoint = <&edp_out_panel>;
1004 };
1005 diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
1006 index e3a375c4cb83..1b151442dac1 100644
1007 --- a/arch/arm64/crypto/aes-ce-ccm-core.S
1008 +++ b/arch/arm64/crypto/aes-ce-ccm-core.S
1009 @@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data)
1010 beq 10f
1011 ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
1012 b 7b
1013 -8: mov w7, w8
1014 +8: cbz w8, 91f
1015 + mov w7, w8
1016 add w8, w8, #16
1017 9: ext v1.16b, v1.16b, v1.16b, #1
1018 adds w7, w7, #1
1019 bne 9b
1020 - eor v0.16b, v0.16b, v1.16b
1021 +91: eor v0.16b, v0.16b, v1.16b
1022 st1 {v0.16b}, [x0]
1023 10: str w8, [x3]
1024 ret
1025 diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
1026 index 68b11aa690e4..986191e8c058 100644
1027 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c
1028 +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
1029 @@ -125,7 +125,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
1030 abytes -= added;
1031 }
1032
1033 - while (abytes > AES_BLOCK_SIZE) {
1034 + while (abytes >= AES_BLOCK_SIZE) {
1035 __aes_arm64_encrypt(key->key_enc, mac, mac,
1036 num_rounds(key));
1037 crypto_xor(mac, in, AES_BLOCK_SIZE);
1038 @@ -139,8 +139,6 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
1039 num_rounds(key));
1040 crypto_xor(mac, in, abytes);
1041 *macp = abytes;
1042 - } else {
1043 - *macp = 0;
1044 }
1045 }
1046 }
1047 diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
1048 index e613a87f8b53..8432c8d0dea6 100644
1049 --- a/arch/arm64/crypto/aes-neonbs-core.S
1050 +++ b/arch/arm64/crypto/aes-neonbs-core.S
1051 @@ -971,18 +971,22 @@ CPU_LE( rev x8, x8 )
1052
1053 8: next_ctr v0
1054 st1 {v0.16b}, [x24]
1055 - cbz x23, 0f
1056 + cbz x23, .Lctr_done
1057
1058 cond_yield_neon 98b
1059 b 99b
1060
1061 -0: frame_pop
1062 +.Lctr_done:
1063 + frame_pop
1064 ret
1065
1066 /*
1067 * If we are handling the tail of the input (x6 != NULL), return the
1068 * final keystream block back to the caller.
1069 */
1070 +0: cbz x25, 8b
1071 + st1 {v0.16b}, [x25]
1072 + b 8b
1073 1: cbz x25, 8b
1074 st1 {v1.16b}, [x25]
1075 b 8b
1076 diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
1077 index 96f0cae4a022..617bcfc1b080 100644
1078 --- a/arch/arm64/crypto/crct10dif-ce-glue.c
1079 +++ b/arch/arm64/crypto/crct10dif-ce-glue.c
1080 @@ -36,26 +36,13 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
1081 unsigned int length)
1082 {
1083 u16 *crc = shash_desc_ctx(desc);
1084 - unsigned int l;
1085
1086 - if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
1087 - l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
1088 - ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
1089 -
1090 - *crc = crc_t10dif_generic(*crc, data, l);
1091 -
1092 - length -= l;
1093 - data += l;
1094 - }
1095 -
1096 - if (length > 0) {
1097 - if (may_use_simd()) {
1098 - kernel_neon_begin();
1099 - *crc = crc_t10dif_pmull(*crc, data, length);
1100 - kernel_neon_end();
1101 - } else {
1102 - *crc = crc_t10dif_generic(*crc, data, length);
1103 - }
1104 + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
1105 + kernel_neon_begin();
1106 + *crc = crc_t10dif_pmull(*crc, data, length);
1107 + kernel_neon_end();
1108 + } else {
1109 + *crc = crc_t10dif_generic(*crc, data, length);
1110 }
1111
1112 return 0;
1113 diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
1114 index 1473fc2f7ab7..89691c86640a 100644
1115 --- a/arch/arm64/include/asm/hardirq.h
1116 +++ b/arch/arm64/include/asm/hardirq.h
1117 @@ -17,8 +17,12 @@
1118 #define __ASM_HARDIRQ_H
1119
1120 #include <linux/cache.h>
1121 +#include <linux/percpu.h>
1122 #include <linux/threads.h>
1123 +#include <asm/barrier.h>
1124 #include <asm/irq.h>
1125 +#include <asm/kvm_arm.h>
1126 +#include <asm/sysreg.h>
1127
1128 #define NR_IPI 7
1129
1130 @@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
1131
1132 #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
1133
1134 +struct nmi_ctx {
1135 + u64 hcr;
1136 +};
1137 +
1138 +DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
1139 +
1140 +#define arch_nmi_enter() \
1141 + do { \
1142 + if (is_kernel_in_hyp_mode()) { \
1143 + struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
1144 + nmi_ctx->hcr = read_sysreg(hcr_el2); \
1145 + if (!(nmi_ctx->hcr & HCR_TGE)) { \
1146 + write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \
1147 + isb(); \
1148 + } \
1149 + } \
1150 + } while (0)
1151 +
1152 +#define arch_nmi_exit() \
1153 + do { \
1154 + if (is_kernel_in_hyp_mode()) { \
1155 + struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
1156 + if (!(nmi_ctx->hcr & HCR_TGE)) \
1157 + write_sysreg(nmi_ctx->hcr, hcr_el2); \
1158 + } \
1159 + } while (0)
1160 +
1161 static inline void ack_bad_irq(unsigned int irq)
1162 {
1163 extern unsigned long irq_err_count;
1164 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
1165 index 3d6d7336f871..6abe4002945f 100644
1166 --- a/arch/arm64/include/asm/kvm_host.h
1167 +++ b/arch/arm64/include/asm/kvm_host.h
1168 @@ -48,6 +48,7 @@
1169 #define KVM_REQ_SLEEP \
1170 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
1171 #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
1172 +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
1173
1174 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
1175
1176 @@ -206,6 +207,13 @@ struct kvm_cpu_context {
1177
1178 typedef struct kvm_cpu_context kvm_cpu_context_t;
1179
1180 +struct vcpu_reset_state {
1181 + unsigned long pc;
1182 + unsigned long r0;
1183 + bool be;
1184 + bool reset;
1185 +};
1186 +
1187 struct kvm_vcpu_arch {
1188 struct kvm_cpu_context ctxt;
1189
1190 @@ -295,6 +303,9 @@ struct kvm_vcpu_arch {
1191 /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
1192 u64 vsesr_el2;
1193
1194 + /* Additional reset state */
1195 + struct vcpu_reset_state reset_state;
1196 +
1197 /* True when deferrable sysregs are loaded on the physical CPU,
1198 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
1199 bool sysregs_loaded_on_cpu;
1200 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
1201 index 651a06b1980f..77ca59598c8b 100644
1202 --- a/arch/arm64/kernel/head.S
1203 +++ b/arch/arm64/kernel/head.S
1204 @@ -522,8 +522,7 @@ set_hcr:
1205 /* GICv3 system register access */
1206 mrs x0, id_aa64pfr0_el1
1207 ubfx x0, x0, #24, #4
1208 - cmp x0, #1
1209 - b.ne 3f
1210 + cbz x0, 3f
1211
1212 mrs_s x0, SYS_ICC_SRE_EL2
1213 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
1214 diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
1215 index 780a12f59a8f..92fa81798fb9 100644
1216 --- a/arch/arm64/kernel/irq.c
1217 +++ b/arch/arm64/kernel/irq.c
1218 @@ -33,6 +33,9 @@
1219
1220 unsigned long irq_err_count;
1221
1222 +/* Only access this in an NMI enter/exit */
1223 +DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
1224 +
1225 DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
1226
1227 int arch_show_interrupts(struct seq_file *p, int prec)
1228 diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
1229 index a20de58061a8..35f184a8fd85 100644
1230 --- a/arch/arm64/kernel/kgdb.c
1231 +++ b/arch/arm64/kernel/kgdb.c
1232 @@ -244,27 +244,33 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
1233
1234 static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
1235 {
1236 + if (user_mode(regs))
1237 + return DBG_HOOK_ERROR;
1238 +
1239 kgdb_handle_exception(1, SIGTRAP, 0, regs);
1240 - return 0;
1241 + return DBG_HOOK_HANDLED;
1242 }
1243 NOKPROBE_SYMBOL(kgdb_brk_fn)
1244
1245 static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
1246 {
1247 + if (user_mode(regs))
1248 + return DBG_HOOK_ERROR;
1249 +
1250 compiled_break = 1;
1251 kgdb_handle_exception(1, SIGTRAP, 0, regs);
1252
1253 - return 0;
1254 + return DBG_HOOK_HANDLED;
1255 }
1256 NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
1257
1258 static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
1259 {
1260 - if (!kgdb_single_step)
1261 + if (user_mode(regs) || !kgdb_single_step)
1262 return DBG_HOOK_ERROR;
1263
1264 kgdb_handle_exception(1, SIGTRAP, 0, regs);
1265 - return 0;
1266 + return DBG_HOOK_HANDLED;
1267 }
1268 NOKPROBE_SYMBOL(kgdb_step_brk_fn);
1269
1270 diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
1271 index 30bb13797034..2d63df112b89 100644
1272 --- a/arch/arm64/kernel/probes/kprobes.c
1273 +++ b/arch/arm64/kernel/probes/kprobes.c
1274 @@ -450,6 +450,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
1275 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1276 int retval;
1277
1278 + if (user_mode(regs))
1279 + return DBG_HOOK_ERROR;
1280 +
1281 /* return error if this is not our step */
1282 retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
1283
1284 @@ -466,6 +469,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
1285 int __kprobes
1286 kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
1287 {
1288 + if (user_mode(regs))
1289 + return DBG_HOOK_ERROR;
1290 +
1291 kprobe_handler(regs);
1292 return DBG_HOOK_HANDLED;
1293 }
1294 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
1295 index a1c32c1f2267..6290a4e81d57 100644
1296 --- a/arch/arm64/kvm/hyp/switch.c
1297 +++ b/arch/arm64/kvm/hyp/switch.c
1298 @@ -23,6 +23,7 @@
1299 #include <kvm/arm_psci.h>
1300
1301 #include <asm/cpufeature.h>
1302 +#include <asm/kprobes.h>
1303 #include <asm/kvm_asm.h>
1304 #include <asm/kvm_emulate.h>
1305 #include <asm/kvm_host.h>
1306 @@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
1307
1308 write_sysreg(kvm_get_hyp_vector(), vbar_el1);
1309 }
1310 +NOKPROBE_SYMBOL(activate_traps_vhe);
1311
1312 static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
1313 {
1314 @@ -146,6 +148,7 @@ static void deactivate_traps_vhe(void)
1315 write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
1316 write_sysreg(vectors, vbar_el1);
1317 }
1318 +NOKPROBE_SYMBOL(deactivate_traps_vhe);
1319
1320 static void __hyp_text __deactivate_traps_nvhe(void)
1321 {
1322 @@ -529,6 +532,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
1323
1324 return exit_code;
1325 }
1326 +NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
1327
1328 /* Switch to the guest for legacy non-VHE systems */
1329 int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
1330 @@ -636,6 +640,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
1331 read_sysreg_el2(esr), read_sysreg_el2(far),
1332 read_sysreg(hpfar_el2), par, vcpu);
1333 }
1334 +NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
1335
1336 void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
1337 {
1338 diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
1339 index 9ce223944983..963d669ae3a2 100644
1340 --- a/arch/arm64/kvm/hyp/sysreg-sr.c
1341 +++ b/arch/arm64/kvm/hyp/sysreg-sr.c
1342 @@ -18,6 +18,7 @@
1343 #include <linux/compiler.h>
1344 #include <linux/kvm_host.h>
1345
1346 +#include <asm/kprobes.h>
1347 #include <asm/kvm_asm.h>
1348 #include <asm/kvm_emulate.h>
1349 #include <asm/kvm_hyp.h>
1350 @@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
1351 {
1352 __sysreg_save_common_state(ctxt);
1353 }
1354 +NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
1355
1356 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
1357 {
1358 __sysreg_save_common_state(ctxt);
1359 __sysreg_save_el2_return_state(ctxt);
1360 }
1361 +NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
1362
1363 static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
1364 {
1365 @@ -171,12 +174,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
1366 {
1367 __sysreg_restore_common_state(ctxt);
1368 }
1369 +NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
1370
1371 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
1372 {
1373 __sysreg_restore_common_state(ctxt);
1374 __sysreg_restore_el2_return_state(ctxt);
1375 }
1376 +NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
1377
1378 void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
1379 {
1380 diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
1381 index e37c78bbe1ca..18b9a522a2b3 100644
1382 --- a/arch/arm64/kvm/reset.c
1383 +++ b/arch/arm64/kvm/reset.c
1384 @@ -31,6 +31,7 @@
1385 #include <asm/kvm_arm.h>
1386 #include <asm/kvm_asm.h>
1387 #include <asm/kvm_coproc.h>
1388 +#include <asm/kvm_emulate.h>
1389 #include <asm/kvm_mmu.h>
1390
1391 /*
1392 @@ -99,16 +100,33 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
1393 * This function finds the right table above and sets the registers on
1394 * the virtual CPU struct to their architecturally defined reset
1395 * values.
1396 + *
1397 + * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
1398 + * ioctl or as part of handling a request issued by another VCPU in the PSCI
1399 + * handling code. In the first case, the VCPU will not be loaded, and in the
1400 + * second case the VCPU will be loaded. Because this function operates purely
1401 + * on the memory-backed valus of system registers, we want to do a full put if
1402 + * we were loaded (handling a request) and load the values back at the end of
1403 + * the function. Otherwise we leave the state alone. In both cases, we
1404 + * disable preemption around the vcpu reset as we would otherwise race with
1405 + * preempt notifiers which also call put/load.
1406 */
1407 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
1408 {
1409 const struct kvm_regs *cpu_reset;
1410 + int ret = -EINVAL;
1411 + bool loaded;
1412 +
1413 + preempt_disable();
1414 + loaded = (vcpu->cpu != -1);
1415 + if (loaded)
1416 + kvm_arch_vcpu_put(vcpu);
1417
1418 switch (vcpu->arch.target) {
1419 default:
1420 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
1421 if (!cpu_has_32bit_el1())
1422 - return -EINVAL;
1423 + goto out;
1424 cpu_reset = &default_regs_reset32;
1425 } else {
1426 cpu_reset = &default_regs_reset;
1427 @@ -123,6 +141,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
1428 /* Reset system registers */
1429 kvm_reset_sys_regs(vcpu);
1430
1431 + /*
1432 + * Additional reset state handling that PSCI may have imposed on us.
1433 + * Must be done after all the sys_reg reset.
1434 + */
1435 + if (vcpu->arch.reset_state.reset) {
1436 + unsigned long target_pc = vcpu->arch.reset_state.pc;
1437 +
1438 + /* Gracefully handle Thumb2 entry point */
1439 + if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
1440 + target_pc &= ~1UL;
1441 + vcpu_set_thumb(vcpu);
1442 + }
1443 +
1444 + /* Propagate caller endianness */
1445 + if (vcpu->arch.reset_state.be)
1446 + kvm_vcpu_set_be(vcpu);
1447 +
1448 + *vcpu_pc(vcpu) = target_pc;
1449 + vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
1450 +
1451 + vcpu->arch.reset_state.reset = false;
1452 + }
1453 +
1454 /* Reset PMU */
1455 kvm_pmu_vcpu_reset(vcpu);
1456
1457 @@ -131,5 +172,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
1458 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
1459
1460 /* Reset timer */
1461 - return kvm_timer_vcpu_reset(vcpu);
1462 + ret = kvm_timer_vcpu_reset(vcpu);
1463 +out:
1464 + if (loaded)
1465 + kvm_arch_vcpu_load(vcpu, smp_processor_id());
1466 + preempt_enable();
1467 + return ret;
1468 }
1469 diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
1470 index 22fbbdbece3c..d112af75680b 100644
1471 --- a/arch/arm64/kvm/sys_regs.c
1472 +++ b/arch/arm64/kvm/sys_regs.c
1473 @@ -1456,7 +1456,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
1474
1475 { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
1476 { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
1477 - { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
1478 + { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
1479 };
1480
1481 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1482 @@ -2586,7 +2586,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
1483 table = get_target_table(vcpu->arch.target, true, &num);
1484 reset_sys_reg_descs(vcpu, table, num);
1485
1486 - for (num = 1; num < NR_SYS_REGS; num++)
1487 - if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
1488 - panic("Didn't reset __vcpu_sys_reg(%zi)", num);
1489 + for (num = 1; num < NR_SYS_REGS; num++) {
1490 + if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
1491 + "Didn't reset __vcpu_sys_reg(%zi)\n", num))
1492 + break;
1493 + }
1494 }
1495 diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
1496 index 997c9f20ea0f..4474e51ee53e 100644
1497 --- a/arch/m68k/Makefile
1498 +++ b/arch/m68k/Makefile
1499 @@ -58,7 +58,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200)
1500 cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200)
1501
1502 KBUILD_AFLAGS += $(cpuflags-y)
1503 -KBUILD_CFLAGS += $(cpuflags-y) -pipe
1504 +KBUILD_CFLAGS += $(cpuflags-y)
1505 +
1506 +KBUILD_CFLAGS += -pipe -ffreestanding
1507 +
1508 ifdef CONFIG_MMU
1509 # without -fno-strength-reduce the 53c7xx.c driver fails ;-(
1510 KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2
1511 diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
1512 index 2c1c53d12179..f567ace7a9e9 100644
1513 --- a/arch/mips/include/asm/kvm_host.h
1514 +++ b/arch/mips/include/asm/kvm_host.h
1515 @@ -1131,7 +1131,7 @@ static inline void kvm_arch_hardware_unsetup(void) {}
1516 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
1517 static inline void kvm_arch_free_memslot(struct kvm *kvm,
1518 struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
1519 -static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
1520 +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
1521 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
1522 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
1523 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
1524 diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
1525 index 50888388a359..02544939ef0b 100644
1526 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
1527 +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
1528 @@ -35,6 +35,14 @@ static inline int hstate_get_psize(struct hstate *hstate)
1529 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1530 static inline bool gigantic_page_supported(void)
1531 {
1532 + /*
1533 + * We used gigantic page reservation with hypervisor assist in some case.
1534 + * We cannot use runtime allocation of gigantic pages in those platforms
1535 + * This is hash translation mode LPARs.
1536 + */
1537 + if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
1538 + return false;
1539 +
1540 return true;
1541 }
1542 #endif
1543 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
1544 index 906bcbdfd2a1..bccc5051249e 100644
1545 --- a/arch/powerpc/include/asm/kvm_host.h
1546 +++ b/arch/powerpc/include/asm/kvm_host.h
1547 @@ -822,7 +822,7 @@ struct kvm_vcpu_arch {
1548 static inline void kvm_arch_hardware_disable(void) {}
1549 static inline void kvm_arch_hardware_unsetup(void) {}
1550 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
1551 -static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
1552 +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
1553 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
1554 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
1555 static inline void kvm_arch_exit(void) {}
1556 diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
1557 index 2f3ff7a27881..d85fcfea32ca 100644
1558 --- a/arch/powerpc/include/asm/powernv.h
1559 +++ b/arch/powerpc/include/asm/powernv.h
1560 @@ -23,6 +23,8 @@ extern int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
1561 unsigned long *flags, unsigned long *status,
1562 int count);
1563
1564 +void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val);
1565 +
1566 void pnv_tm_init(void);
1567 #else
1568 static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { }
1569 diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
1570 index e58c3f467db5..26b3f853cbf6 100644
1571 --- a/arch/powerpc/kernel/entry_32.S
1572 +++ b/arch/powerpc/kernel/entry_32.S
1573 @@ -745,6 +745,9 @@ fast_exception_return:
1574 mtcr r10
1575 lwz r10,_LINK(r11)
1576 mtlr r10
1577 + /* Clear the exception_marker on the stack to avoid confusing stacktrace */
1578 + li r10, 0
1579 + stw r10, 8(r11)
1580 REST_GPR(10, r11)
1581 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
1582 mtspr SPRN_NRI, r0
1583 @@ -982,6 +985,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
1584 mtcrf 0xFF,r10
1585 mtlr r11
1586
1587 + /* Clear the exception_marker on the stack to avoid confusing stacktrace */
1588 + li r10, 0
1589 + stw r10, 8(r1)
1590 /*
1591 * Once we put values in SRR0 and SRR1, we are in a state
1592 * where exceptions are not recoverable, since taking an
1593 @@ -1021,6 +1027,9 @@ exc_exit_restart_end:
1594 mtlr r11
1595 lwz r10,_CCR(r1)
1596 mtcrf 0xff,r10
1597 + /* Clear the exception_marker on the stack to avoid confusing stacktrace */
1598 + li r10, 0
1599 + stw r10, 8(r1)
1600 REST_2GPRS(9, r1)
1601 .globl exc_exit_restart
1602 exc_exit_restart:
1603 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
1604 index bb6ac471a784..d29f2dca725b 100644
1605 --- a/arch/powerpc/kernel/process.c
1606 +++ b/arch/powerpc/kernel/process.c
1607 @@ -180,7 +180,7 @@ static void __giveup_fpu(struct task_struct *tsk)
1608
1609 save_fpu(tsk);
1610 msr = tsk->thread.regs->msr;
1611 - msr &= ~MSR_FP;
1612 + msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
1613 #ifdef CONFIG_VSX
1614 if (cpu_has_feature(CPU_FTR_VSX))
1615 msr &= ~MSR_VSX;
1616 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
1617 index 9667666eb18e..e08b32ccf1d9 100644
1618 --- a/arch/powerpc/kernel/ptrace.c
1619 +++ b/arch/powerpc/kernel/ptrace.c
1620 @@ -561,6 +561,7 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
1621 /*
1622 * Copy out only the low-order word of vrsave.
1623 */
1624 + int start, end;
1625 union {
1626 elf_vrreg_t reg;
1627 u32 word;
1628 @@ -569,8 +570,10 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
1629
1630 vrsave.word = target->thread.vrsave;
1631
1632 + start = 33 * sizeof(vector128);
1633 + end = start + sizeof(vrsave);
1634 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1635 - 33 * sizeof(vector128), -1);
1636 + start, end);
1637 }
1638
1639 return ret;
1640 @@ -608,6 +611,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
1641 /*
1642 * We use only the first word of vrsave.
1643 */
1644 + int start, end;
1645 union {
1646 elf_vrreg_t reg;
1647 u32 word;
1648 @@ -616,8 +620,10 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
1649
1650 vrsave.word = target->thread.vrsave;
1651
1652 + start = 33 * sizeof(vector128);
1653 + end = start + sizeof(vrsave);
1654 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1655 - 33 * sizeof(vector128), -1);
1656 + start, end);
1657 if (!ret)
1658 target->thread.vrsave = vrsave.word;
1659 }
1660 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
1661 index 8689a02b7df8..02fe6d020174 100644
1662 --- a/arch/powerpc/kernel/traps.c
1663 +++ b/arch/powerpc/kernel/traps.c
1664 @@ -767,15 +767,15 @@ void machine_check_exception(struct pt_regs *regs)
1665 if (check_io_access(regs))
1666 goto bail;
1667
1668 - /* Must die if the interrupt is not recoverable */
1669 - if (!(regs->msr & MSR_RI))
1670 - nmi_panic(regs, "Unrecoverable Machine check");
1671 -
1672 if (!nested)
1673 nmi_exit();
1674
1675 die("Machine check", regs, SIGBUS);
1676
1677 + /* Must die if the interrupt is not recoverable */
1678 + if (!(regs->msr & MSR_RI))
1679 + nmi_panic(regs, "Unrecoverable Machine check");
1680 +
1681 return;
1682
1683 bail:
1684 @@ -1545,8 +1545,8 @@ bail:
1685
1686 void StackOverflow(struct pt_regs *regs)
1687 {
1688 - printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
1689 - current, regs->gpr[1]);
1690 + pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
1691 + current->comm, task_pid_nr(current), regs->gpr[1]);
1692 debugger(regs);
1693 show_regs(regs);
1694 panic("kernel stack overflow");
1695 diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
1696 index 3d1ecd211776..8137f77abad5 100644
1697 --- a/arch/powerpc/platforms/83xx/suspend-asm.S
1698 +++ b/arch/powerpc/platforms/83xx/suspend-asm.S
1699 @@ -26,13 +26,13 @@
1700 #define SS_MSR 0x74
1701 #define SS_SDR1 0x78
1702 #define SS_LR 0x7c
1703 -#define SS_SPRG 0x80 /* 4 SPRGs */
1704 -#define SS_DBAT 0x90 /* 8 DBATs */
1705 -#define SS_IBAT 0xd0 /* 8 IBATs */
1706 -#define SS_TB 0x110
1707 -#define SS_CR 0x118
1708 -#define SS_GPREG 0x11c /* r12-r31 */
1709 -#define STATE_SAVE_SIZE 0x16c
1710 +#define SS_SPRG 0x80 /* 8 SPRGs */
1711 +#define SS_DBAT 0xa0 /* 8 DBATs */
1712 +#define SS_IBAT 0xe0 /* 8 IBATs */
1713 +#define SS_TB 0x120
1714 +#define SS_CR 0x128
1715 +#define SS_GPREG 0x12c /* r12-r31 */
1716 +#define STATE_SAVE_SIZE 0x17c
1717
1718 .section .data
1719 .align 5
1720 @@ -103,6 +103,16 @@ _GLOBAL(mpc83xx_enter_deep_sleep)
1721 stw r7, SS_SPRG+12(r3)
1722 stw r8, SS_SDR1(r3)
1723
1724 + mfspr r4, SPRN_SPRG4
1725 + mfspr r5, SPRN_SPRG5
1726 + mfspr r6, SPRN_SPRG6
1727 + mfspr r7, SPRN_SPRG7
1728 +
1729 + stw r4, SS_SPRG+16(r3)
1730 + stw r5, SS_SPRG+20(r3)
1731 + stw r6, SS_SPRG+24(r3)
1732 + stw r7, SS_SPRG+28(r3)
1733 +
1734 mfspr r4, SPRN_DBAT0U
1735 mfspr r5, SPRN_DBAT0L
1736 mfspr r6, SPRN_DBAT1U
1737 @@ -493,6 +503,16 @@ mpc83xx_deep_resume:
1738 mtspr SPRN_IBAT7U, r6
1739 mtspr SPRN_IBAT7L, r7
1740
1741 + lwz r4, SS_SPRG+16(r3)
1742 + lwz r5, SS_SPRG+20(r3)
1743 + lwz r6, SS_SPRG+24(r3)
1744 + lwz r7, SS_SPRG+28(r3)
1745 +
1746 + mtspr SPRN_SPRG4, r4
1747 + mtspr SPRN_SPRG5, r5
1748 + mtspr SPRN_SPRG6, r6
1749 + mtspr SPRN_SPRG7, r7
1750 +
1751 lwz r4, SS_SPRG+0(r3)
1752 lwz r5, SS_SPRG+4(r3)
1753 lwz r6, SS_SPRG+8(r3)
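
Note on the suspend-asm.S hunk: the save area grows because all eight SPRGs are now preserved, which shifts every later offset. A standalone recomputation of the layout (32-bit registers, BATs as upper/lower pairs, 64-bit timebase) that reproduces the new constants:

    #include <stdio.h>

    int main(void)
    {
        unsigned int off = 0x80;  /* SS_SPRG */
        printf("SS_SPRG         = 0x%x\n", off);
        off += 8 * 4;             /* 8 SPRGs */
        printf("SS_DBAT         = 0x%x\n", off); /* 0xa0 */
        off += 8 * 2 * 4;         /* 8 DBAT U/L pairs */
        printf("SS_IBAT         = 0x%x\n", off); /* 0xe0 */
        off += 8 * 2 * 4;         /* 8 IBAT U/L pairs */
        printf("SS_TB           = 0x%x\n", off); /* 0x120 */
        off += 8;                 /* 64-bit timebase */
        printf("SS_CR           = 0x%x\n", off); /* 0x128 */
        off += 4;                 /* CR */
        printf("SS_GPREG        = 0x%x\n", off); /* 0x12c */
        off += 20 * 4;            /* r12..r31 */
        printf("STATE_SAVE_SIZE = 0x%x\n", off); /* 0x17c */
        return 0;
    }
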
1754 diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
1755 index 403523c061ba..343bffd20fca 100644
1756 --- a/arch/powerpc/platforms/embedded6xx/wii.c
1757 +++ b/arch/powerpc/platforms/embedded6xx/wii.c
1758 @@ -83,6 +83,10 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top)
1759 /* MEM2 64MB@0x10000000 */
1760 delta = wii_hole_start + wii_hole_size;
1761 size = top - delta;
1762 +
1763 + if (__map_without_bats)
1764 + return delta;
1765 +
1766 for (bl = 128<<10; bl < max_size; bl <<= 1) {
1767 if (bl * 2 > size)
1768 break;
1769 diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
1770 index 35f699ebb662..e52f9b06dd9c 100644
1771 --- a/arch/powerpc/platforms/powernv/idle.c
1772 +++ b/arch/powerpc/platforms/powernv/idle.c
1773 @@ -458,7 +458,8 @@ EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
1774 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1775
1776 #ifdef CONFIG_HOTPLUG_CPU
1777 -static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
1778 +
1779 +void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
1780 {
1781 u64 pir = get_hard_smp_processor_id(cpu);
1782
1783 @@ -481,20 +482,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
1784 {
1785 unsigned long srr1;
1786 u32 idle_states = pnv_get_supported_cpuidle_states();
1787 - u64 lpcr_val;
1788 -
1789 - /*
1790 - * We don't want to take decrementer interrupts while we are
1791 - * offline, so clear LPCR:PECE1. We keep PECE2 (and
1792 - * LPCR_PECE_HVEE on P9) enabled as to let IPIs in.
1793 - *
1794 - * If the CPU gets woken up by a special wakeup, ensure that
1795 - * the SLW engine sets LPCR with decrementer bit cleared, else
1796 - * the CPU will come back to the kernel due to a spurious
1797 - * wakeup.
1798 - */
1799 - lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
1800 - pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
1801
1802 __ppc64_runlatch_off();
1803
1804 @@ -526,16 +513,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
1805
1806 __ppc64_runlatch_on();
1807
1808 - /*
1809 - * Re-enable decrementer interrupts in LPCR.
1810 - *
1811 - * Further, we want stop states to be woken up by decrementer
1812 - * for non-hotplug cases. So program the LPCR via stop api as
1813 - * well.
1814 - */
1815 - lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
1816 - pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
1817 -
1818 return srr1;
1819 }
1820 #endif
1821 diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
1822 index acd3206dfae3..06628c71cef6 100644
1823 --- a/arch/powerpc/platforms/powernv/opal-msglog.c
1824 +++ b/arch/powerpc/platforms/powernv/opal-msglog.c
1825 @@ -98,7 +98,7 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
1826 }
1827
1828 static struct bin_attribute opal_msglog_attr = {
1829 - .attr = {.name = "msglog", .mode = 0444},
1830 + .attr = {.name = "msglog", .mode = 0400},
1831 .read = opal_msglog_read
1832 };
1833
1834 diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
1835 index 0d354e19ef92..db09c7022635 100644
1836 --- a/arch/powerpc/platforms/powernv/smp.c
1837 +++ b/arch/powerpc/platforms/powernv/smp.c
1838 @@ -39,6 +39,7 @@
1839 #include <asm/cpuidle.h>
1840 #include <asm/kexec.h>
1841 #include <asm/reg.h>
1842 +#include <asm/powernv.h>
1843
1844 #include "powernv.h"
1845
1846 @@ -153,6 +154,7 @@ static void pnv_smp_cpu_kill_self(void)
1847 {
1848 unsigned int cpu;
1849 unsigned long srr1, wmask;
1850 + u64 lpcr_val;
1851
1852 /* Standard hot unplug procedure */
1853 /*
1854 @@ -174,6 +176,19 @@ static void pnv_smp_cpu_kill_self(void)
1855 if (cpu_has_feature(CPU_FTR_ARCH_207S))
1856 wmask = SRR1_WAKEMASK_P8;
1857
1858 + /*
1859 + * We don't want to take decrementer interrupts while we are
1860 + * offline, so clear LPCR:PECE1. We keep PECE2 (and
1861 + * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in.
1862 + *
1863 + * If the CPU gets woken up by a special wakeup, ensure that
1864 + * the SLW engine sets LPCR with decrementer bit cleared, else
1865 + * the CPU will come back to the kernel due to a spurious
1866 + * wakeup.
1867 + */
1868 + lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
1869 + pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
1870 +
1871 while (!generic_check_cpu_restart(cpu)) {
1872 /*
1873 * Clear IPI flag, since we don't handle IPIs while
1874 @@ -246,6 +261,16 @@ static void pnv_smp_cpu_kill_self(void)
1875
1876 }
1877
1878 + /*
1879 + * Re-enable decrementer interrupts in LPCR.
1880 + *
1881 + * Further, we want stop states to be woken up by decrementer
1882 + * for non-hotplug cases. So program the LPCR via stop api as
1883 + * well.
1884 + */
1885 + lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
1886 + pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
1887 +
1888 DBG("CPU%d coming online...\n", cpu);
1889 }
1890
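
Note on the powernv hunks: the LPCR programming moves out of pnv_cpu_offline() and into pnv_smp_cpu_kill_self(), so PECE1 (the decrementer wakeup enable) stays cleared for the entire offline loop and is only restored, via both mtspr and the stop API, when the CPU actually comes back online. The bit manipulation itself, as a standalone sketch (the bit position here is illustrative, not the architected one):

    #include <stdint.h>
    #include <stdio.h>

    #define LPCR_PECE1 (1ULL << 14) /* illustrative bit position */

    int main(void)
    {
        uint64_t lpcr = 0xffffULL;              /* pretend mfspr(SPRN_LPCR) */
        uint64_t offline = lpcr & ~LPCR_PECE1;  /* no decrementer wakeups */
        uint64_t online = offline | LPCR_PECE1; /* re-armed on the way back */

        printf("offline %#llx -> online %#llx\n",
               (unsigned long long)offline, (unsigned long long)online);
        return 0;
    }
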
1891 diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
1892 index 29c940bf8506..dad110e9f41b 100644
1893 --- a/arch/s390/include/asm/kvm_host.h
1894 +++ b/arch/s390/include/asm/kvm_host.h
1895 @@ -865,7 +865,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
1896 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
1897 static inline void kvm_arch_free_memslot(struct kvm *kvm,
1898 struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
1899 -static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
1900 +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
1901 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
1902 static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1903 struct kvm_memory_slot *slot) {}
1904 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
1905 index a0097f8bada8..5f85e0dfa66d 100644
1906 --- a/arch/s390/kernel/setup.c
1907 +++ b/arch/s390/kernel/setup.c
1908 @@ -303,7 +303,7 @@ early_param("vmalloc", parse_vmalloc);
1909
1910 void *restart_stack __section(.data);
1911
1912 -static void __init setup_lowcore(void)
1913 +static void __init setup_lowcore_dat_off(void)
1914 {
1915 struct lowcore *lc;
1916
1917 @@ -314,19 +314,16 @@ static void __init setup_lowcore(void)
1918 lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
1919 lc->restart_psw.mask = PSW_KERNEL_BITS;
1920 lc->restart_psw.addr = (unsigned long) restart_int_handler;
1921 - lc->external_new_psw.mask = PSW_KERNEL_BITS |
1922 - PSW_MASK_DAT | PSW_MASK_MCHECK;
1923 + lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
1924 lc->external_new_psw.addr = (unsigned long) ext_int_handler;
1925 lc->svc_new_psw.mask = PSW_KERNEL_BITS |
1926 - PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
1927 + PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
1928 lc->svc_new_psw.addr = (unsigned long) system_call;
1929 - lc->program_new_psw.mask = PSW_KERNEL_BITS |
1930 - PSW_MASK_DAT | PSW_MASK_MCHECK;
1931 + lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
1932 lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
1933 lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
1934 lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
1935 - lc->io_new_psw.mask = PSW_KERNEL_BITS |
1936 - PSW_MASK_DAT | PSW_MASK_MCHECK;
1937 + lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
1938 lc->io_new_psw.addr = (unsigned long) io_int_handler;
1939 lc->clock_comparator = clock_comparator_max;
1940 lc->kernel_stack = ((unsigned long) &init_thread_union)
1941 @@ -388,6 +385,16 @@ static void __init setup_lowcore(void)
1942 lowcore_ptr[0] = lc;
1943 }
1944
1945 +static void __init setup_lowcore_dat_on(void)
1946 +{
1947 + __ctl_clear_bit(0, 28);
1948 + S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
1949 + S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
1950 + S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
1951 + S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
1952 + __ctl_set_bit(0, 28);
1953 +}
1954 +
1955 static struct resource code_resource = {
1956 .name = "Kernel code",
1957 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
1958 @@ -946,7 +953,7 @@ void __init setup_arch(char **cmdline_p)
1959 #endif
1960
1961 setup_resources();
1962 - setup_lowcore();
1963 + setup_lowcore_dat_off();
1964 smp_fill_possible_mask();
1965 cpu_detect_mhz_feature();
1966 cpu_init();
1967 @@ -959,6 +966,12 @@ void __init setup_arch(char **cmdline_p)
1968 */
1969 paging_init();
1970
1971 + /*
1972 + * After paging_init created the kernel page table, the new PSWs
1973 + * in lowcore can now run with DAT enabled.
1974 + */
1975 + setup_lowcore_dat_on();
1976 +
1977 /* Setup default console */
1978 conmode_default();
1979 set_preferred_console();
1980 diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
1981 index 2a356b948720..3ea71b871813 100644
1982 --- a/arch/x86/crypto/aegis128-aesni-glue.c
1983 +++ b/arch/x86/crypto/aegis128-aesni-glue.c
1984 @@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad(
1985 }
1986
1987 static void crypto_aegis128_aesni_process_crypt(
1988 - struct aegis_state *state, struct aead_request *req,
1989 + struct aegis_state *state, struct skcipher_walk *walk,
1990 const struct aegis_crypt_ops *ops)
1991 {
1992 - struct skcipher_walk walk;
1993 - u8 *src, *dst;
1994 - unsigned int chunksize, base;
1995 -
1996 - ops->skcipher_walk_init(&walk, req, false);
1997 -
1998 - while (walk.nbytes) {
1999 - src = walk.src.virt.addr;
2000 - dst = walk.dst.virt.addr;
2001 - chunksize = walk.nbytes;
2002 -
2003 - ops->crypt_blocks(state, chunksize, src, dst);
2004 -
2005 - base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
2006 - src += base;
2007 - dst += base;
2008 - chunksize &= AEGIS128_BLOCK_SIZE - 1;
2009 -
2010 - if (chunksize > 0)
2011 - ops->crypt_tail(state, chunksize, src, dst);
2012 + while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
2013 + ops->crypt_blocks(state,
2014 + round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
2015 + walk->src.virt.addr, walk->dst.virt.addr);
2016 + skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
2017 + }
2018
2019 - skcipher_walk_done(&walk, 0);
2020 + if (walk->nbytes) {
2021 + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
2022 + walk->dst.virt.addr);
2023 + skcipher_walk_done(walk, 0);
2024 }
2025 }
2026
2027 @@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
2028 {
2029 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2030 struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
2031 + struct skcipher_walk walk;
2032 struct aegis_state state;
2033
2034 + ops->skcipher_walk_init(&walk, req, true);
2035 +
2036 kernel_fpu_begin();
2037
2038 crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
2039 crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
2040 - crypto_aegis128_aesni_process_crypt(&state, req, ops);
2041 + crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
2042 crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
2043
2044 kernel_fpu_end();
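
Note on the aegis128-aesni hunk (the aegis128l, aegis256 and both morus glues below get the identical treatment): the skcipher walk is now initialized before kernel_fpu_begin(), with the atomic flag set so later walk steps cannot sleep while FPU state is live, and each step processes only whole blocks, handing the remainder back through skcipher_walk_done() so a sub-block tail can only appear on the final step. A standalone sketch of that chunking discipline (step sizes illustrative):

    #include <stdio.h>

    #define AEGIS128_BLOCK_SIZE 16

    int main(void)
    {
        unsigned int steps[] = { 48, 23, 5 }; /* walk.nbytes per step */

        for (int i = 0; i < 3; i++) {
            unsigned int n = steps[i];

            if (n >= AEGIS128_BLOCK_SIZE)
                printf("crypt_blocks(%u), %u bytes handed back\n",
                       n - n % AEGIS128_BLOCK_SIZE, n % AEGIS128_BLOCK_SIZE);
            else
                printf("crypt_tail(%u)\n", n);
        }
        return 0;
    }
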
2045 diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
2046 index dbe8bb980da1..1b1b39c66c5e 100644
2047 --- a/arch/x86/crypto/aegis128l-aesni-glue.c
2048 +++ b/arch/x86/crypto/aegis128l-aesni-glue.c
2049 @@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad(
2050 }
2051
2052 static void crypto_aegis128l_aesni_process_crypt(
2053 - struct aegis_state *state, struct aead_request *req,
2054 + struct aegis_state *state, struct skcipher_walk *walk,
2055 const struct aegis_crypt_ops *ops)
2056 {
2057 - struct skcipher_walk walk;
2058 - u8 *src, *dst;
2059 - unsigned int chunksize, base;
2060 -
2061 - ops->skcipher_walk_init(&walk, req, false);
2062 -
2063 - while (walk.nbytes) {
2064 - src = walk.src.virt.addr;
2065 - dst = walk.dst.virt.addr;
2066 - chunksize = walk.nbytes;
2067 -
2068 - ops->crypt_blocks(state, chunksize, src, dst);
2069 -
2070 - base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
2071 - src += base;
2072 - dst += base;
2073 - chunksize &= AEGIS128L_BLOCK_SIZE - 1;
2074 -
2075 - if (chunksize > 0)
2076 - ops->crypt_tail(state, chunksize, src, dst);
2077 + while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
2078 + ops->crypt_blocks(state, round_down(walk->nbytes,
2079 + AEGIS128L_BLOCK_SIZE),
2080 + walk->src.virt.addr, walk->dst.virt.addr);
2081 + skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
2082 + }
2083
2084 - skcipher_walk_done(&walk, 0);
2085 + if (walk->nbytes) {
2086 + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
2087 + walk->dst.virt.addr);
2088 + skcipher_walk_done(walk, 0);
2089 }
2090 }
2091
2092 @@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
2093 {
2094 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2095 struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
2096 + struct skcipher_walk walk;
2097 struct aegis_state state;
2098
2099 + ops->skcipher_walk_init(&walk, req, true);
2100 +
2101 kernel_fpu_begin();
2102
2103 crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
2104 crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
2105 - crypto_aegis128l_aesni_process_crypt(&state, req, ops);
2106 + crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
2107 crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
2108
2109 kernel_fpu_end();
2110 diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
2111 index 8bebda2de92f..6227ca3220a0 100644
2112 --- a/arch/x86/crypto/aegis256-aesni-glue.c
2113 +++ b/arch/x86/crypto/aegis256-aesni-glue.c
2114 @@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad(
2115 }
2116
2117 static void crypto_aegis256_aesni_process_crypt(
2118 - struct aegis_state *state, struct aead_request *req,
2119 + struct aegis_state *state, struct skcipher_walk *walk,
2120 const struct aegis_crypt_ops *ops)
2121 {
2122 - struct skcipher_walk walk;
2123 - u8 *src, *dst;
2124 - unsigned int chunksize, base;
2125 -
2126 - ops->skcipher_walk_init(&walk, req, false);
2127 -
2128 - while (walk.nbytes) {
2129 - src = walk.src.virt.addr;
2130 - dst = walk.dst.virt.addr;
2131 - chunksize = walk.nbytes;
2132 -
2133 - ops->crypt_blocks(state, chunksize, src, dst);
2134 -
2135 - base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
2136 - src += base;
2137 - dst += base;
2138 - chunksize &= AEGIS256_BLOCK_SIZE - 1;
2139 -
2140 - if (chunksize > 0)
2141 - ops->crypt_tail(state, chunksize, src, dst);
2142 + while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
2143 + ops->crypt_blocks(state,
2144 + round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
2145 + walk->src.virt.addr, walk->dst.virt.addr);
2146 + skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
2147 + }
2148
2149 - skcipher_walk_done(&walk, 0);
2150 + if (walk->nbytes) {
2151 + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
2152 + walk->dst.virt.addr);
2153 + skcipher_walk_done(walk, 0);
2154 }
2155 }
2156
2157 @@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req,
2158 {
2159 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2160 struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
2161 + struct skcipher_walk walk;
2162 struct aegis_state state;
2163
2164 + ops->skcipher_walk_init(&walk, req, true);
2165 +
2166 kernel_fpu_begin();
2167
2168 crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
2169 crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
2170 - crypto_aegis256_aesni_process_crypt(&state, req, ops);
2171 + crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
2172 crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
2173
2174 kernel_fpu_end();
2175 diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
2176 index e4b78f962874..917f25e4d0a8 100644
2177 --- a/arch/x86/crypto/aesni-intel_glue.c
2178 +++ b/arch/x86/crypto/aesni-intel_glue.c
2179 @@ -830,11 +830,14 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
2180 scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
2181 }
2182
2183 - src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
2184 - scatterwalk_start(&src_sg_walk, src_sg);
2185 - if (req->src != req->dst) {
2186 - dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
2187 - scatterwalk_start(&dst_sg_walk, dst_sg);
2188 + if (left) {
2189 + src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
2190 + scatterwalk_start(&src_sg_walk, src_sg);
2191 + if (req->src != req->dst) {
2192 + dst_sg = scatterwalk_ffwd(dst_start, req->dst,
2193 + req->assoclen);
2194 + scatterwalk_start(&dst_sg_walk, dst_sg);
2195 + }
2196 }
2197
2198 kernel_fpu_begin();
2199 diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
2200 index 0dccdda1eb3a..7e600f8bcdad 100644
2201 --- a/arch/x86/crypto/morus1280_glue.c
2202 +++ b/arch/x86/crypto/morus1280_glue.c
2203 @@ -85,31 +85,20 @@ static void crypto_morus1280_glue_process_ad(
2204
2205 static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
2206 struct morus1280_ops ops,
2207 - struct aead_request *req)
2208 + struct skcipher_walk *walk)
2209 {
2210 - struct skcipher_walk walk;
2211 - u8 *cursor_src, *cursor_dst;
2212 - unsigned int chunksize, base;
2213 -
2214 - ops.skcipher_walk_init(&walk, req, false);
2215 -
2216 - while (walk.nbytes) {
2217 - cursor_src = walk.src.virt.addr;
2218 - cursor_dst = walk.dst.virt.addr;
2219 - chunksize = walk.nbytes;
2220 -
2221 - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
2222 -
2223 - base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1);
2224 - cursor_src += base;
2225 - cursor_dst += base;
2226 - chunksize &= MORUS1280_BLOCK_SIZE - 1;
2227 -
2228 - if (chunksize > 0)
2229 - ops.crypt_tail(state, cursor_src, cursor_dst,
2230 - chunksize);
2231 + while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
2232 + ops.crypt_blocks(state, walk->src.virt.addr,
2233 + walk->dst.virt.addr,
2234 + round_down(walk->nbytes,
2235 + MORUS1280_BLOCK_SIZE));
2236 + skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
2237 + }
2238
2239 - skcipher_walk_done(&walk, 0);
2240 + if (walk->nbytes) {
2241 + ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
2242 + walk->nbytes);
2243 + skcipher_walk_done(walk, 0);
2244 }
2245 }
2246
2247 @@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(struct aead_request *req,
2248 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2249 struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
2250 struct morus1280_state state;
2251 + struct skcipher_walk walk;
2252 +
2253 + ops.skcipher_walk_init(&walk, req, true);
2254
2255 kernel_fpu_begin();
2256
2257 ctx->ops->init(&state, &ctx->key, req->iv);
2258 crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
2259 - crypto_morus1280_glue_process_crypt(&state, ops, req);
2260 + crypto_morus1280_glue_process_crypt(&state, ops, &walk);
2261 ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
2262
2263 kernel_fpu_end();
2264 diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
2265 index 7b58fe4d9bd1..cb3a81732016 100644
2266 --- a/arch/x86/crypto/morus640_glue.c
2267 +++ b/arch/x86/crypto/morus640_glue.c
2268 @@ -85,31 +85,19 @@ static void crypto_morus640_glue_process_ad(
2269
2270 static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
2271 struct morus640_ops ops,
2272 - struct aead_request *req)
2273 + struct skcipher_walk *walk)
2274 {
2275 - struct skcipher_walk walk;
2276 - u8 *cursor_src, *cursor_dst;
2277 - unsigned int chunksize, base;
2278 -
2279 - ops.skcipher_walk_init(&walk, req, false);
2280 -
2281 - while (walk.nbytes) {
2282 - cursor_src = walk.src.virt.addr;
2283 - cursor_dst = walk.dst.virt.addr;
2284 - chunksize = walk.nbytes;
2285 -
2286 - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
2287 -
2288 - base = chunksize & ~(MORUS640_BLOCK_SIZE - 1);
2289 - cursor_src += base;
2290 - cursor_dst += base;
2291 - chunksize &= MORUS640_BLOCK_SIZE - 1;
2292 -
2293 - if (chunksize > 0)
2294 - ops.crypt_tail(state, cursor_src, cursor_dst,
2295 - chunksize);
2296 + while (walk->nbytes >= MORUS640_BLOCK_SIZE) {
2297 + ops.crypt_blocks(state, walk->src.virt.addr,
2298 + walk->dst.virt.addr,
2299 + round_down(walk->nbytes, MORUS640_BLOCK_SIZE));
2300 + skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE);
2301 + }
2302
2303 - skcipher_walk_done(&walk, 0);
2304 + if (walk->nbytes) {
2305 + ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
2306 + walk->nbytes);
2307 + skcipher_walk_done(walk, 0);
2308 }
2309 }
2310
2311 @@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(struct aead_request *req,
2312 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2313 struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
2314 struct morus640_state state;
2315 + struct skcipher_walk walk;
2316 +
2317 + ops.skcipher_walk_init(&walk, req, true);
2318
2319 kernel_fpu_begin();
2320
2321 ctx->ops->init(&state, &ctx->key, req->iv);
2322 crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
2323 - crypto_morus640_glue_process_crypt(&state, ops, req);
2324 + crypto_morus640_glue_process_crypt(&state, ops, &walk);
2325 ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
2326
2327 kernel_fpu_end();
2328 diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
2329 index 27a461414b30..2690135bf83f 100644
2330 --- a/arch/x86/events/intel/uncore.c
2331 +++ b/arch/x86/events/intel/uncore.c
2332 @@ -740,6 +740,7 @@ static int uncore_pmu_event_init(struct perf_event *event)
2333 /* fixed counters have event field hardcoded to zero */
2334 hwc->config = 0ULL;
2335 } else if (is_freerunning_event(event)) {
2336 + hwc->config = event->attr.config;
2337 if (!check_valid_freerunning_event(box, event))
2338 return -EINVAL;
2339 event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
2340 diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
2341 index e17ab885b1e9..cc6dd4f78158 100644
2342 --- a/arch/x86/events/intel/uncore.h
2343 +++ b/arch/x86/events/intel/uncore.h
2344 @@ -285,8 +285,8 @@ static inline
2345 unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
2346 struct perf_event *event)
2347 {
2348 - unsigned int type = uncore_freerunning_type(event->attr.config);
2349 - unsigned int idx = uncore_freerunning_idx(event->attr.config);
2350 + unsigned int type = uncore_freerunning_type(event->hw.config);
2351 + unsigned int idx = uncore_freerunning_idx(event->hw.config);
2352 struct intel_uncore_pmu *pmu = box->pmu;
2353
2354 return pmu->type->freerunning[type].counter_base +
2355 @@ -360,7 +360,7 @@ static inline
2356 unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
2357 struct perf_event *event)
2358 {
2359 - unsigned int type = uncore_freerunning_type(event->attr.config);
2360 + unsigned int type = uncore_freerunning_type(event->hw.config);
2361
2362 return box->pmu->type->freerunning[type].bits;
2363 }
2364 @@ -368,7 +368,7 @@ unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
2365 static inline int uncore_num_freerunning(struct intel_uncore_box *box,
2366 struct perf_event *event)
2367 {
2368 - unsigned int type = uncore_freerunning_type(event->attr.config);
2369 + unsigned int type = uncore_freerunning_type(event->hw.config);
2370
2371 return box->pmu->type->freerunning[type].num_counters;
2372 }
2373 @@ -382,8 +382,8 @@ static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
2374 static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
2375 struct perf_event *event)
2376 {
2377 - unsigned int type = uncore_freerunning_type(event->attr.config);
2378 - unsigned int idx = uncore_freerunning_idx(event->attr.config);
2379 + unsigned int type = uncore_freerunning_type(event->hw.config);
2380 + unsigned int idx = uncore_freerunning_idx(event->hw.config);
2381
2382 return (type < uncore_num_freerunning_types(box, event)) &&
2383 (idx < uncore_num_freerunning(box, event));
2384 diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
2385 index bfa25814fe5f..2d328386f83a 100644
2386 --- a/arch/x86/events/intel/uncore_snb.c
2387 +++ b/arch/x86/events/intel/uncore_snb.c
2388 @@ -444,9 +444,11 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
2389
2390 /* must be done before validate_group */
2391 event->hw.event_base = base;
2392 - event->hw.config = cfg;
2393 event->hw.idx = idx;
2394
2395 + /* Convert to standard encoding format for freerunning counters */
2396 + event->hw.config = ((cfg - 1) << 8) | 0x10ff;
2397 +
2398 /* no group validation needed, we have free running counters */
2399
2400 return 0;
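
Note on the uncore hunks: the freerunning helpers now decode type and index from event->hw.config, so uncore_pmu_event_init() must populate hw.config before check_valid_freerunning_event() runs, and the SNB IMC driver converts its legacy cfg values into the standard encoding. A standalone sketch of that conversion for the two SNB IMC events:

    #include <stdio.h>

    int main(void)
    {
        /* cfg 1 = data reads, cfg 2 = data writes (SNB IMC) */
        for (unsigned int cfg = 1; cfg <= 2; cfg++)
            printf("cfg %#x -> hw.config %#x\n",
                   cfg, ((cfg - 1) << 8) | 0x10ff);
        return 0;
    }
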
2401 diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
2402 index 7ed08a7c3398..0ad25cc895ae 100644
2403 --- a/arch/x86/include/asm/intel-family.h
2404 +++ b/arch/x86/include/asm/intel-family.h
2405 @@ -55,6 +55,8 @@
2406
2407 #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66
2408
2409 +#define INTEL_FAM6_ICELAKE_MOBILE 0x7E
2410 +
2411 /* "Small Core" Processors (Atom) */
2412
2413 #define INTEL_FAM6_ATOM_PINEVIEW 0x1C
2414 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
2415 index 728dc661ebb6..46f0b621bd37 100644
2416 --- a/arch/x86/include/asm/kvm_host.h
2417 +++ b/arch/x86/include/asm/kvm_host.h
2418 @@ -1194,7 +1194,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
2419 struct kvm_memory_slot *slot,
2420 gfn_t gfn_offset, unsigned long mask);
2421 void kvm_mmu_zap_all(struct kvm *kvm);
2422 -void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
2423 +void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
2424 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
2425 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
2426
2427 diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
2428 index 6adf6e6c2933..544bd41a514c 100644
2429 --- a/arch/x86/kernel/kprobes/opt.c
2430 +++ b/arch/x86/kernel/kprobes/opt.c
2431 @@ -141,6 +141,11 @@ asm (
2432
2433 void optprobe_template_func(void);
2434 STACK_FRAME_NON_STANDARD(optprobe_template_func);
2435 +NOKPROBE_SYMBOL(optprobe_template_func);
2436 +NOKPROBE_SYMBOL(optprobe_template_entry);
2437 +NOKPROBE_SYMBOL(optprobe_template_val);
2438 +NOKPROBE_SYMBOL(optprobe_template_call);
2439 +NOKPROBE_SYMBOL(optprobe_template_end);
2440
2441 #define TMPL_MOVE_IDX \
2442 ((long)optprobe_template_val - (long)optprobe_template_entry)
2443 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
2444 index 1b82bc7c3cca..779ed52047d1 100644
2445 --- a/arch/x86/kvm/mmu.c
2446 +++ b/arch/x86/kvm/mmu.c
2447 @@ -5774,13 +5774,30 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
2448 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
2449 }
2450
2451 -void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
2452 +void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
2453 {
2454 + gen &= MMIO_GEN_MASK;
2455 +
2456 + /*
2457 + * Shift to eliminate the "update in-progress" flag, which isn't
2458 + * included in the spte's generation number.
2459 + */
2460 + gen >>= 1;
2461 +
2462 + /*
2463 + * Generation numbers are incremented in multiples of the number of
2464 + * address spaces in order to provide unique generations across all
2465 + * address spaces. Strip what is effectively the address space
2466 + * modifier prior to checking for a wrap of the MMIO generation so
2467 + * that a wrap in any address space is detected.
2468 + */
2469 + gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
2470 +
2471 /*
2472 - * The very rare case: if the generation-number is round,
2473 + * The very rare case: if the MMIO generation number has wrapped,
2474 * zap all shadow pages.
2475 */
2476 - if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
2477 + if (unlikely(gen == 0)) {
2478 kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
2479 kvm_mmu_invalidate_zap_all_pages(kvm);
2480 }
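
Note on the mmu.c hunk: the generation handed to kvm_mmu_invalidate_mmio_sptes() is normalized before the wrap test, in this order: mask to the MMIO-visible bits, shift out the update-in-progress flag (bit 0, the same flag vcpu_cache_mmio_info() checks below), then strip the address-space modifier so a wrap in any address space is caught. A standalone sketch with a placeholder mask width:

    #include <stdint.h>
    #include <stdio.h>

    #define MMIO_GEN_MASK         0x7ffffULL /* placeholder width */
    #define KVM_ADDRESS_SPACE_NUM 2          /* x86 with SMM */

    int main(void)
    {
        uint64_t gen = 0x40001; /* illustrative memslots generation */

        gen &= MMIO_GEN_MASK;
        gen >>= 1;              /* drop the update-in-progress flag */
        gen &= ~((uint64_t)KVM_ADDRESS_SPACE_NUM - 1);

        if (gen == 0)
            printf("MMIO generation wrapped: zap all shadow pages\n");
        else
            printf("effective MMIO generation %#llx\n",
                   (unsigned long long)gen);
        return 0;
    }
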
2481 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
2482 index f6da5c37d2e8..4029d3783e18 100644
2483 --- a/arch/x86/kvm/vmx.c
2484 +++ b/arch/x86/kvm/vmx.c
2485 @@ -8184,25 +8184,50 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
2486 /* Addr = segment_base + offset */
2487 /* offset = base + [index * scale] + displacement */
2488 off = exit_qualification; /* holds the displacement */
2489 + if (addr_size == 1)
2490 + off = (gva_t)sign_extend64(off, 31);
2491 + else if (addr_size == 0)
2492 + off = (gva_t)sign_extend64(off, 15);
2493 if (base_is_valid)
2494 off += kvm_register_read(vcpu, base_reg);
2495 if (index_is_valid)
2496 off += kvm_register_read(vcpu, index_reg)<<scaling;
2497 vmx_get_segment(vcpu, &s, seg_reg);
2498 - *ret = s.base + off;
2499
2500 + /*
2501 + * The effective address, i.e. @off, of a memory operand is truncated
2502 + * based on the address size of the instruction. Note that this is
2503 + * the *effective address*, i.e. the address prior to accounting for
2504 + * the segment's base.
2505 + */
2506 if (addr_size == 1) /* 32 bit */
2507 - *ret &= 0xffffffff;
2508 + off &= 0xffffffff;
2509 + else if (addr_size == 0) /* 16 bit */
2510 + off &= 0xffff;
2511
2512 /* Checks for #GP/#SS exceptions. */
2513 exn = false;
2514 if (is_long_mode(vcpu)) {
2515 + /*
2516 + * The virtual/linear address is never truncated in 64-bit
2517 + * mode, e.g. a 32-bit address size can yield a 64-bit virtual
2518 + * address when using FS/GS with a non-zero base.
2519 + */
2520 + *ret = s.base + off;
2521 +
2522 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
2523 * non-canonical form. This is the only check on the memory
2524 * destination for long mode!
2525 */
2526 exn = is_noncanonical_address(*ret, vcpu);
2527 } else if (is_protmode(vcpu)) {
2528 + /*
2529 + * When not in long mode, the virtual/linear address is
2530 + * unconditionally truncated to 32 bits regardless of the
2531 + * address size.
2532 + */
2533 + *ret = (s.base + off) & 0xffffffff;
2534 +
2535 /* Protected mode: apply checks for segment validity in the
2536 * following order:
2537 * - segment type check (#GP(0) may be thrown)
2538 @@ -8226,10 +8251,16 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
2539 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
2540 */
2541 exn = (s.unusable != 0);
2542 - /* Protected mode: #GP(0)/#SS(0) if the memory
2543 - * operand is outside the segment limit.
2544 +
2545 + /*
2546 + * Protected mode: #GP(0)/#SS(0) if the memory operand is
2547 + * outside the segment limit. All CPUs that support VMX ignore
2548 + * limit checks for flat segments, i.e. segments with base==0,
2549 + * limit==0xffffffff and of type expand-up data or code.
2550 */
2551 - exn = exn || (off + sizeof(u64) > s.limit);
2552 + if (!(s.base == 0 && s.limit == 0xffffffff &&
2553 + ((s.type & 8) || !(s.type & 4))))
2554 + exn = exn || (off + sizeof(u64) > s.limit);
2555 }
2556 if (exn) {
2557 kvm_queue_exception_e(vcpu,
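
Note on the vmx.c hunk: operand-address calculation for VMX instructions now sign-extends the displacement to the instruction's address size, truncates the effective address to that size, and truncates the final linear address to 32 bits outside long mode; the segment-limit check is also skipped for flat expand-up segments, which real CPUs ignore. A standalone sketch of the address math (sign_extend64 reimplemented with the kernel helper's semantics):

    #include <stdint.h>
    #include <stdio.h>

    static int64_t sign_extend64(uint64_t value, int index)
    {
        int shift = 63 - index;
        return ((int64_t)(value << shift)) >> shift;
    }

    int main(void)
    {
        uint64_t disp = 0xfffffff0; /* -16 as a 32-bit displacement */
        uint64_t base = 0x1000, seg_base = 0;
        uint64_t off = (uint64_t)sign_extend64(disp, 31) + base;

        off &= 0xffffffff;          /* 32-bit address size */
        printf("linear = %#llx\n",  /* 0xff0, not a huge positive value */
               (unsigned long long)((seg_base + off) & 0xffffffff));
        return 0;
    }
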
2558 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2559 index 3a7cf7c6b28a..6181ec19bed2 100644
2560 --- a/arch/x86/kvm/x86.c
2561 +++ b/arch/x86/kvm/x86.c
2562 @@ -9108,13 +9108,13 @@ out_free:
2563 return -ENOMEM;
2564 }
2565
2566 -void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
2567 +void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
2568 {
2569 /*
2570 * memslots->generation has been incremented.
2571 * mmio generation may have reached its maximum value.
2572 */
2573 - kvm_mmu_invalidate_mmio_sptes(kvm, slots);
2574 + kvm_mmu_invalidate_mmio_sptes(kvm, gen);
2575 }
2576
2577 int kvm_arch_prepare_memory_region(struct kvm *kvm,
2578 diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
2579 index 67b9568613f3..1826ed9dd1c8 100644
2580 --- a/arch/x86/kvm/x86.h
2581 +++ b/arch/x86/kvm/x86.h
2582 @@ -181,6 +181,11 @@ static inline bool emul_is_noncanonical_address(u64 la,
2583 static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
2584 gva_t gva, gfn_t gfn, unsigned access)
2585 {
2586 + u64 gen = kvm_memslots(vcpu->kvm)->generation;
2587 +
2588 + if (unlikely(gen & 1))
2589 + return;
2590 +
2591 /*
2592 * If this is a shadow nested page table, the "GVA" is
2593 * actually a nGPA.
2594 @@ -188,7 +193,7 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
2595 vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
2596 vcpu->arch.access = access;
2597 vcpu->arch.mmio_gfn = gfn;
2598 - vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
2599 + vcpu->arch.mmio_gen = gen;
2600 }
2601
2602 static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
2603 diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
2604 index c8f011e07a15..73aa0b89a74a 100644
2605 --- a/arch/x86/xen/mmu_pv.c
2606 +++ b/arch/x86/xen/mmu_pv.c
2607 @@ -2106,10 +2106,10 @@ void __init xen_relocate_p2m(void)
2608 pt = early_memremap(pt_phys, PAGE_SIZE);
2609 clear_page(pt);
2610 for (idx_pte = 0;
2611 - idx_pte < min(n_pte, PTRS_PER_PTE);
2612 - idx_pte++) {
2613 - set_pte(pt + idx_pte,
2614 - pfn_pte(p2m_pfn, PAGE_KERNEL));
2615 + idx_pte < min(n_pte, PTRS_PER_PTE);
2616 + idx_pte++) {
2617 + pt[idx_pte] = pfn_pte(p2m_pfn,
2618 + PAGE_KERNEL);
2619 p2m_pfn++;
2620 }
2621 n_pte -= PTRS_PER_PTE;
2622 @@ -2117,8 +2117,7 @@ void __init xen_relocate_p2m(void)
2623 make_lowmem_page_readonly(__va(pt_phys));
2624 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
2625 PFN_DOWN(pt_phys));
2626 - set_pmd(pmd + idx_pt,
2627 - __pmd(_PAGE_TABLE | pt_phys));
2628 + pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
2629 pt_phys += PAGE_SIZE;
2630 }
2631 n_pt -= PTRS_PER_PMD;
2632 @@ -2126,7 +2125,7 @@ void __init xen_relocate_p2m(void)
2633 make_lowmem_page_readonly(__va(pmd_phys));
2634 pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
2635 PFN_DOWN(pmd_phys));
2636 - set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
2637 + pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
2638 pmd_phys += PAGE_SIZE;
2639 }
2640 n_pmd -= PTRS_PER_PUD;
2641 diff --git a/block/blk-mq.c b/block/blk-mq.c
2642 index 23a53b67cf0d..7d53f2314d7c 100644
2643 --- a/block/blk-mq.c
2644 +++ b/block/blk-mq.c
2645 @@ -701,12 +701,20 @@ static void blk_mq_requeue_work(struct work_struct *work)
2646 spin_unlock_irq(&q->requeue_lock);
2647
2648 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
2649 - if (!(rq->rq_flags & RQF_SOFTBARRIER))
2650 + if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
2651 continue;
2652
2653 rq->rq_flags &= ~RQF_SOFTBARRIER;
2654 list_del_init(&rq->queuelist);
2655 - blk_mq_sched_insert_request(rq, true, false, false);
2656 + /*
2657 + * If RQF_DONTPREP, rq has contained some driver specific
2658 + * data, so insert it to hctx dispatch list to avoid any
2659 + * merge.
2660 + */
2661 + if (rq->rq_flags & RQF_DONTPREP)
2662 + blk_mq_request_bypass_insert(rq, false);
2663 + else
2664 + blk_mq_sched_insert_request(rq, true, false, false);
2665 }
2666
2667 while (!list_empty(&rq_list)) {
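
Note on the blk-mq hunk: requests a driver has already prepared (RQF_DONTPREP) carry driver-private data, so on requeue they bypass the I/O scheduler and go straight to the hctx dispatch list, where no merging can happen. A standalone sketch of the routing decision (flag values are placeholders):

    #include <stdio.h>

    #define RQF_SOFTBARRIER (1u << 0) /* placeholder flag bits */
    #define RQF_DONTPREP    (1u << 1)

    static const char *route(unsigned int rq_flags)
    {
        if (!(rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
            return "leave on list";
        return (rq_flags & RQF_DONTPREP) ?
               "hctx dispatch list (no merging)" : "scheduler insert";
    }

    int main(void)
    {
        printf("%s\n", route(RQF_DONTPREP));
        printf("%s\n", route(RQF_SOFTBARRIER));
        return 0;
    }
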
2668 diff --git a/crypto/aead.c b/crypto/aead.c
2669 index 60b3bbe973e7..9688ada13981 100644
2670 --- a/crypto/aead.c
2671 +++ b/crypto/aead.c
2672 @@ -61,8 +61,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
2673 else
2674 err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
2675
2676 - if (err)
2677 + if (unlikely(err)) {
2678 + crypto_aead_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
2679 return err;
2680 + }
2681
2682 crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
2683 return 0;
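
Note on the aead.c hunk (ahash, shash and skcipher below follow the same pattern): a failed ->setkey() must re-mark the transform as needing a key, otherwise a tfm left half-keyed by the failure could still be used. The state machine, as a standalone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct tfm { bool need_key; };

    static int tfm_setkey(struct tfm *t, int err_from_alg)
    {
        if (err_from_alg) {
            t->need_key = true; /* mirrors CRYPTO_TFM_NEED_KEY */
            return err_from_alg;
        }
        t->need_key = false;
        return 0;
    }

    int main(void)
    {
        struct tfm t = { .need_key = true };

        tfm_setkey(&t, -22);    /* bad key: -EINVAL */
        printf("need_key=%d\n", (int)t.need_key); /* still 1 */
        return 0;
    }
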
2684 diff --git a/crypto/aegis128.c b/crypto/aegis128.c
2685 index c22f4414856d..789716f92e4c 100644
2686 --- a/crypto/aegis128.c
2687 +++ b/crypto/aegis128.c
2688 @@ -290,19 +290,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state,
2689 const struct aegis128_ops *ops)
2690 {
2691 struct skcipher_walk walk;
2692 - u8 *src, *dst;
2693 - unsigned int chunksize;
2694
2695 ops->skcipher_walk_init(&walk, req, false);
2696
2697 while (walk.nbytes) {
2698 - src = walk.src.virt.addr;
2699 - dst = walk.dst.virt.addr;
2700 - chunksize = walk.nbytes;
2701 + unsigned int nbytes = walk.nbytes;
2702
2703 - ops->crypt_chunk(state, dst, src, chunksize);
2704 + if (nbytes < walk.total)
2705 + nbytes = round_down(nbytes, walk.stride);
2706
2707 - skcipher_walk_done(&walk, 0);
2708 + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
2709 + nbytes);
2710 +
2711 + skcipher_walk_done(&walk, walk.nbytes - nbytes);
2712 }
2713 }
2714
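
Note on the generic aegis128 hunk (aegis128l, aegis256, morus1280 and morus640 below are identical): every walk step except the last is rounded down to a whole number of strides and the residue is returned through skcipher_walk_done(), so crypt_chunk() only ever sees a partial block on the very last call. A standalone trace of that rounding (sizes illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int total = 37, stride = 16, step = 30, done = 0;

        while (done < total) {
            unsigned int nbytes = step;

            if (nbytes > total - done)
                nbytes = total - done;
            if (done + nbytes < total)           /* not the final chunk */
                nbytes -= nbytes % stride;       /* round_down(nbytes, stride) */
            printf("crypt_chunk(%u)\n", nbytes); /* 16, then 21 */
            done += nbytes;
        }
        return 0;
    }
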
2715 diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
2716 index b6fb21ebdc3e..73811448cb6b 100644
2717 --- a/crypto/aegis128l.c
2718 +++ b/crypto/aegis128l.c
2719 @@ -353,19 +353,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state,
2720 const struct aegis128l_ops *ops)
2721 {
2722 struct skcipher_walk walk;
2723 - u8 *src, *dst;
2724 - unsigned int chunksize;
2725
2726 ops->skcipher_walk_init(&walk, req, false);
2727
2728 while (walk.nbytes) {
2729 - src = walk.src.virt.addr;
2730 - dst = walk.dst.virt.addr;
2731 - chunksize = walk.nbytes;
2732 + unsigned int nbytes = walk.nbytes;
2733
2734 - ops->crypt_chunk(state, dst, src, chunksize);
2735 + if (nbytes < walk.total)
2736 + nbytes = round_down(nbytes, walk.stride);
2737
2738 - skcipher_walk_done(&walk, 0);
2739 + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
2740 + nbytes);
2741 +
2742 + skcipher_walk_done(&walk, walk.nbytes - nbytes);
2743 }
2744 }
2745
2746 diff --git a/crypto/aegis256.c b/crypto/aegis256.c
2747 index 11f0f8ec9c7c..8a71e9c06193 100644
2748 --- a/crypto/aegis256.c
2749 +++ b/crypto/aegis256.c
2750 @@ -303,19 +303,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state,
2751 const struct aegis256_ops *ops)
2752 {
2753 struct skcipher_walk walk;
2754 - u8 *src, *dst;
2755 - unsigned int chunksize;
2756
2757 ops->skcipher_walk_init(&walk, req, false);
2758
2759 while (walk.nbytes) {
2760 - src = walk.src.virt.addr;
2761 - dst = walk.dst.virt.addr;
2762 - chunksize = walk.nbytes;
2763 + unsigned int nbytes = walk.nbytes;
2764
2765 - ops->crypt_chunk(state, dst, src, chunksize);
2766 + if (nbytes < walk.total)
2767 + nbytes = round_down(nbytes, walk.stride);
2768
2769 - skcipher_walk_done(&walk, 0);
2770 + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
2771 + nbytes);
2772 +
2773 + skcipher_walk_done(&walk, walk.nbytes - nbytes);
2774 }
2775 }
2776
2777 diff --git a/crypto/ahash.c b/crypto/ahash.c
2778 index a64c143165b1..158e716f21a1 100644
2779 --- a/crypto/ahash.c
2780 +++ b/crypto/ahash.c
2781 @@ -86,17 +86,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
2782 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
2783 {
2784 unsigned int alignmask = walk->alignmask;
2785 - unsigned int nbytes = walk->entrylen;
2786
2787 walk->data -= walk->offset;
2788
2789 - if (nbytes && walk->offset & alignmask && !err) {
2790 - walk->offset = ALIGN(walk->offset, alignmask + 1);
2791 - nbytes = min(nbytes,
2792 - ((unsigned int)(PAGE_SIZE)) - walk->offset);
2793 - walk->entrylen -= nbytes;
2794 + if (walk->entrylen && (walk->offset & alignmask) && !err) {
2795 + unsigned int nbytes;
2796
2797 + walk->offset = ALIGN(walk->offset, alignmask + 1);
2798 + nbytes = min(walk->entrylen,
2799 + (unsigned int)(PAGE_SIZE - walk->offset));
2800 if (nbytes) {
2801 + walk->entrylen -= nbytes;
2802 walk->data += walk->offset;
2803 return nbytes;
2804 }
2805 @@ -116,7 +116,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
2806 if (err)
2807 return err;
2808
2809 - if (nbytes) {
2810 + if (walk->entrylen) {
2811 walk->offset = 0;
2812 walk->pg++;
2813 return hash_walk_next(walk);
2814 @@ -190,6 +190,21 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
2815 return ret;
2816 }
2817
2818 +static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
2819 + unsigned int keylen)
2820 +{
2821 + return -ENOSYS;
2822 +}
2823 +
2824 +static void ahash_set_needkey(struct crypto_ahash *tfm)
2825 +{
2826 + const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
2827 +
2828 + if (tfm->setkey != ahash_nosetkey &&
2829 + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
2830 + crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
2831 +}
2832 +
2833 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2834 unsigned int keylen)
2835 {
2836 @@ -201,20 +216,16 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2837 else
2838 err = tfm->setkey(tfm, key, keylen);
2839
2840 - if (err)
2841 + if (unlikely(err)) {
2842 + ahash_set_needkey(tfm);
2843 return err;
2844 + }
2845
2846 crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
2847 return 0;
2848 }
2849 EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
2850
2851 -static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
2852 - unsigned int keylen)
2853 -{
2854 - return -ENOSYS;
2855 -}
2856 -
2857 static inline unsigned int ahash_align_buffer_size(unsigned len,
2858 unsigned long mask)
2859 {
2860 @@ -467,8 +478,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
2861
2862 if (alg->setkey) {
2863 hash->setkey = alg->setkey;
2864 - if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
2865 - crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
2866 + ahash_set_needkey(hash);
2867 }
2868
2869 return 0;
2870 diff --git a/crypto/cfb.c b/crypto/cfb.c
2871 index e81e45673498..4abfe32ff845 100644
2872 --- a/crypto/cfb.c
2873 +++ b/crypto/cfb.c
2874 @@ -77,12 +77,14 @@ static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
2875 do {
2876 crypto_cfb_encrypt_one(tfm, iv, dst);
2877 crypto_xor(dst, src, bsize);
2878 - memcpy(iv, dst, bsize);
2879 + iv = dst;
2880
2881 src += bsize;
2882 dst += bsize;
2883 } while ((nbytes -= bsize) >= bsize);
2884
2885 + memcpy(walk->iv, iv, bsize);
2886 +
2887 return nbytes;
2888 }
2889
2890 @@ -162,7 +164,7 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
2891 const unsigned int bsize = crypto_cfb_bsize(tfm);
2892 unsigned int nbytes = walk->nbytes;
2893 u8 *src = walk->src.virt.addr;
2894 - u8 *iv = walk->iv;
2895 + u8 * const iv = walk->iv;
2896 u8 tmp[MAX_CIPHER_BLOCKSIZE];
2897
2898 do {
2899 @@ -172,8 +174,6 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
2900 src += bsize;
2901 } while ((nbytes -= bsize) >= bsize);
2902
2903 - memcpy(walk->iv, iv, bsize);
2904 -
2905 return nbytes;
2906 }
2907
2908 @@ -298,6 +298,12 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
2909 inst->alg.base.cra_blocksize = 1;
2910 inst->alg.base.cra_alignmask = alg->cra_alignmask;
2911
2912 + /*
2913 + * To simplify the implementation, configure the skcipher walk to only
2914 + * give a partial block at the very end, never earlier.
2915 + */
2916 + inst->alg.chunksize = alg->cra_blocksize;
2917 +
2918 inst->alg.ivsize = alg->cra_blocksize;
2919 inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
2920 inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
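
Note on the cfb.c hunk: in CFB each ciphertext block is the IV for the next block, so the encrypt loop can simply repoint iv at dst and copy the final block back into walk->iv once; setting inst->alg.chunksize to the block size guarantees the walk only yields a partial block at the very end. A standalone sketch of the chaining with a toy "block cipher" (XOR, purely illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BS 4 /* toy block size */

    static void toy_encrypt(uint8_t *out, const uint8_t *in)
    {
        for (int i = 0; i < BS; i++)
            out[i] = in[i] ^ 0x5a; /* stand-in for the real cipher */
    }

    int main(void)
    {
        uint8_t iv_store[BS] = { 1, 2, 3, 4 };
        uint8_t src[2 * BS] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' };
        uint8_t dst[2 * BS];
        const uint8_t *iv = iv_store;

        for (int b = 0; b < 2; b++) {
            uint8_t ks[BS];

            toy_encrypt(ks, iv);
            for (int i = 0; i < BS; i++)
                dst[b * BS + i] = ks[i] ^ src[b * BS + i];
            iv = &dst[b * BS]; /* iv = dst: chain without a memcpy */
        }
        memcpy(iv_store, iv, BS); /* single copy-out, as in the fix */

        for (int i = 0; i < 2 * BS; i++)
            printf("%02x", dst[i]);
        printf("\n");
        return 0;
    }
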
2921 diff --git a/crypto/morus1280.c b/crypto/morus1280.c
2922 index 3889c188f266..b83576b4eb55 100644
2923 --- a/crypto/morus1280.c
2924 +++ b/crypto/morus1280.c
2925 @@ -366,18 +366,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
2926 const struct morus1280_ops *ops)
2927 {
2928 struct skcipher_walk walk;
2929 - u8 *dst;
2930 - const u8 *src;
2931
2932 ops->skcipher_walk_init(&walk, req, false);
2933
2934 while (walk.nbytes) {
2935 - src = walk.src.virt.addr;
2936 - dst = walk.dst.virt.addr;
2937 + unsigned int nbytes = walk.nbytes;
2938
2939 - ops->crypt_chunk(state, dst, src, walk.nbytes);
2940 + if (nbytes < walk.total)
2941 + nbytes = round_down(nbytes, walk.stride);
2942
2943 - skcipher_walk_done(&walk, 0);
2944 + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
2945 + nbytes);
2946 +
2947 + skcipher_walk_done(&walk, walk.nbytes - nbytes);
2948 }
2949 }
2950
2951 diff --git a/crypto/morus640.c b/crypto/morus640.c
2952 index da06ec2f6a80..b6a477444f6d 100644
2953 --- a/crypto/morus640.c
2954 +++ b/crypto/morus640.c
2955 @@ -365,18 +365,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
2956 const struct morus640_ops *ops)
2957 {
2958 struct skcipher_walk walk;
2959 - u8 *dst;
2960 - const u8 *src;
2961
2962 ops->skcipher_walk_init(&walk, req, false);
2963
2964 while (walk.nbytes) {
2965 - src = walk.src.virt.addr;
2966 - dst = walk.dst.virt.addr;
2967 + unsigned int nbytes = walk.nbytes;
2968
2969 - ops->crypt_chunk(state, dst, src, walk.nbytes);
2970 + if (nbytes < walk.total)
2971 + nbytes = round_down(nbytes, walk.stride);
2972
2973 - skcipher_walk_done(&walk, 0);
2974 + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
2975 + nbytes);
2976 +
2977 + skcipher_walk_done(&walk, walk.nbytes - nbytes);
2978 }
2979 }
2980
2981 diff --git a/crypto/pcbc.c b/crypto/pcbc.c
2982 index 8aa10144407c..1b182dfedc94 100644
2983 --- a/crypto/pcbc.c
2984 +++ b/crypto/pcbc.c
2985 @@ -51,7 +51,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
2986 unsigned int nbytes = walk->nbytes;
2987 u8 *src = walk->src.virt.addr;
2988 u8 *dst = walk->dst.virt.addr;
2989 - u8 *iv = walk->iv;
2990 + u8 * const iv = walk->iv;
2991
2992 do {
2993 crypto_xor(iv, src, bsize);
2994 @@ -72,7 +72,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
2995 int bsize = crypto_cipher_blocksize(tfm);
2996 unsigned int nbytes = walk->nbytes;
2997 u8 *src = walk->src.virt.addr;
2998 - u8 *iv = walk->iv;
2999 + u8 * const iv = walk->iv;
3000 u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
3001
3002 do {
3003 @@ -84,8 +84,6 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
3004 src += bsize;
3005 } while ((nbytes -= bsize) >= bsize);
3006
3007 - memcpy(walk->iv, iv, bsize);
3008 -
3009 return nbytes;
3010 }
3011
3012 @@ -121,7 +119,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
3013 unsigned int nbytes = walk->nbytes;
3014 u8 *src = walk->src.virt.addr;
3015 u8 *dst = walk->dst.virt.addr;
3016 - u8 *iv = walk->iv;
3017 + u8 * const iv = walk->iv;
3018
3019 do {
3020 crypto_cipher_decrypt_one(tfm, dst, src);
3021 @@ -132,8 +130,6 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
3022 dst += bsize;
3023 } while ((nbytes -= bsize) >= bsize);
3024
3025 - memcpy(walk->iv, iv, bsize);
3026 -
3027 return nbytes;
3028 }
3029
3030 @@ -144,7 +140,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
3031 int bsize = crypto_cipher_blocksize(tfm);
3032 unsigned int nbytes = walk->nbytes;
3033 u8 *src = walk->src.virt.addr;
3034 - u8 *iv = walk->iv;
3035 + u8 * const iv = walk->iv;
3036 u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
3037
3038 do {
3039 @@ -156,8 +152,6 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
3040 src += bsize;
3041 } while ((nbytes -= bsize) >= bsize);
3042
3043 - memcpy(walk->iv, iv, bsize);
3044 -
3045 return nbytes;
3046 }
3047
3048 diff --git a/crypto/shash.c b/crypto/shash.c
3049 index 5d732c6bb4b2..a04145e5306a 100644
3050 --- a/crypto/shash.c
3051 +++ b/crypto/shash.c
3052 @@ -53,6 +53,13 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
3053 return err;
3054 }
3055
3056 +static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
3057 +{
3058 + if (crypto_shash_alg_has_setkey(alg) &&
3059 + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
3060 + crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
3061 +}
3062 +
3063 int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
3064 unsigned int keylen)
3065 {
3066 @@ -65,8 +72,10 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
3067 else
3068 err = shash->setkey(tfm, key, keylen);
3069
3070 - if (err)
3071 + if (unlikely(err)) {
3072 + shash_set_needkey(tfm, shash);
3073 return err;
3074 + }
3075
3076 crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
3077 return 0;
3078 @@ -368,7 +377,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
3079 crt->final = shash_async_final;
3080 crt->finup = shash_async_finup;
3081 crt->digest = shash_async_digest;
3082 - crt->setkey = shash_async_setkey;
3083 + if (crypto_shash_alg_has_setkey(alg))
3084 + crt->setkey = shash_async_setkey;
3085
3086 crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
3087 CRYPTO_TFM_NEED_KEY);
3088 @@ -390,9 +400,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
3089
3090 hash->descsize = alg->descsize;
3091
3092 - if (crypto_shash_alg_has_setkey(alg) &&
3093 - !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
3094 - crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
3095 + shash_set_needkey(hash, alg);
3096
3097 return 0;
3098 }
3099 diff --git a/crypto/skcipher.c b/crypto/skcipher.c
3100 index 0bd8c6caa498..46bb300d418f 100644
3101 --- a/crypto/skcipher.c
3102 +++ b/crypto/skcipher.c
3103 @@ -584,6 +584,12 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
3104 return crypto_alg_extsize(alg);
3105 }
3106
3107 +static void skcipher_set_needkey(struct crypto_skcipher *tfm)
3108 +{
3109 + if (tfm->keysize)
3110 + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
3111 +}
3112 +
3113 static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
3114 const u8 *key, unsigned int keylen)
3115 {
3116 @@ -597,8 +603,10 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
3117 err = crypto_blkcipher_setkey(blkcipher, key, keylen);
3118 crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
3119 CRYPTO_TFM_RES_MASK);
3120 - if (err)
3121 + if (unlikely(err)) {
3122 + skcipher_set_needkey(tfm);
3123 return err;
3124 + }
3125
3126 crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
3127 return 0;
3128 @@ -676,8 +684,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
3129 skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
3130 skcipher->keysize = calg->cra_blkcipher.max_keysize;
3131
3132 - if (skcipher->keysize)
3133 - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
3134 + skcipher_set_needkey(skcipher);
3135
3136 return 0;
3137 }
3138 @@ -697,8 +704,10 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
3139 crypto_skcipher_set_flags(tfm,
3140 crypto_ablkcipher_get_flags(ablkcipher) &
3141 CRYPTO_TFM_RES_MASK);
3142 - if (err)
3143 + if (unlikely(err)) {
3144 + skcipher_set_needkey(tfm);
3145 return err;
3146 + }
3147
3148 crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
3149 return 0;
3150 @@ -775,8 +784,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
3151 sizeof(struct ablkcipher_request);
3152 skcipher->keysize = calg->cra_ablkcipher.max_keysize;
3153
3154 - if (skcipher->keysize)
3155 - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
3156 + skcipher_set_needkey(skcipher);
3157
3158 return 0;
3159 }
3160 @@ -819,8 +827,10 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
3161 else
3162 err = cipher->setkey(tfm, key, keylen);
3163
3164 - if (err)
3165 + if (unlikely(err)) {
3166 + skcipher_set_needkey(tfm);
3167 return err;
3168 + }
3169
3170 crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
3171 return 0;
3172 @@ -852,8 +862,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
3173 skcipher->ivsize = alg->ivsize;
3174 skcipher->keysize = alg->max_keysize;
3175
3176 - if (skcipher->keysize)
3177 - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
3178 + skcipher_set_needkey(skcipher);
3179
3180 if (alg->exit)
3181 skcipher->base.exit = crypto_skcipher_exit_tfm;
3182 diff --git a/crypto/testmgr.c b/crypto/testmgr.c
3183 index 54d882ffe438..3664c26f4838 100644
3184 --- a/crypto/testmgr.c
3185 +++ b/crypto/testmgr.c
3186 @@ -1894,14 +1894,21 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
3187
3188 err = alg_test_hash(desc, driver, type, mask);
3189 if (err)
3190 - goto out;
3191 + return err;
3192
3193 tfm = crypto_alloc_shash(driver, type, mask);
3194 if (IS_ERR(tfm)) {
3195 + if (PTR_ERR(tfm) == -ENOENT) {
3196 + /*
3197 + * This crc32c implementation is only available through
3198 + * ahash API, not the shash API, so the remaining part
3199 + * of the test is not applicable to it.
3200 + */
3201 + return 0;
3202 + }
3203 printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
3204 "%ld\n", driver, PTR_ERR(tfm));
3205 - err = PTR_ERR(tfm);
3206 - goto out;
3207 + return PTR_ERR(tfm);
3208 }
3209
3210 do {
3211 @@ -1928,7 +1935,6 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
3212
3213 crypto_free_shash(tfm);
3214
3215 -out:
3216 return err;
3217 }
3218
3219 diff --git a/crypto/testmgr.h b/crypto/testmgr.h
3220 index 11e6f17fe724..862ee1d04263 100644
3221 --- a/crypto/testmgr.h
3222 +++ b/crypto/testmgr.h
3223 @@ -11416,6 +11416,31 @@ static const struct cipher_testvec aes_cfb_tv_template[] = {
3224 "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
3225 "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
3226 .len = 64,
3227 + .also_non_np = 1,
3228 + .np = 2,
3229 + .tap = { 31, 33 },
3230 + }, { /* > 16 bytes, not a multiple of 16 bytes */
3231 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
3232 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
3233 + .klen = 16,
3234 + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
3235 + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
3236 + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
3237 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
3238 + "\xae",
3239 + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
3240 + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
3241 + "\xc8",
3242 + .len = 17,
3243 + }, { /* < 16 bytes */
3244 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
3245 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
3246 + .klen = 16,
3247 + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
3248 + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
3249 + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
3250 + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
3251 + .len = 7,
3252 },
3253 };
3254
3255 diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
3256 index 545e91420cde..8940054d6250 100644
3257 --- a/drivers/acpi/device_sysfs.c
3258 +++ b/drivers/acpi/device_sysfs.c
3259 @@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
3260 {
3261 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
3262 const union acpi_object *of_compatible, *obj;
3263 + acpi_status status;
3264 int len, count;
3265 int i, nval;
3266 char *c;
3267
3268 - acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
3269 + status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
3270 + if (ACPI_FAILURE(status))
3271 + return -ENODEV;
3272 +
3273 /* DT strings are all in lower case */
3274 for (c = buf.pointer; *c != '\0'; c++)
3275 *c = tolower(*c);
3276 diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
3277 index f530d3541242..df2175b1169a 100644
3278 --- a/drivers/acpi/nfit/core.c
3279 +++ b/drivers/acpi/nfit/core.c
3280 @@ -397,7 +397,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
3281 if (call_pkg) {
3282 int i;
3283
3284 - if (nfit_mem->family != call_pkg->nd_family)
3285 + if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
3286 return -ENOTTY;
3287
3288 for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
3289 @@ -406,6 +406,10 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
3290 return call_pkg->nd_command;
3291 }
3292
3293 + /* In the !call_pkg case, bus commands == bus functions */
3294 + if (!nfit_mem)
3295 + return cmd;
3296 +
3297 /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
3298 if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
3299 return cmd;
3300 @@ -436,17 +440,18 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
3301 if (cmd_rc)
3302 *cmd_rc = -EINVAL;
3303
3304 + if (cmd == ND_CMD_CALL)
3305 + call_pkg = buf;
3306 + func = cmd_to_func(nfit_mem, cmd, call_pkg);
3307 + if (func < 0)
3308 + return func;
3309 +
3310 if (nvdimm) {
3311 struct acpi_device *adev = nfit_mem->adev;
3312
3313 if (!adev)
3314 return -ENOTTY;
3315
3316 - if (cmd == ND_CMD_CALL)
3317 - call_pkg = buf;
3318 - func = cmd_to_func(nfit_mem, cmd, call_pkg);
3319 - if (func < 0)
3320 - return func;
3321 dimm_name = nvdimm_name(nvdimm);
3322 cmd_name = nvdimm_cmd_name(cmd);
3323 cmd_mask = nvdimm_cmd_mask(nvdimm);
3324 @@ -457,12 +462,9 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
3325 } else {
3326 struct acpi_device *adev = to_acpi_dev(acpi_desc);
3327
3328 - func = cmd;
3329 cmd_name = nvdimm_bus_cmd_name(cmd);
3330 cmd_mask = nd_desc->cmd_mask;
3331 - dsm_mask = cmd_mask;
3332 - if (cmd == ND_CMD_CALL)
3333 - dsm_mask = nd_desc->bus_dsm_mask;
3334 + dsm_mask = nd_desc->bus_dsm_mask;
3335 desc = nd_cmd_bus_desc(cmd);
3336 guid = to_nfit_uuid(NFIT_DEV_BUS);
3337 handle = adev->handle;
3338 @@ -533,6 +535,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
3339 return -EINVAL;
3340 }
3341
3342 + if (out_obj->type != ACPI_TYPE_BUFFER) {
3343 + dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
3344 + dimm_name, cmd_name, out_obj->type);
3345 + rc = -EINVAL;
3346 + goto out;
3347 + }
3348 +
3349 if (call_pkg) {
3350 call_pkg->nd_fw_size = out_obj->buffer.length;
3351 memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
3352 @@ -551,13 +560,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
3353 return 0;
3354 }
3355
3356 - if (out_obj->package.type != ACPI_TYPE_BUFFER) {
3357 - dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
3358 - dimm_name, cmd_name, out_obj->type);
3359 - rc = -EINVAL;
3360 - goto out;
3361 - }
3362 -
3363 dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
3364 cmd_name, out_obj->buffer.length);
3365 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
3366 @@ -2890,14 +2892,16 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
3367 {
3368 int rc;
3369
3370 - if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
3371 + if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3372 return acpi_nfit_register_region(acpi_desc, nfit_spa);
3373
3374 set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
3375 - set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
3376 + if (!no_init_ars)
3377 + set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
3378
3379 switch (acpi_nfit_query_poison(acpi_desc)) {
3380 case 0:
3381 + case -ENOSPC:
3382 case -EAGAIN:
3383 rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
3384 /* shouldn't happen, try again later */
3385 @@ -2922,7 +2926,6 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
3386 break;
3387 case -EBUSY:
3388 case -ENOMEM:
3389 - case -ENOSPC:
3390 /*
3391 * BIOS was using ARS, wait for it to complete (or
3392 * resources to become available) and then perform our
3393 diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
3394 index a43276c76fc6..21393ec3b9a4 100644
3395 --- a/drivers/auxdisplay/ht16k33.c
3396 +++ b/drivers/auxdisplay/ht16k33.c
3397 @@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client)
3398 struct ht16k33_priv *priv = i2c_get_clientdata(client);
3399 struct ht16k33_fbdev *fbdev = &priv->fbdev;
3400
3401 - cancel_delayed_work(&fbdev->work);
3402 + cancel_delayed_work_sync(&fbdev->work);
3403 unregister_framebuffer(fbdev->info);
3404 framebuffer_release(fbdev->info);
3405 free_page((unsigned long) fbdev->buffer);
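The subtlety here: plain cancel_delayed_work() only removes a pending timer; a handler that is already executing keeps running and can dereference the framebuffer after the lines below free it. The _sync variant additionally waits for an in-flight handler to finish. A sketch of the general teardown idiom (struct and field names are invented for illustration):

    #include <linux/workqueue.h>
    #include <linux/gfp.h>

    struct example_priv {
            struct delayed_work work;       /* periodically touches buffer */
            void *buffer;
    };

    static void example_remove(struct example_priv *priv)
    {
            /* Cancel the pending timer AND wait out a running handler,
             * so nothing can touch priv->buffer once freeing starts. */
            cancel_delayed_work_sync(&priv->work);

            free_page((unsigned long)priv->buffer);
    }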
3406 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
3407 index 5fa1898755a3..7c84f64c74f7 100644
3408 --- a/drivers/base/power/wakeup.c
3409 +++ b/drivers/base/power/wakeup.c
3410 @@ -118,7 +118,6 @@ void wakeup_source_drop(struct wakeup_source *ws)
3411 if (!ws)
3412 return;
3413
3414 - del_timer_sync(&ws->timer);
3415 __pm_relax(ws);
3416 }
3417 EXPORT_SYMBOL_GPL(wakeup_source_drop);
3418 @@ -205,6 +204,13 @@ void wakeup_source_remove(struct wakeup_source *ws)
3419 list_del_rcu(&ws->entry);
3420 raw_spin_unlock_irqrestore(&events_lock, flags);
3421 synchronize_srcu(&wakeup_srcu);
3422 +
3423 + del_timer_sync(&ws->timer);
3424 + /*
3425 + * Clear timer.function to make wakeup_source_not_registered() treat
3426 + * this wakeup source as not registered.
3427 + */
3428 + ws->timer.function = NULL;
3429 }
3430 EXPORT_SYMBOL_GPL(wakeup_source_remove);
3431
3432 diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
3433 index fdabd0b74492..a8de56f1936d 100644
3434 --- a/drivers/block/floppy.c
3435 +++ b/drivers/block/floppy.c
3436 @@ -4084,7 +4084,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
3437
3438 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
3439 if (lock_fdc(drive))
3440 - return -EINTR;
3441 + return 0;
3442 poll_drive(false, 0);
3443 process_fd_request();
3444 }
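The point of this fix: ->check_events returns an unsigned int bitmask of DISK_EVENT_* flags, so -EINTR was never reported as an error; it was implicitly converted to a bitmask with nearly every bit set. Returning 0 ("no events observed") is the correct degraded answer when the controller lock cannot be taken. A small runnable illustration of the conversion (the value 4 stands in for EINTR):

    #include <stdio.h>

    /* A callback whose contract is "return a bitmask of events". */
    static unsigned int check_events(int locked_out)
    {
            if (locked_out)
                    return -4;      /* intended as -EINTR, but... */
            return 0;
    }

    int main(void)
    {
            /* ...the caller sees 0xfffffffc: almost every bit set. */
            printf("0x%x\n", check_events(1));
            return 0;
    }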
3445 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
3446 index 5faa917df1b6..82d831b103f9 100644
3447 --- a/drivers/char/ipmi/ipmi_si_intf.c
3448 +++ b/drivers/char/ipmi/ipmi_si_intf.c
3449 @@ -2085,6 +2085,11 @@ static int try_smi_init(struct smi_info *new_smi)
3450 WARN_ON(new_smi->io.dev->init_name != NULL);
3451
3452 out_err:
3453 + if (rv && new_smi->io.io_cleanup) {
3454 + new_smi->io.io_cleanup(&new_smi->io);
3455 + new_smi->io.io_cleanup = NULL;
3456 + }
3457 +
3458 kfree(init_name);
3459 return rv;
3460 }
3461 diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c
3462 index 1b869d530884..638f4ab88f44 100644
3463 --- a/drivers/char/ipmi/ipmi_si_mem_io.c
3464 +++ b/drivers/char/ipmi/ipmi_si_mem_io.c
3465 @@ -81,8 +81,6 @@ int ipmi_si_mem_setup(struct si_sm_io *io)
3466 if (!addr)
3467 return -ENODEV;
3468
3469 - io->io_cleanup = mem_cleanup;
3470 -
3471 /*
3472 * Figure out the actual readb/readw/readl/etc routine to use based
3473 * upon the register size.
3474 @@ -141,5 +139,8 @@ int ipmi_si_mem_setup(struct si_sm_io *io)
3475 mem_region_cleanup(io, io->io_size);
3476 return -EIO;
3477 }
3478 +
3479 + io->io_cleanup = mem_cleanup;
3480 +
3481 return 0;
3482 }
3483 diff --git a/drivers/char/ipmi/ipmi_si_port_io.c b/drivers/char/ipmi/ipmi_si_port_io.c
3484 index ef6dffcea9fa..03924c32b6e9 100644
3485 --- a/drivers/char/ipmi/ipmi_si_port_io.c
3486 +++ b/drivers/char/ipmi/ipmi_si_port_io.c
3487 @@ -68,8 +68,6 @@ int ipmi_si_port_setup(struct si_sm_io *io)
3488 if (!addr)
3489 return -ENODEV;
3490
3491 - io->io_cleanup = port_cleanup;
3492 -
3493 /*
3494 * Figure out the actual inb/inw/inl/etc routine to use based
3495 * upon the register size.
3496 @@ -109,5 +107,8 @@ int ipmi_si_port_setup(struct si_sm_io *io)
3497 return -EIO;
3498 }
3499 }
3500 +
3501 + io->io_cleanup = port_cleanup;
3502 +
3503 return 0;
3504 }
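Both ipmi_si hunks move the io->io_cleanup assignment past the last failure point, pairing with the new try_smi_init() error path above, which calls the hook once and then clears it. The pointer thus carries a single meaning: resources were fully set up and exactly one cleanup is owed. A sketch of the idiom, with invented helper names:

    struct res_io {
            void (*io_cleanup)(struct res_io *io);
    };

    void my_cleanup(struct res_io *io);          /* undoes both steps */
    int claim_resource(struct res_io *io);       /* step 1 */
    int map_resource(struct res_io *io);         /* step 2 */
    void unclaim_resource(struct res_io *io);    /* undoes step 1 */

    int my_setup(struct res_io *io)
    {
            int rv;

            rv = claim_resource(io);
            if (rv)
                    return rv;

            rv = map_resource(io);
            if (rv) {
                    /* Undo locally; io_cleanup was never published, so
                     * the caller's error path cannot double-free. */
                    unclaim_resource(io);
                    return rv;
            }

            io->io_cleanup = my_cleanup;    /* publish only on success */
            return 0;
    }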
3505 diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
3506 index abd675bec88c..694fc58888c1 100644
3507 --- a/drivers/char/tpm/st33zp24/st33zp24.c
3508 +++ b/drivers/char/tpm/st33zp24/st33zp24.c
3509 @@ -436,7 +436,7 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
3510 goto out_err;
3511 }
3512
3513 - return len;
3514 + return 0;
3515 out_err:
3516 st33zp24_cancel(chip);
3517 release_locality(chip);
3518 diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
3519 index 1010cb79dcc6..43c3f9b87614 100644
3520 --- a/drivers/char/tpm/tpm-interface.c
3521 +++ b/drivers/char/tpm/tpm-interface.c
3522 @@ -495,10 +495,19 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
3523 if (rc < 0) {
3524 if (rc != -EPIPE)
3525 dev_err(&chip->dev,
3526 - "%s: tpm_send: error %d\n", __func__, rc);
3527 + "%s: send(): error %d\n", __func__, rc);
3528 goto out;
3529 }
3530
3531 + /* A sanity check: send() should return zero on success, i.e.
3532 + * not the command length.
3533 + */
3534 + if (rc > 0) {
3535 + dev_warn(&chip->dev,
3536 + "%s: send(): invalid value %d\n", __func__, rc);
3537 + rc = 0;
3538 + }
3539 +
3540 if (chip->flags & TPM_CHIP_FLAG_IRQ)
3541 goto out_recv;
3542
3543 diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
3544 index 66a14526aaf4..a290b30a0c35 100644
3545 --- a/drivers/char/tpm/tpm_atmel.c
3546 +++ b/drivers/char/tpm/tpm_atmel.c
3547 @@ -105,7 +105,7 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
3548 iowrite8(buf[i], priv->iobase);
3549 }
3550
3551 - return count;
3552 + return 0;
3553 }
3554
3555 static void tpm_atml_cancel(struct tpm_chip *chip)
3556 diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
3557 index 36952ef98f90..763fc7e6c005 100644
3558 --- a/drivers/char/tpm/tpm_crb.c
3559 +++ b/drivers/char/tpm/tpm_crb.c
3560 @@ -287,19 +287,29 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
3561 struct crb_priv *priv = dev_get_drvdata(&chip->dev);
3562 unsigned int expected;
3563
3564 - /* sanity check */
3565 - if (count < 6)
3566 + /* A sanity check that the upper layer wants to get at least the header
3567 + * as that is the minimum size for any TPM response.
3568 + */
3569 + if (count < TPM_HEADER_SIZE)
3570 return -EIO;
3571
3572 + /* If this bit is set, according to the spec, the TPM is in an
3573 + * unrecoverable condition.
3574 + */
3575 if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR)
3576 return -EIO;
3577
3578 - memcpy_fromio(buf, priv->rsp, 6);
3579 - expected = be32_to_cpup((__be32 *) &buf[2]);
3580 - if (expected > count || expected < 6)
3581 + /* Read the first 8 bytes in order to get the length of the response.
3582 + * We read exactly a quad word in order to make sure that the remaining
3583 + * reads will be aligned.
3584 + */
3585 + memcpy_fromio(buf, priv->rsp, 8);
3586 +
3587 + expected = be32_to_cpup((__be32 *)&buf[2]);
3588 + if (expected > count || expected < TPM_HEADER_SIZE)
3589 return -EIO;
3590
3591 - memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
3592 + memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8);
3593
3594 return expected;
3595 }
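Some background that makes these constants legible: every TPM response begins with a 10-byte header, a 16-bit tag, a 32-bit big-endian total length at offset 2, and a 32-bit return code, so TPM_HEADER_SIZE is the smallest read that can ever make sense, and reading 8 bytes up front keeps the follow-up memcpy_fromio() on an 8-byte boundary. A plain-C sketch of the length extraction the driver performs with be32_to_cpup():

    #include <stdint.h>
    #include <stddef.h>

    #define TPM_HEADER_SIZE 10

    /* Extract the big-endian total length from a raw response header;
     * returns 0 when the claimed length cannot be a valid response. */
    uint32_t tpm_response_len(const uint8_t *hdr, size_t bufsize)
    {
            uint32_t len = ((uint32_t)hdr[2] << 24) |
                           ((uint32_t)hdr[3] << 16) |
                           ((uint32_t)hdr[4] << 8) |
                            (uint32_t)hdr[5];

            if (len < TPM_HEADER_SIZE || len > bufsize)
                    return 0;
            return len;
    }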
3596 diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
3597 index 95ce2e9ccdc6..32a8e27c5382 100644
3598 --- a/drivers/char/tpm/tpm_i2c_atmel.c
3599 +++ b/drivers/char/tpm/tpm_i2c_atmel.c
3600 @@ -65,7 +65,11 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
3601 dev_dbg(&chip->dev,
3602 "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
3603 (int)min_t(size_t, 64, len), buf, len, status);
3604 - return status;
3605 +
3606 + if (status < 0)
3607 + return status;
3608 +
3609 + return 0;
3610 }
3611
3612 static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
3613 diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
3614 index 9086edc9066b..977fd42daa1b 100644
3615 --- a/drivers/char/tpm/tpm_i2c_infineon.c
3616 +++ b/drivers/char/tpm/tpm_i2c_infineon.c
3617 @@ -587,7 +587,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
3618 /* go and do it */
3619 iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1);
3620
3621 - return len;
3622 + return 0;
3623 out_err:
3624 tpm_tis_i2c_ready(chip);
3625 /* The TPM needs some time to clean up here,
3626 diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
3627 index f74f451baf6a..b8defdfdf2dc 100644
3628 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c
3629 +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
3630 @@ -469,7 +469,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
3631 }
3632
3633 dev_dbg(dev, "%s() -> %zd\n", __func__, len);
3634 - return len;
3635 + return 0;
3636 }
3637
3638 static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
3639 diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
3640 index 25f6e2665385..77e47dc5aacc 100644
3641 --- a/drivers/char/tpm/tpm_ibmvtpm.c
3642 +++ b/drivers/char/tpm/tpm_ibmvtpm.c
3643 @@ -141,14 +141,14 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
3644 }
3645
3646 /**
3647 - * tpm_ibmvtpm_send - Send tpm request
3648 - *
3649 + * tpm_ibmvtpm_send() - Send a TPM command
3650 * @chip: tpm chip struct
3651 * @buf: buffer contains data to send
3652 * @count: size of buffer
3653 *
3654 * Return:
3655 - * Number of bytes sent or < 0 on error.
3656 + * 0 on success,
3657 + * -errno on error
3658 */
3659 static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
3660 {
3661 @@ -194,7 +194,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
3662 rc = 0;
3663 ibmvtpm->tpm_processing_cmd = false;
3664 } else
3665 - rc = count;
3666 + rc = 0;
3667
3668 spin_unlock(&ibmvtpm->rtce_lock);
3669 return rc;
3670 diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
3671 index d8f10047fbba..97f6d4fe0aee 100644
3672 --- a/drivers/char/tpm/tpm_infineon.c
3673 +++ b/drivers/char/tpm/tpm_infineon.c
3674 @@ -354,7 +354,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
3675 for (i = 0; i < count; i++) {
3676 wait_and_send(chip, buf[i]);
3677 }
3678 - return count;
3679 + return 0;
3680 }
3681
3682 static void tpm_inf_cancel(struct tpm_chip *chip)
3683 diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
3684 index 5d6cce74cd3f..9bee3c5eb4bf 100644
3685 --- a/drivers/char/tpm/tpm_nsc.c
3686 +++ b/drivers/char/tpm/tpm_nsc.c
3687 @@ -226,7 +226,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
3688 }
3689 outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND);
3690
3691 - return count;
3692 + return 0;
3693 }
3694
3695 static void tpm_nsc_cancel(struct tpm_chip *chip)
3696 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
3697 index d2345d9fd7b5..0eaea3a7b8f4 100644
3698 --- a/drivers/char/tpm/tpm_tis_core.c
3699 +++ b/drivers/char/tpm/tpm_tis_core.c
3700 @@ -485,7 +485,7 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
3701 goto out_err;
3702 }
3703 }
3704 - return len;
3705 + return 0;
3706 out_err:
3707 tpm_tis_ready(chip);
3708 return rc;
3709 diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
3710 index 87a0ce47f201..ecbb63f8d231 100644
3711 --- a/drivers/char/tpm/tpm_vtpm_proxy.c
3712 +++ b/drivers/char/tpm/tpm_vtpm_proxy.c
3713 @@ -335,7 +335,6 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
3714 static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
3715 {
3716 struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
3717 - int rc = 0;
3718
3719 if (count > sizeof(proxy_dev->buffer)) {
3720 dev_err(&chip->dev,
3721 @@ -366,7 +365,7 @@ static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
3722
3723 wake_up_interruptible(&proxy_dev->wq);
3724
3725 - return rc;
3726 + return 0;
3727 }
3728
3729 static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
3730 diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
3731 index b150f87f38f5..5a327eb7f63a 100644
3732 --- a/drivers/char/tpm/xen-tpmfront.c
3733 +++ b/drivers/char/tpm/xen-tpmfront.c
3734 @@ -173,7 +173,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
3735 return -ETIME;
3736 }
3737
3738 - return count;
3739 + return 0;
3740 }
3741
3742 static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
3743 diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
3744 index 25dfe050ae9f..4bd1b32a4f93 100644
3745 --- a/drivers/clk/clk-twl6040.c
3746 +++ b/drivers/clk/clk-twl6040.c
3747 @@ -41,6 +41,43 @@ static int twl6040_pdmclk_is_prepared(struct clk_hw *hw)
3748 return pdmclk->enabled;
3749 }
3750
3751 +static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk,
3752 + unsigned int reg)
3753 +{
3754 + const u8 reset_mask = TWL6040_HPLLRST; /* Same for HPPLL and LPPLL */
3755 + int ret;
3756 +
3757 + ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask);
3758 + if (ret < 0)
3759 + return ret;
3760 +
3761 + ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask);
3762 + if (ret < 0)
3763 + return ret;
3764 +
3765 + return 0;
3766 +}
3767 +
3768 +/*
3769 + * TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At
3770 + * Cold Temperature". This affects cold boot and deeper idle states it
3771 + * seems. The workaround consists of resetting HPPLL and LPPLL.
3772 + */
3773 +static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk)
3774 +{
3775 + int ret;
3776 +
3777 + ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL);
3778 + if (ret)
3779 + return ret;
3780 +
3781 + ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL);
3782 + if (ret)
3783 + return ret;
3784 +
3785 + return 0;
3786 +}
3787 +
3788 static int twl6040_pdmclk_prepare(struct clk_hw *hw)
3789 {
3790 struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
3791 @@ -48,8 +85,20 @@ static int twl6040_pdmclk_prepare(struct clk_hw *hw)
3792 int ret;
3793
3794 ret = twl6040_power(pdmclk->twl6040, 1);
3795 - if (!ret)
3796 - pdmclk->enabled = 1;
3797 + if (ret)
3798 + return ret;
3799 +
3800 + ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk);
3801 + if (ret)
3802 + goto out_err;
3803 +
3804 + pdmclk->enabled = 1;
3805 +
3806 + return 0;
3807 +
3808 +out_err:
3809 + dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret);
3810 + twl6040_power(pdmclk->twl6040, 0);
3811
3812 return ret;
3813 }
3814 diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
3815 index 5ef7d9ba2195..b40160eb3372 100644
3816 --- a/drivers/clk/ingenic/cgu.c
3817 +++ b/drivers/clk/ingenic/cgu.c
3818 @@ -426,16 +426,16 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
3819 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
3820 struct ingenic_cgu *cgu = ingenic_clk->cgu;
3821 const struct ingenic_cgu_clk_info *clk_info;
3822 - long rate = *parent_rate;
3823 + unsigned int div = 1;
3824
3825 clk_info = &cgu->clock_info[ingenic_clk->idx];
3826
3827 if (clk_info->type & CGU_CLK_DIV)
3828 - rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
3829 + div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
3830 else if (clk_info->type & CGU_CLK_FIXDIV)
3831 - rate /= clk_info->fixdiv.div;
3832 + div = clk_info->fixdiv.div;
3833
3834 - return rate;
3835 + return DIV_ROUND_UP(*parent_rate, div);
3836 }
3837
3838 static int
3839 @@ -455,7 +455,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
3840
3841 if (clk_info->type & CGU_CLK_DIV) {
3842 div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
3843 - rate = parent_rate / div;
3844 + rate = DIV_ROUND_UP(parent_rate, div);
3845
3846 if (rate != req_rate)
3847 return -EINVAL;
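The failure mode fixed here is an internal inconsistency: clk_set_rate() first asks round_rate() for the achievable rate, then set_rate() recomputes the rate from the chosen divider; if one path truncates while the other rounds up, the recomputed rate differs by 1 Hz and set_rate() bails with -EINVAL. Both paths now use DIV_ROUND_UP(). A runnable demonstration of the off-by-one:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long parent = 48000000, div = 7;

            /* trunc=6857142, ceil=6857143: if round_rate() reports one
             * and set_rate() recomputes the other, they never match. */
            printf("trunc=%lu ceil=%lu\n", parent / div,
                   DIV_ROUND_UP(parent, div));
            return 0;
    }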
3848 diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
3849 index 502bcbb61b04..e12716d8ce3c 100644
3850 --- a/drivers/clk/ingenic/cgu.h
3851 +++ b/drivers/clk/ingenic/cgu.h
3852 @@ -80,7 +80,7 @@ struct ingenic_cgu_mux_info {
3853 * @reg: offset of the divider control register within the CGU
3854 * @shift: number of bits to left shift the divide value by (ie. the index of
3855 * the lowest bit of the divide value within its control register)
3856 - * @div: number of bits to divide the divider value by (i.e. if the
3857 + * @div: number to divide the divider value by (i.e. if the
3858 * effective divider value is the value written to the register
3859 * multiplied by some constant)
3860 * @bits: the size of the divide value in bits
3861 diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
3862 index 93306283d764..8ae44b5db4c2 100644
3863 --- a/drivers/clk/samsung/clk-exynos5-subcmu.c
3864 +++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
3865 @@ -136,15 +136,20 @@ static int __init exynos5_clk_register_subcmu(struct device *parent,
3866 {
3867 struct of_phandle_args genpdspec = { .np = pd_node };
3868 struct platform_device *pdev;
3869 + int ret;
3870 +
3871 + pdev = platform_device_alloc("exynos5-subcmu", PLATFORM_DEVID_AUTO);
3872 + if (!pdev)
3873 + return -ENOMEM;
3874
3875 - pdev = platform_device_alloc(info->pd_name, -1);
3876 pdev->dev.parent = parent;
3877 - pdev->driver_override = "exynos5-subcmu";
3878 platform_set_drvdata(pdev, (void *)info);
3879 of_genpd_add_device(&genpdspec, &pdev->dev);
3880 - platform_device_add(pdev);
3881 + ret = platform_device_add(pdev);
3882 + if (ret)
3883 + platform_device_put(pdev);
3884
3885 - return 0;
3886 + return ret;
3887 }
3888
3889 static int __init exynos5_clk_probe(struct platform_device *pdev)
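The rewrite follows the canonical two-phase device creation pattern: platform_device_alloc() builds an unregistered device, configuration happens while it is still private, and platform_device_add() publishes it; on registration failure the half-built device is released with platform_device_put(), never a bare kfree(). PLATFORM_DEVID_AUTO also replaces the fixed id so several sub-CMU devices can coexist. A minimal sketch of the pattern ("example-subdev" is a placeholder name):

    #include <linux/platform_device.h>

    int register_child(struct device *parent, void *data)
    {
            struct platform_device *pdev;
            int ret;

            pdev = platform_device_alloc("example-subdev",
                                         PLATFORM_DEVID_AUTO);
            if (!pdev)
                    return -ENOMEM;

            pdev->dev.parent = parent;      /* configure while private */
            platform_set_drvdata(pdev, data);

            ret = platform_device_add(pdev);        /* publish */
            if (ret)
                    platform_device_put(pdev);      /* drop alloc ref */

            return ret;
    }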
3890 diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
3891 index 3b97f60540ad..609970c0b666 100644
3892 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
3893 +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
3894 @@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1",
3895 static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1",
3896 0x060, BIT(10), 0);
3897 static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1",
3898 - 0x060, BIT(12), 0);
3899 + 0x060, BIT(11), 0);
3900 static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1",
3901 - 0x060, BIT(13), 0);
3902 + 0x060, BIT(12), 0);
3903 static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1",
3904 0x060, BIT(13), 0);
3905 static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1",
3906 diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
3907 index 621b1cd996db..ac12f261f8ca 100644
3908 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
3909 +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
3910 @@ -542,7 +542,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
3911 [RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
3912
3913 [RST_BUS_VE] = { 0x2c4, BIT(0) },
3914 - [RST_BUS_TCON0] = { 0x2c4, BIT(3) },
3915 + [RST_BUS_TCON0] = { 0x2c4, BIT(4) },
3916 [RST_BUS_CSI] = { 0x2c4, BIT(8) },
3917 [RST_BUS_DE] = { 0x2c4, BIT(12) },
3918 [RST_BUS_DBG] = { 0x2c4, BIT(31) },
3919 diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c
3920 index ec11f55594ad..5d2d42b7e182 100644
3921 --- a/drivers/clk/uniphier/clk-uniphier-cpugear.c
3922 +++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c
3923 @@ -47,7 +47,7 @@ static int uniphier_clk_cpugear_set_parent(struct clk_hw *hw, u8 index)
3924 return ret;
3925
3926 ret = regmap_write_bits(gear->regmap,
3927 - gear->regbase + UNIPHIER_CLK_CPUGEAR_SET,
3928 + gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD,
3929 UNIPHIER_CLK_CPUGEAR_UPD_BIT,
3930 UNIPHIER_CLK_CPUGEAR_UPD_BIT);
3931 if (ret)
3932 diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
3933 index 316d48d7be72..c1ddafa4c299 100644
3934 --- a/drivers/clocksource/Kconfig
3935 +++ b/drivers/clocksource/Kconfig
3936 @@ -365,6 +365,16 @@ config ARM64_ERRATUM_858921
3937 The workaround will be dynamically enabled when an affected
3938 core is detected.
3939
3940 +config SUN50I_ERRATUM_UNKNOWN1
3941 + bool "Workaround for Allwinner A64 erratum UNKNOWN1"
3942 + default y
3943 + depends on ARM_ARCH_TIMER && ARM64 && ARCH_SUNXI
3944 + select ARM_ARCH_TIMER_OOL_WORKAROUND
3945 + help
3946 + This option enables a workaround for instability in the timer on
3947 + the Allwinner A64 SoC. The workaround will only be active if the
3948 + allwinner,erratum-unknown1 property is found in the timer node.
3949 +
3950 config ARM_GLOBAL_TIMER
3951 bool "Support for the ARM global timer" if COMPILE_TEST
3952 select TIMER_OF if OF
3953 diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
3954 index d8c7f5750cdb..0445ad7e559e 100644
3955 --- a/drivers/clocksource/arm_arch_timer.c
3956 +++ b/drivers/clocksource/arm_arch_timer.c
3957 @@ -319,6 +319,48 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
3958 }
3959 #endif
3960
3961 +#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
3962 +/*
3963 + * The low bits of the counter registers are indeterminate while bit 10 or
3964 + * greater is rolling over. Since the counter value can jump both backward
3965 + * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
3966 + * with all ones or all zeros in the low bits. Bound the loop by the maximum
3967 + * number of CPU cycles in 3 consecutive 24 MHz counter periods.
3968 + */
3969 +#define __sun50i_a64_read_reg(reg) ({ \
3970 + u64 _val; \
3971 + int _retries = 150; \
3972 + \
3973 + do { \
3974 + _val = read_sysreg(reg); \
3975 + _retries--; \
3976 + } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
3977 + \
3978 + WARN_ON_ONCE(!_retries); \
3979 + _val; \
3980 +})
3981 +
3982 +static u64 notrace sun50i_a64_read_cntpct_el0(void)
3983 +{
3984 + return __sun50i_a64_read_reg(cntpct_el0);
3985 +}
3986 +
3987 +static u64 notrace sun50i_a64_read_cntvct_el0(void)
3988 +{
3989 + return __sun50i_a64_read_reg(cntvct_el0);
3990 +}
3991 +
3992 +static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
3993 +{
3994 + return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
3995 +}
3996 +
3997 +static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
3998 +{
3999 + return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
4000 +}
4001 +#endif
4002 +
4003 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
4004 DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
4005 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
4006 @@ -408,6 +450,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
4007 .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
4008 },
4009 #endif
4010 +#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
4011 + {
4012 + .match_type = ate_match_dt,
4013 + .id = "allwinner,erratum-unknown1",
4014 + .desc = "Allwinner erratum UNKNOWN1",
4015 + .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
4016 + .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
4017 + .read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
4018 + .read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
4019 + .set_next_event_phys = erratum_set_next_event_tval_phys,
4020 + .set_next_event_virt = erratum_set_next_event_tval_virt,
4021 + },
4022 +#endif
4023 };
4024
4025 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
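The guard expression in the workaround is worth decoding: after adding 1, the two suspect patterns in the low 10 bits collapse onto 0 and 1 (all-ones 0x3ff wraps to 0x000, all-zeros becomes 0x001), so a single <= 1 comparison rejects both, and 150 retries bound the loop to roughly three 24 MHz counter periods. A runnable check of the predicate:

    #include <stdio.h>
    #include <stdint.h>

    #define LOW10 0x3ffULL          /* GENMASK(9, 0) */

    static int suspect(uint64_t val)
    {
            /* True iff the low 10 bits are all zeros or all ones. */
            return ((val + 1) & LOW10) <= 1;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   suspect(0x12343ff),  /* 1: low bits all ones  */
                   suspect(0x1234000),  /* 1: low bits all zeros */
                   suspect(0x1234001)); /* 0: plausible value    */
            return 0;
    }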
4026 diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
4027 index 7a244b681876..d55c30f6981d 100644
4028 --- a/drivers/clocksource/exynos_mct.c
4029 +++ b/drivers/clocksource/exynos_mct.c
4030 @@ -388,6 +388,13 @@ static void exynos4_mct_tick_start(unsigned long cycles,
4031 exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
4032 }
4033
4034 +static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
4035 +{
4036 + /* Clear the MCT tick interrupt */
4037 + if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
4038 + exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
4039 +}
4040 +
4041 static int exynos4_tick_set_next_event(unsigned long cycles,
4042 struct clock_event_device *evt)
4043 {
4044 @@ -404,6 +411,7 @@ static int set_state_shutdown(struct clock_event_device *evt)
4045
4046 mevt = container_of(evt, struct mct_clock_event_device, evt);
4047 exynos4_mct_tick_stop(mevt);
4048 + exynos4_mct_tick_clear(mevt);
4049 return 0;
4050 }
4051
4052 @@ -420,8 +428,11 @@ static int set_state_periodic(struct clock_event_device *evt)
4053 return 0;
4054 }
4055
4056 -static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
4057 +static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
4058 {
4059 + struct mct_clock_event_device *mevt = dev_id;
4060 + struct clock_event_device *evt = &mevt->evt;
4061 +
4062 /*
4063 * This is for supporting oneshot mode.
4064 * Mct would generate interrupt periodically
4065 @@ -430,16 +441,6 @@ static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
4066 if (!clockevent_state_periodic(&mevt->evt))
4067 exynos4_mct_tick_stop(mevt);
4068
4069 - /* Clear the MCT tick interrupt */
4070 - if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
4071 - exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
4072 -}
4073 -
4074 -static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
4075 -{
4076 - struct mct_clock_event_device *mevt = dev_id;
4077 - struct clock_event_device *evt = &mevt->evt;
4078 -
4079 exynos4_mct_tick_clear(mevt);
4080
4081 evt->event_handler(evt);
4082 diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
4083 index 4cce6b224b87..3ecf84706640 100644
4084 --- a/drivers/clocksource/timer-ti-dm.c
4085 +++ b/drivers/clocksource/timer-ti-dm.c
4086 @@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer)
4087 if (IS_ERR(parent))
4088 return -ENODEV;
4089
4090 + /* Bail out if both clocks point to fck */
4091 + if (clk_is_match(parent, timer->fclk))
4092 + return 0;
4093 +
4094 ret = clk_set_parent(timer->fclk, parent);
4095 if (ret < 0)
4096 pr_err("%s: failed to set parent\n", __func__);
4097 diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
4098 index 46254e583982..74e0e0c20c46 100644
4099 --- a/drivers/cpufreq/pxa2xx-cpufreq.c
4100 +++ b/drivers/cpufreq/pxa2xx-cpufreq.c
4101 @@ -143,7 +143,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
4102 return ret;
4103 }
4104
4105 -static void __init pxa_cpufreq_init_voltages(void)
4106 +static void pxa_cpufreq_init_voltages(void)
4107 {
4108 vcc_core = regulator_get(NULL, "vcc_core");
4109 if (IS_ERR(vcc_core)) {
4110 @@ -159,7 +159,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
4111 return 0;
4112 }
4113
4114 -static void __init pxa_cpufreq_init_voltages(void) { }
4115 +static void pxa_cpufreq_init_voltages(void) { }
4116 #endif
4117
4118 static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
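Why dropping __init matters: the annotation places a function in .init.text, which the kernel discards once boot finishes, and these helpers are reachable from the cpufreq ->init callback, which can also run much later (CPU hotplug, driver rebind). Keeping the annotation turned a late call into a jump into freed memory. The rule of thumb, as a sketch:

    #include <linux/init.h>

    /* OK: only ever called from the boot-time init path. */
    static int __init boot_only_setup(void) { return 0; }

    /* Must NOT be __init: a hotplug or rebind path can call it after
     * .init.text has been freed. */
    static int runtime_reachable_setup(void) { return 0; }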
4119 diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
4120 index 2a3675c24032..a472b814058f 100644
4121 --- a/drivers/cpufreq/qcom-cpufreq-kryo.c
4122 +++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
4123 @@ -75,7 +75,7 @@ static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
4124
4125 static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
4126 {
4127 - struct opp_table *opp_tables[NR_CPUS] = {0};
4128 + struct opp_table **opp_tables;
4129 enum _msm8996_version msm8996_version;
4130 struct nvmem_cell *speedbin_nvmem;
4131 struct device_node *np;
4132 @@ -133,6 +133,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
4133 }
4134 kfree(speedbin);
4135
4136 + opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL);
4137 + if (!opp_tables)
4138 + return -ENOMEM;
4139 +
4140 for_each_possible_cpu(cpu) {
4141 cpu_dev = get_cpu_device(cpu);
4142 if (NULL == cpu_dev) {
4143 @@ -151,8 +155,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
4144
4145 cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
4146 NULL, 0);
4147 - if (!IS_ERR(cpufreq_dt_pdev))
4148 + if (!IS_ERR(cpufreq_dt_pdev)) {
4149 + platform_set_drvdata(pdev, opp_tables);
4150 return 0;
4151 + }
4152
4153 ret = PTR_ERR(cpufreq_dt_pdev);
4154 dev_err(cpu_dev, "Failed to register platform device\n");
4155 @@ -163,13 +169,23 @@ free_opp:
4156 break;
4157 dev_pm_opp_put_supported_hw(opp_tables[cpu]);
4158 }
4159 + kfree(opp_tables);
4160
4161 return ret;
4162 }
4163
4164 static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
4165 {
4166 + struct opp_table **opp_tables = platform_get_drvdata(pdev);
4167 + unsigned int cpu;
4168 +
4169 platform_device_unregister(cpufreq_dt_pdev);
4170 +
4171 + for_each_possible_cpu(cpu)
4172 + dev_pm_opp_put_supported_hw(opp_tables[cpu]);
4173 +
4174 + kfree(opp_tables);
4175 +
4176 return 0;
4177 }
4178
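The old struct opp_table *opp_tables[NR_CPUS] lived on the kernel stack, and NR_CPUS is a compile-time ceiling that can reach into the thousands on distro configs, far more than a kernel stack tolerates. The fix sizes the array by num_possible_cpus() on the heap and parks it in drvdata so remove() can drop the per-CPU OPP references and free it. The allocation, as a sketch:

    #include <linux/slab.h>
    #include <linux/cpumask.h>

    struct opp_table;       /* opaque to this sketch */

    struct opp_table **alloc_opp_tables(void)
    {
            /* Sized by the CPUs this system can actually have, off the
             * stack; kcalloc also zeroes the entries. */
            return kcalloc(num_possible_cpus(),
                           sizeof(struct opp_table *), GFP_KERNEL);
    }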
4179 diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
4180 index 43530254201a..4bb154f6c54c 100644
4181 --- a/drivers/cpufreq/tegra124-cpufreq.c
4182 +++ b/drivers/cpufreq/tegra124-cpufreq.c
4183 @@ -134,6 +134,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
4184
4185 platform_set_drvdata(pdev, priv);
4186
4187 + of_node_put(np);
4188 +
4189 return 0;
4190
4191 out_switch_to_pllx:
4192 diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
4193 index ec40f991e6c6..9bc54c3c2cb9 100644
4194 --- a/drivers/crypto/caam/caamalg.c
4195 +++ b/drivers/crypto/caam/caamalg.c
4196 @@ -1005,6 +1005,7 @@ static void init_aead_job(struct aead_request *req,
4197 if (unlikely(req->src != req->dst)) {
4198 if (edesc->dst_nents == 1) {
4199 dst_dma = sg_dma_address(req->dst);
4200 + out_options = 0;
4201 } else {
4202 dst_dma = edesc->sec4_sg_dma +
4203 sec4_sg_index *
4204 diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
4205 index f84ca2ff61de..f5fd00065650 100644
4206 --- a/drivers/crypto/caam/caamhash.c
4207 +++ b/drivers/crypto/caam/caamhash.c
4208 @@ -118,6 +118,7 @@ struct caam_hash_ctx {
4209 struct caam_hash_state {
4210 dma_addr_t buf_dma;
4211 dma_addr_t ctx_dma;
4212 + int ctx_dma_len;
4213 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
4214 int buflen_0;
4215 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
4216 @@ -170,6 +171,7 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
4217 struct caam_hash_state *state,
4218 int ctx_len)
4219 {
4220 + state->ctx_dma_len = ctx_len;
4221 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
4222 ctx_len, DMA_FROM_DEVICE);
4223 if (dma_mapping_error(jrdev, state->ctx_dma)) {
4224 @@ -183,18 +185,6 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
4225 return 0;
4226 }
4227
4228 -/* Map req->result, and append seq_out_ptr command that points to it */
4229 -static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
4230 - u8 *result, int digestsize)
4231 -{
4232 - dma_addr_t dst_dma;
4233 -
4234 - dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
4235 - append_seq_out_ptr(desc, dst_dma, digestsize, 0);
4236 -
4237 - return dst_dma;
4238 -}
4239 -
4240 /* Map current buffer in state (if length > 0) and put it in link table */
4241 static inline int buf_map_to_sec4_sg(struct device *jrdev,
4242 struct sec4_sg_entry *sec4_sg,
4243 @@ -223,6 +213,7 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
4244 struct caam_hash_state *state, int ctx_len,
4245 struct sec4_sg_entry *sec4_sg, u32 flag)
4246 {
4247 + state->ctx_dma_len = ctx_len;
4248 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
4249 if (dma_mapping_error(jrdev, state->ctx_dma)) {
4250 dev_err(jrdev, "unable to map ctx\n");
4251 @@ -485,7 +476,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
4252
4253 /*
4254 * ahash_edesc - s/w-extended ahash descriptor
4255 - * @dst_dma: physical mapped address of req->result
4256 * @sec4_sg_dma: physical mapped address of h/w link table
4257 * @src_nents: number of segments in input scatterlist
4258 * @sec4_sg_bytes: length of dma mapped sec4_sg space
4259 @@ -493,7 +483,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
4260 * @sec4_sg: h/w link table
4261 */
4262 struct ahash_edesc {
4263 - dma_addr_t dst_dma;
4264 dma_addr_t sec4_sg_dma;
4265 int src_nents;
4266 int sec4_sg_bytes;
4267 @@ -509,8 +498,6 @@ static inline void ahash_unmap(struct device *dev,
4268
4269 if (edesc->src_nents)
4270 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
4271 - if (edesc->dst_dma)
4272 - dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
4273
4274 if (edesc->sec4_sg_bytes)
4275 dma_unmap_single(dev, edesc->sec4_sg_dma,
4276 @@ -527,12 +514,10 @@ static inline void ahash_unmap_ctx(struct device *dev,
4277 struct ahash_edesc *edesc,
4278 struct ahash_request *req, int dst_len, u32 flag)
4279 {
4280 - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4281 - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4282 struct caam_hash_state *state = ahash_request_ctx(req);
4283
4284 if (state->ctx_dma) {
4285 - dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
4286 + dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
4287 state->ctx_dma = 0;
4288 }
4289 ahash_unmap(dev, edesc, req, dst_len);
4290 @@ -545,9 +530,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
4291 struct ahash_edesc *edesc;
4292 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4293 int digestsize = crypto_ahash_digestsize(ahash);
4294 + struct caam_hash_state *state = ahash_request_ctx(req);
4295 #ifdef DEBUG
4296 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4297 - struct caam_hash_state *state = ahash_request_ctx(req);
4298
4299 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
4300 #endif
4301 @@ -556,17 +541,14 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
4302 if (err)
4303 caam_jr_strstatus(jrdev, err);
4304
4305 - ahash_unmap(jrdev, edesc, req, digestsize);
4306 + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
4307 + memcpy(req->result, state->caam_ctx, digestsize);
4308 kfree(edesc);
4309
4310 #ifdef DEBUG
4311 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
4312 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
4313 ctx->ctx_len, 1);
4314 - if (req->result)
4315 - print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
4316 - DUMP_PREFIX_ADDRESS, 16, 4, req->result,
4317 - digestsize, 1);
4318 #endif
4319
4320 req->base.complete(&req->base, err);
4321 @@ -614,9 +596,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
4322 struct ahash_edesc *edesc;
4323 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4324 int digestsize = crypto_ahash_digestsize(ahash);
4325 + struct caam_hash_state *state = ahash_request_ctx(req);
4326 #ifdef DEBUG
4327 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4328 - struct caam_hash_state *state = ahash_request_ctx(req);
4329
4330 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
4331 #endif
4332 @@ -625,17 +607,14 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
4333 if (err)
4334 caam_jr_strstatus(jrdev, err);
4335
4336 - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
4337 + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
4338 + memcpy(req->result, state->caam_ctx, digestsize);
4339 kfree(edesc);
4340
4341 #ifdef DEBUG
4342 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
4343 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
4344 ctx->ctx_len, 1);
4345 - if (req->result)
4346 - print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
4347 - DUMP_PREFIX_ADDRESS, 16, 4, req->result,
4348 - digestsize, 1);
4349 #endif
4350
4351 req->base.complete(&req->base, err);
4352 @@ -896,7 +875,7 @@ static int ahash_final_ctx(struct ahash_request *req)
4353 edesc->sec4_sg_bytes = sec4_sg_bytes;
4354
4355 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
4356 - edesc->sec4_sg, DMA_TO_DEVICE);
4357 + edesc->sec4_sg, DMA_BIDIRECTIONAL);
4358 if (ret)
4359 goto unmap_ctx;
4360
4361 @@ -916,14 +895,7 @@ static int ahash_final_ctx(struct ahash_request *req)
4362
4363 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
4364 LDST_SGF);
4365 -
4366 - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
4367 - digestsize);
4368 - if (dma_mapping_error(jrdev, edesc->dst_dma)) {
4369 - dev_err(jrdev, "unable to map dst\n");
4370 - ret = -ENOMEM;
4371 - goto unmap_ctx;
4372 - }
4373 + append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
4374
4375 #ifdef DEBUG
4376 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
4377 @@ -936,7 +908,7 @@ static int ahash_final_ctx(struct ahash_request *req)
4378
4379 return -EINPROGRESS;
4380 unmap_ctx:
4381 - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
4382 + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
4383 kfree(edesc);
4384 return ret;
4385 }
4386 @@ -990,7 +962,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
4387 edesc->src_nents = src_nents;
4388
4389 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
4390 - edesc->sec4_sg, DMA_TO_DEVICE);
4391 + edesc->sec4_sg, DMA_BIDIRECTIONAL);
4392 if (ret)
4393 goto unmap_ctx;
4394
4395 @@ -1004,13 +976,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
4396 if (ret)
4397 goto unmap_ctx;
4398
4399 - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
4400 - digestsize);
4401 - if (dma_mapping_error(jrdev, edesc->dst_dma)) {
4402 - dev_err(jrdev, "unable to map dst\n");
4403 - ret = -ENOMEM;
4404 - goto unmap_ctx;
4405 - }
4406 + append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
4407
4408 #ifdef DEBUG
4409 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
4410 @@ -1023,7 +989,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
4411
4412 return -EINPROGRESS;
4413 unmap_ctx:
4414 - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
4415 + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
4416 kfree(edesc);
4417 return ret;
4418 }
4419 @@ -1082,10 +1048,8 @@ static int ahash_digest(struct ahash_request *req)
4420
4421 desc = edesc->hw_desc;
4422
4423 - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
4424 - digestsize);
4425 - if (dma_mapping_error(jrdev, edesc->dst_dma)) {
4426 - dev_err(jrdev, "unable to map dst\n");
4427 + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
4428 + if (ret) {
4429 ahash_unmap(jrdev, edesc, req, digestsize);
4430 kfree(edesc);
4431 return -ENOMEM;
4432 @@ -1100,7 +1064,7 @@ static int ahash_digest(struct ahash_request *req)
4433 if (!ret) {
4434 ret = -EINPROGRESS;
4435 } else {
4436 - ahash_unmap(jrdev, edesc, req, digestsize);
4437 + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
4438 kfree(edesc);
4439 }
4440
4441 @@ -1142,12 +1106,9 @@ static int ahash_final_no_ctx(struct ahash_request *req)
4442 append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
4443 }
4444
4445 - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
4446 - digestsize);
4447 - if (dma_mapping_error(jrdev, edesc->dst_dma)) {
4448 - dev_err(jrdev, "unable to map dst\n");
4449 + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
4450 + if (ret)
4451 goto unmap;
4452 - }
4453
4454 #ifdef DEBUG
4455 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
4456 @@ -1158,7 +1119,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
4457 if (!ret) {
4458 ret = -EINPROGRESS;
4459 } else {
4460 - ahash_unmap(jrdev, edesc, req, digestsize);
4461 + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
4462 kfree(edesc);
4463 }
4464
4465 @@ -1357,12 +1318,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
4466 goto unmap;
4467 }
4468
4469 - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
4470 - digestsize);
4471 - if (dma_mapping_error(jrdev, edesc->dst_dma)) {
4472 - dev_err(jrdev, "unable to map dst\n");
4473 + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
4474 + if (ret)
4475 goto unmap;
4476 - }
4477
4478 #ifdef DEBUG
4479 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
4480 @@ -1373,7 +1331,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
4481 if (!ret) {
4482 ret = -EINPROGRESS;
4483 } else {
4484 - ahash_unmap(jrdev, edesc, req, digestsize);
4485 + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
4486 kfree(edesc);
4487 }
4488
4489 @@ -1505,6 +1463,7 @@ static int ahash_init(struct ahash_request *req)
4490 state->final = ahash_final_no_ctx;
4491
4492 state->ctx_dma = 0;
4493 + state->ctx_dma_len = 0;
4494 state->current_buf = 0;
4495 state->buf_dma = 0;
4496 state->buflen_0 = 0;
4497 diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
4498 index dd948e1df9e5..3bcb6bce666e 100644
4499 --- a/drivers/crypto/ccree/cc_buffer_mgr.c
4500 +++ b/drivers/crypto/ccree/cc_buffer_mgr.c
4501 @@ -614,10 +614,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
4502 hw_iv_size, DMA_BIDIRECTIONAL);
4503 }
4504
4505 - /*In case a pool was set, a table was
4506 - *allocated and should be released
4507 - */
4508 - if (areq_ctx->mlli_params.curr_pool) {
4509 + /* Release pool */
4510 + if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
4511 + areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
4512 + (areq_ctx->mlli_params.mlli_virt_addr)) {
4513 dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
4514 &areq_ctx->mlli_params.mlli_dma_addr,
4515 areq_ctx->mlli_params.mlli_virt_addr);
4516 diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
4517 index 7623b29911af..54a39164aab8 100644
4518 --- a/drivers/crypto/ccree/cc_cipher.c
4519 +++ b/drivers/crypto/ccree/cc_cipher.c
4520 @@ -79,6 +79,7 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
4521 default:
4522 break;
4523 }
4524 + break;
4525 case S_DIN_to_DES:
4526 if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
4527 return 0;
4528 @@ -634,6 +635,8 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
4529 unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
4530 unsigned int len;
4531
4532 + cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
4533 +
4534 switch (ctx_p->cipher_mode) {
4535 case DRV_CIPHER_CBC:
4536 /*
4537 @@ -663,7 +666,6 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
4538 break;
4539 }
4540
4541 - cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
4542 kzfree(req_ctx->iv);
4543
4544 skcipher_request_complete(req, err);
4545 @@ -781,7 +783,8 @@ static int cc_cipher_decrypt(struct skcipher_request *req)
4546
4547 memset(req_ctx, 0, sizeof(*req_ctx));
4548
4549 - if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
4550 + if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
4551 + (req->cryptlen >= ivsize)) {
4552
4553 /* Allocate and save the last IV sized bytes of the source,
4554 * which will be lost in case of in-place decryption.
4555 diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
4556 index c9d622abd90c..0ce4a65b95f5 100644
4557 --- a/drivers/crypto/rockchip/rk3288_crypto.c
4558 +++ b/drivers/crypto/rockchip/rk3288_crypto.c
4559 @@ -119,7 +119,7 @@ static int rk_load_data(struct rk_crypto_info *dev,
4560 count = (dev->left_bytes > PAGE_SIZE) ?
4561 PAGE_SIZE : dev->left_bytes;
4562
4563 - if (!sg_pcopy_to_buffer(dev->first, dev->nents,
4564 + if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
4565 dev->addr_vir, count,
4566 dev->total - dev->left_bytes)) {
4567 dev_err(dev->dev, "[%s:%d] pcopy err\n",
4568 diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
4569 index d5fb4013fb42..54ee5b3ed9db 100644
4570 --- a/drivers/crypto/rockchip/rk3288_crypto.h
4571 +++ b/drivers/crypto/rockchip/rk3288_crypto.h
4572 @@ -207,7 +207,8 @@ struct rk_crypto_info {
4573 void *addr_vir;
4574 int aligned;
4575 int align_size;
4576 - size_t nents;
4577 + size_t src_nents;
4578 + size_t dst_nents;
4579 unsigned int total;
4580 unsigned int count;
4581 dma_addr_t addr_in;
4582 @@ -244,6 +245,7 @@ struct rk_cipher_ctx {
4583 struct rk_crypto_info *dev;
4584 unsigned int keylen;
4585 u32 mode;
4586 + u8 iv[AES_BLOCK_SIZE];
4587 };
4588
4589 enum alg_type {
4590 diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
4591 index 639c15c5364b..23305f22072f 100644
4592 --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
4593 +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
4594 @@ -242,6 +242,17 @@ static void crypto_dma_start(struct rk_crypto_info *dev)
4595 static int rk_set_data_start(struct rk_crypto_info *dev)
4596 {
4597 int err;
4598 + struct ablkcipher_request *req =
4599 + ablkcipher_request_cast(dev->async_req);
4600 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
4601 + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
4602 + u32 ivsize = crypto_ablkcipher_ivsize(tfm);
4603 + u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
4604 + dev->sg_src->offset + dev->sg_src->length - ivsize;
4605 +
4606 + /* store the IV that needs to be updated in chain mode */
4607 + if (ctx->mode & RK_CRYPTO_DEC)
4608 + memcpy(ctx->iv, src_last_blk, ivsize);
4609
4610 err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
4611 if (!err)
4612 @@ -260,8 +271,9 @@ static int rk_ablk_start(struct rk_crypto_info *dev)
4613 dev->total = req->nbytes;
4614 dev->sg_src = req->src;
4615 dev->first = req->src;
4616 - dev->nents = sg_nents(req->src);
4617 + dev->src_nents = sg_nents(req->src);
4618 dev->sg_dst = req->dst;
4619 + dev->dst_nents = sg_nents(req->dst);
4620 dev->aligned = 1;
4621
4622 spin_lock_irqsave(&dev->lock, flags);
4623 @@ -285,6 +297,28 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
4624 memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
4625 }
4626
4627 +static void rk_update_iv(struct rk_crypto_info *dev)
4628 +{
4629 + struct ablkcipher_request *req =
4630 + ablkcipher_request_cast(dev->async_req);
4631 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
4632 + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
4633 + u32 ivsize = crypto_ablkcipher_ivsize(tfm);
4634 + u8 *new_iv = NULL;
4635 +
4636 + if (ctx->mode & RK_CRYPTO_DEC) {
4637 + new_iv = ctx->iv;
4638 + } else {
4639 + new_iv = page_address(sg_page(dev->sg_dst)) +
4640 + dev->sg_dst->offset + dev->sg_dst->length - ivsize;
4641 + }
4642 +
4643 + if (ivsize == DES_BLOCK_SIZE)
4644 + memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
4645 + else if (ivsize == AES_BLOCK_SIZE)
4646 + memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
4647 +}
4648 +
4649 /* return:
4650 * true: an error occurred
4651 * false: no error, continue
4652 @@ -297,7 +331,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
4653
4654 dev->unload_data(dev);
4655 if (!dev->aligned) {
4656 - if (!sg_pcopy_from_buffer(req->dst, dev->nents,
4657 + if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
4658 dev->addr_vir, dev->count,
4659 dev->total - dev->left_bytes -
4660 dev->count)) {
4661 @@ -306,6 +340,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
4662 }
4663 }
4664 if (dev->left_bytes) {
4665 + rk_update_iv(dev);
4666 if (dev->aligned) {
4667 if (sg_is_last(dev->sg_src)) {
4668 dev_err(dev->dev, "[%s:%d] Lack of data\n",
4669 diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
4670 index 821a506b9e17..c336ae75e361 100644
4671 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
4672 +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
4673 @@ -206,7 +206,7 @@ static int rk_ahash_start(struct rk_crypto_info *dev)
4674 dev->sg_dst = NULL;
4675 dev->sg_src = req->src;
4676 dev->first = req->src;
4677 - dev->nents = sg_nents(req->src);
4678 + dev->src_nents = sg_nents(req->src);
4679 rctx = ahash_request_ctx(req);
4680 rctx->mode = 0;
4681
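The underlying rule: when a CBC request is processed in chunks, the IV for the next chunk is the last ciphertext block of the previous one, the just-written output block when encrypting, but the original input block when decrypting, which in-place decryption destroys. That is why rk_set_data_start() squirrels the source's last block away in ctx->iv before the engine runs. The chaining rule as a plain-C sketch (names and the 16-byte block size are assumptions):

    #include <string.h>

    #define BLK 16

    /* Compute the IV for the next chunk of a CBC stream split across
     * multiple hardware passes. */
    void next_cbc_iv(unsigned char iv[BLK], const unsigned char *out_chunk,
                     size_t chunk_len, const unsigned char *saved_src_tail,
                     int decrypting)
    {
            if (decrypting)
                    /* Input's last ciphertext block, saved before an
                     * in-place operation overwrote it. */
                    memcpy(iv, saved_src_tail, BLK);
            else
                    /* Output's last ciphertext block. */
                    memcpy(iv, out_chunk + chunk_len - BLK, BLK);
    }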
4682 diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
4683 index 1bb1a8e09025..6c94ed750049 100644
4684 --- a/drivers/dma/sh/usb-dmac.c
4685 +++ b/drivers/dma/sh/usb-dmac.c
4686 @@ -697,6 +697,8 @@ static int usb_dmac_runtime_resume(struct device *dev)
4687 #endif /* CONFIG_PM */
4688
4689 static const struct dev_pm_ops usb_dmac_pm = {
4690 + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
4691 + pm_runtime_force_resume)
4692 SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
4693 NULL)
4694 };
4695 diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
4696 index 023a32cfac42..e0657fc72d31 100644
4697 --- a/drivers/gpio/gpio-pca953x.c
4698 +++ b/drivers/gpio/gpio-pca953x.c
4699 @@ -543,7 +543,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
4700
4701 static void pca953x_irq_shutdown(struct irq_data *d)
4702 {
4703 - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
4704 + struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
4705 + struct pca953x_chip *chip = gpiochip_get_data(gc);
4706 u8 mask = 1 << (d->hwirq % BANK_SZ);
4707
4708 chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
4709 diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
4710 index bd039322f697..6342f6499351 100644
4711 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
4712 +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
4713 @@ -1347,12 +1347,12 @@ void dcn_bw_update_from_pplib(struct dc *dc)
4714 struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
4715 bool res;
4716
4717 - kernel_fpu_begin();
4718 -
4719 /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
4720 res = dm_pp_get_clock_levels_by_type_with_voltage(
4721 ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
4722
4723 + kernel_fpu_begin();
4724 +
4725 if (res)
4726 res = verify_clock_values(&fclks);
4727
4728 @@ -1371,9 +1371,13 @@ void dcn_bw_update_from_pplib(struct dc *dc)
4729 } else
4730 BREAK_TO_DEBUGGER();
4731
4732 + kernel_fpu_end();
4733 +
4734 res = dm_pp_get_clock_levels_by_type_with_voltage(
4735 ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
4736
4737 + kernel_fpu_begin();
4738 +
4739 if (res)
4740 res = verify_clock_values(&dcfclks);
4741
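The constraint being respected here: kernel_fpu_begin() claims the FPU and disables preemption, so nothing that can sleep, such as the two powerplay queries above, may execute inside the region. The fix closes the section around each query and reopens it for the pure float math. The shape of the fix, sketched with placeholder helpers (asm/fpu/api.h is the x86 home of these calls):

    #include <asm/fpu/api.h>

    void do_float_math(void);       /* placeholder: pure FP computation */
    int query_firmware(void);       /* placeholder: may sleep */

    void split_fpu_sections(void)
    {
            int res = query_firmware();     /* outside: may sleep */

            kernel_fpu_begin();
            if (res)
                    do_float_math();        /* inside: FP only */
            kernel_fpu_end();
    }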
4742 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
4743 index 052e60dfaf9f..b52ccab428a9 100644
4744 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
4745 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
4746 @@ -3487,14 +3487,14 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
4747
4748 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
4749 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4750 - ixSMU_PM_STATUS_94, 0);
4751 + ixSMU_PM_STATUS_95, 0);
4752
4753 for (i = 0; i < 10; i++) {
4754 - mdelay(1);
4755 + mdelay(500);
4756 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
4757 tmp = cgs_read_ind_register(hwmgr->device,
4758 CGS_IND_REG__SMC,
4759 - ixSMU_PM_STATUS_94);
4760 + ixSMU_PM_STATUS_95);
4761 if (tmp != 0)
4762 break;
4763 }
4764 diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
4765 index 1bda809a7289..e65596617239 100644
4766 --- a/drivers/gpu/drm/drm_fb_helper.c
4767 +++ b/drivers/gpu/drm/drm_fb_helper.c
4768 @@ -3156,9 +3156,7 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client)
4769
4770 static int drm_fbdev_client_restore(struct drm_client_dev *client)
4771 {
4772 - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
4773 -
4774 - drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
4775 + drm_fb_helper_lastclose(client->dev);
4776
4777 return 0;
4778 }
4779 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
4780 index 280c851714e6..03cda197fb6b 100644
4781 --- a/drivers/gpu/drm/i915/i915_gem.c
4782 +++ b/drivers/gpu/drm/i915/i915_gem.c
4783 @@ -1828,7 +1828,8 @@ __vma_matches(struct vm_area_struct *vma, struct file *filp,
4784 if (vma->vm_file != filp)
4785 return false;
4786
4787 - return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
4788 + return vma->vm_start == addr &&
4789 + (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
4790 }
4791
4792 /**
4793 diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
4794 index 3bd0f8a18e74..42daa5c9ff8e 100644
4795 --- a/drivers/gpu/drm/imx/imx-ldb.c
4796 +++ b/drivers/gpu/drm/imx/imx-ldb.c
4797 @@ -651,8 +651,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
4798 int bus_format;
4799
4800 ret = of_property_read_u32(child, "reg", &i);
4801 - if (ret || i < 0 || i > 1)
4802 - return -EINVAL;
4803 + if (ret || i < 0 || i > 1) {
4804 + ret = -EINVAL;
4805 + goto free_child;
4806 + }
4807
4808 if (!of_device_is_available(child))
4809 continue;
4810 @@ -665,7 +667,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
4811 channel = &imx_ldb->channel[i];
4812 channel->ldb = imx_ldb;
4813 channel->chno = i;
4814 - channel->child = child;
4815
4816 /*
4817 * The output port is port@4 with an external 4-port mux or
4818 @@ -675,13 +676,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
4819 imx_ldb->lvds_mux ? 4 : 2, 0,
4820 &channel->panel, &channel->bridge);
4821 if (ret && ret != -ENODEV)
4822 - return ret;
4823 + goto free_child;
4824
4825 /* panel ddc only if there is no bridge */
4826 if (!channel->bridge) {
4827 ret = imx_ldb_panel_ddc(dev, channel, child);
4828 if (ret)
4829 - return ret;
4830 + goto free_child;
4831 }
4832
4833 bus_format = of_get_bus_format(dev, child);
4834 @@ -697,18 +698,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
4835 if (bus_format < 0) {
4836 dev_err(dev, "could not determine data mapping: %d\n",
4837 bus_format);
4838 - return bus_format;
4839 + ret = bus_format;
4840 + goto free_child;
4841 }
4842 channel->bus_format = bus_format;
4843 + channel->child = child;
4844
4845 ret = imx_ldb_register(drm, channel);
4846 - if (ret)
4847 - return ret;
4848 + if (ret) {
4849 + channel->child = NULL;
4850 + goto free_child;
4851 + }
4852 }
4853
4854 dev_set_drvdata(dev, imx_ldb);
4855
4856 return 0;
4857 +
4858 +free_child:
4859 + of_node_put(child);
4860 + return ret;
4861 }
4862
4863 static void imx_ldb_unbind(struct device *dev, struct device *master,
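
The imx-ldb hunk reworks every early return inside the child loop into a goto free_child so the device_node reference held by the OF iterator gets dropped. The general shape as a sketch; setup_one_child() is a placeholder for the per-child work:

#include <linux/of.h>

static int bind_children(struct device_node *parent)
{
	struct device_node *child;
	int ret;

	for_each_child_of_node(parent, child) {
		ret = setup_one_child(child);	/* placeholder */
		if (ret) {
			/* the iterator took a reference on `child`;
			 * drop it when leaving the loop early */
			of_node_put(child);
			return ret;
		}
	}
	return 0;
}
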
4864 diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
4865 index 203f247d4854..a323a0db2fc1 100644
4866 --- a/drivers/gpu/drm/imx/ipuv3-plane.c
4867 +++ b/drivers/gpu/drm/imx/ipuv3-plane.c
4868 @@ -375,9 +375,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
4869 if (ret)
4870 return ret;
4871
4872 - /* CRTC should be enabled */
4873 + /* nothing to check when disabling or disabled */
4874 if (!crtc_state->enable)
4875 - return -EINVAL;
4876 + return 0;
4877
4878 switch (plane->type) {
4879 case DRM_PLANE_TYPE_PRIMARY:
4880 diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
4881 index 54324330b91f..2f0a5bd50174 100644
4882 --- a/drivers/gpu/drm/radeon/evergreen_cs.c
4883 +++ b/drivers/gpu/drm/radeon/evergreen_cs.c
4884 @@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
4885 return -EINVAL;
4886 }
4887 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
4888 + break;
4889 case CB_TARGET_MASK:
4890 track->cb_target_mask = radeon_get_ib_value(p, idx);
4891 track->cb_dirty = true;
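
The one-line evergreen fix adds a missing break, without which the CB_COLOR7_BASE case fell through and also executed the CB_TARGET_MASK handling. A runnable toy showing why the omission is silent:

#include <stdio.h>

int main(void)
{
	int reg = 1;

	switch (reg) {
	case 1:
		printf("handle case 1\n");
		break;	/* without this, control falls into case 2 */
	case 2:
		printf("handle case 2\n");
		break;
	}
	return 0;
}

Building with -Wimplicit-fallthrough is the kind of thing that tends to surface this class of bug.
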
4892 diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
4893 index 474b00e19697..0a7d4395d427 100644
4894 --- a/drivers/gpu/ipu-v3/ipu-common.c
4895 +++ b/drivers/gpu/ipu-v3/ipu-common.c
4896 @@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = {
4897 .cpmem_ofs = 0x1f000000,
4898 .srm_ofs = 0x1f040000,
4899 .tpm_ofs = 0x1f060000,
4900 - .csi0_ofs = 0x1f030000,
4901 - .csi1_ofs = 0x1f038000,
4902 + .csi0_ofs = 0x1e030000,
4903 + .csi1_ofs = 0x1e038000,
4904 .ic_ofs = 0x1e020000,
4905 .disp0_ofs = 0x1e040000,
4906 .disp1_ofs = 0x1e048000,
4907 @@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = {
4908 .cpmem_ofs = 0x07000000,
4909 .srm_ofs = 0x07040000,
4910 .tpm_ofs = 0x07060000,
4911 - .csi0_ofs = 0x07030000,
4912 - .csi1_ofs = 0x07038000,
4913 + .csi0_ofs = 0x06030000,
4914 + .csi1_ofs = 0x06038000,
4915 .ic_ofs = 0x06020000,
4916 .disp0_ofs = 0x06040000,
4917 .disp1_ofs = 0x06048000,
4918 diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
4919 index 8426b7970c14..cc287cf6eb29 100644
4920 --- a/drivers/hwtracing/intel_th/gth.c
4921 +++ b/drivers/hwtracing/intel_th/gth.c
4922 @@ -607,6 +607,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
4923 {
4924 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
4925 int port = othdev->output.port;
4926 + int master;
4927
4928 if (thdev->host_mode)
4929 return;
4930 @@ -615,6 +616,9 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
4931 othdev->output.port = -1;
4932 othdev->output.active = false;
4933 gth->output[port].output = NULL;
4934 + for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
4935 + if (gth->master[master] == port)
4936 + gth->master[master] = -1;
4937 spin_unlock(&gth->gth_lock);
4938 }
4939
4940 diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
4941 index 10bcb5d73f90..9d55e104400c 100644
4942 --- a/drivers/hwtracing/stm/core.c
4943 +++ b/drivers/hwtracing/stm/core.c
4944 @@ -244,6 +244,9 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
4945 ;
4946 if (i == width)
4947 return pos;
4948 +
4949 + /* step over [pos..pos+i) to continue search */
4950 + pos += i;
4951 }
4952
4953 return -1;
4954 @@ -550,7 +553,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
4955 {
4956 struct stm_device *stm = stmf->stm;
4957 struct stp_policy_id *id;
4958 - int ret = -EINVAL;
4959 + int ret = -EINVAL, wlimit = 1;
4960 u32 size;
4961
4962 if (stmf->output.nr_chans)
4963 @@ -578,8 +581,10 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
4964 if (id->__reserved_0 || id->__reserved_1)
4965 goto err_free;
4966
4967 - if (id->width < 1 ||
4968 - id->width > PAGE_SIZE / stm->data->sw_mmiosz)
4969 + if (stm->data->sw_mmiosz)
4970 + wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
4971 +
4972 + if (id->width < 1 || id->width > wlimit)
4973 goto err_free;
4974
4975 ret = stm_file_assign(stmf, id->id, id->width);
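
Two separate fixes land in stm/core.c above: the channel search now steps past a window it has already scanned (pos += i) instead of re-testing it one bit at a time, and the width check no longer divides by sw_mmiosz when a device reports it as zero. A simplified, userspace-runnable variant of the fixed search; this is not the driver's exact code, which also enforces alignment:

#include <limits.h>

#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

/* Find `width` consecutive zero bits in [start, end); -1 if none. */
static int find_free_run(const unsigned long *bitmap, unsigned int start,
			 unsigned int end, unsigned int width)
{
	unsigned int pos, i;

	for (pos = start; pos + width <= end; pos++) {
		for (i = 0; i < width; i++)
			if (bitmap[(pos + i) / BITS_PER_WORD] &
			    (1UL << ((pos + i) % BITS_PER_WORD)))
				break;
		if (i == width)
			return pos;
		pos += i;	/* step over [pos..pos+i): bit pos+i is set */
	}
	return -1;
}
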
4976 diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
4977 index 44deae78913e..4d19254f78c8 100644
4978 --- a/drivers/i2c/busses/i2c-bcm2835.c
4979 +++ b/drivers/i2c/busses/i2c-bcm2835.c
4980 @@ -191,6 +191,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev)
4981 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
4982 }
4983
4984 +static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev)
4985 +{
4986 + i2c_dev->curr_msg = NULL;
4987 + i2c_dev->num_msgs = 0;
4988 +
4989 + i2c_dev->msg_buf = NULL;
4990 + i2c_dev->msg_buf_remaining = 0;
4991 +}
4992 +
4993 /*
4994 * Note about I2C_C_CLEAR on error:
4995 * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in
4996 @@ -291,6 +300,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
4997
4998 time_left = wait_for_completion_timeout(&i2c_dev->completion,
4999 adap->timeout);
5000 +
5001 + bcm2835_i2c_finish_transfer(i2c_dev);
5002 +
5003 if (!time_left) {
5004 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C,
5005 BCM2835_I2C_C_CLEAR);
5006 diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
5007 index b13605718291..d917cefc5a19 100644
5008 --- a/drivers/i2c/busses/i2c-cadence.c
5009 +++ b/drivers/i2c/busses/i2c-cadence.c
5010 @@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
5011 * Check for the message size against FIFO depth and set the
5012 * 'hold bus' bit if it is greater than FIFO depth.
5013 */
5014 - if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
5015 + if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
5016 ctrl_reg |= CDNS_I2C_CR_HOLD;
5017 + else
5018 + ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
5019
5020 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
5021
5022 @@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
5023 * Check for the message size against FIFO depth and set the
5024 * 'hold bus' bit if it is greater than FIFO depth.
5025 */
5026 - if (id->send_count > CDNS_I2C_FIFO_DEPTH)
5027 + if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
5028 ctrl_reg |= CDNS_I2C_CR_HOLD;
5029 + else
5030 + ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
5031 +
5032 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
5033
5034 /* Clear the interrupts in interrupt status register. */
5035 diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
5036 index 60c8561fbe65..ef13b6ce9d8d 100644
5037 --- a/drivers/i2c/busses/i2c-tegra.c
5038 +++ b/drivers/i2c/busses/i2c-tegra.c
5039 @@ -832,7 +832,7 @@ static const struct i2c_algorithm tegra_i2c_algo = {
5040 /* payload size is only 12 bit */
5041 static const struct i2c_adapter_quirks tegra_i2c_quirks = {
5042 .max_read_len = 4096,
5043 - .max_write_len = 4096,
5044 + .max_write_len = 4096 - 12,
5045 };
5046
5047 static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
5048 diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
5049 index f10443f92e4c..4be29ed44755 100644
5050 --- a/drivers/iio/adc/exynos_adc.c
5051 +++ b/drivers/iio/adc/exynos_adc.c
5052 @@ -915,7 +915,7 @@ static int exynos_adc_remove(struct platform_device *pdev)
5053 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
5054 struct exynos_adc *info = iio_priv(indio_dev);
5055
5056 - if (IS_REACHABLE(CONFIG_INPUT)) {
5057 + if (IS_REACHABLE(CONFIG_INPUT) && info->input) {
5058 free_irq(info->tsirq, info);
5059 input_unregister_device(info->input);
5060 }
5061 diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
5062 index cfd252386356..2ea42c04cfd2 100644
5063 --- a/drivers/infiniband/hw/hfi1/hfi.h
5064 +++ b/drivers/infiniband/hw/hfi1/hfi.h
5065 @@ -1425,7 +1425,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
5066 struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
5067 void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
5068 int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
5069 -void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
5070 +int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
5071 struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
5072 u16 ctxt);
5073 struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
5074 diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
5075 index 758d273c32cf..da786eb18558 100644
5076 --- a/drivers/infiniband/hw/hfi1/init.c
5077 +++ b/drivers/infiniband/hw/hfi1/init.c
5078 @@ -213,12 +213,12 @@ static void hfi1_rcd_free(struct kref *kref)
5079 struct hfi1_ctxtdata *rcd =
5080 container_of(kref, struct hfi1_ctxtdata, kref);
5081
5082 - hfi1_free_ctxtdata(rcd->dd, rcd);
5083 -
5084 spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
5085 rcd->dd->rcd[rcd->ctxt] = NULL;
5086 spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);
5087
5088 + hfi1_free_ctxtdata(rcd->dd, rcd);
5089 +
5090 kfree(rcd);
5091 }
5092
5093 @@ -241,10 +241,13 @@ int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
5094 * @rcd: pointer to an initialized rcd data structure
5095 *
5096 * Use this to get a reference after the init.
5097 + *
5098 + * Return : reflect kref_get_unless_zero(), which returns non-zero on
5099 + * increment, otherwise 0.
5100 */
5101 -void hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
5102 +int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
5103 {
5104 - kref_get(&rcd->kref);
5105 + return kref_get_unless_zero(&rcd->kref);
5106 }
5107
5108 /**
5109 @@ -324,7 +327,8 @@ struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
5110 spin_lock_irqsave(&dd->uctxt_lock, flags);
5111 if (dd->rcd[ctxt]) {
5112 rcd = dd->rcd[ctxt];
5113 - hfi1_rcd_get(rcd);
5114 + if (!hfi1_rcd_get(rcd))
5115 + rcd = NULL;
5116 }
5117 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
5118
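
The hfi1 change converts hfi1_rcd_get() to kref_get_unless_zero() and makes the lookup honour its result: once the last reference is dropped, the release path unpublishes dd->rcd[ctxt] under uctxt_lock, so a concurrent lookup must refuse to resurrect an object whose count already hit zero. A sketch of the pattern, assuming the lookup and release paths share the same lock; the struct and slot names are illustrative:

#include <linux/kref.h>
#include <linux/spinlock.h>

struct ctxt {
	struct kref kref;
	/* ... */
};

static struct ctxt *ctxt_lookup(struct ctxt **slot, spinlock_t *table_lock)
{
	struct ctxt *c;

	spin_lock(table_lock);
	c = *slot;
	if (c && !kref_get_unless_zero(&c->kref))
		c = NULL;	/* caught mid-teardown: treat as gone */
	spin_unlock(table_lock);

	return c;
}
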
5119 diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
5120 index 312916f99597..73686c2460ce 100644
5121 --- a/drivers/input/keyboard/cap11xx.c
5122 +++ b/drivers/input/keyboard/cap11xx.c
5123 @@ -75,9 +75,7 @@
5124 struct cap11xx_led {
5125 struct cap11xx_priv *priv;
5126 struct led_classdev cdev;
5127 - struct work_struct work;
5128 u32 reg;
5129 - enum led_brightness new_brightness;
5130 };
5131 #endif
5132
5133 @@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev)
5134 }
5135
5136 #ifdef CONFIG_LEDS_CLASS
5137 -static void cap11xx_led_work(struct work_struct *work)
5138 +static int cap11xx_led_set(struct led_classdev *cdev,
5139 + enum led_brightness value)
5140 {
5141 - struct cap11xx_led *led = container_of(work, struct cap11xx_led, work);
5142 + struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
5143 struct cap11xx_priv *priv = led->priv;
5144 - int value = led->new_brightness;
5145
5146 /*
5147 - * All LEDs share the same duty cycle as this is a HW limitation.
5148 - * Brightness levels per LED are either 0 (OFF) and 1 (ON).
5149 + * All LEDs share the same duty cycle as this is a HW
5150 + * limitation. Brightness levels per LED are either
5151 +	 * 0 (OFF) or 1 (ON).
5152 */
5153 - regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL,
5154 - BIT(led->reg), value ? BIT(led->reg) : 0);
5155 -}
5156 -
5157 -static void cap11xx_led_set(struct led_classdev *cdev,
5158 - enum led_brightness value)
5159 -{
5160 - struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
5161 -
5162 - if (led->new_brightness == value)
5163 - return;
5164 -
5165 - led->new_brightness = value;
5166 - schedule_work(&led->work);
5167 + return regmap_update_bits(priv->regmap,
5168 + CAP11XX_REG_LED_OUTPUT_CONTROL,
5169 + BIT(led->reg),
5170 + value ? BIT(led->reg) : 0);
5171 }
5172
5173 static int cap11xx_init_leds(struct device *dev,
5174 @@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev,
5175 led->cdev.default_trigger =
5176 of_get_property(child, "linux,default-trigger", NULL);
5177 led->cdev.flags = 0;
5178 - led->cdev.brightness_set = cap11xx_led_set;
5179 + led->cdev.brightness_set_blocking = cap11xx_led_set;
5180 led->cdev.max_brightness = 1;
5181 led->cdev.brightness = LED_OFF;
5182
5183 @@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev,
5184 led->reg = reg;
5185 led->priv = priv;
5186
5187 - INIT_WORK(&led->work, cap11xx_led_work);
5188 -
5189 error = devm_led_classdev_register(dev, &led->cdev);
5190 if (error) {
5191 of_node_put(child);
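
The cap11xx rework drops the private work_struct: the LED core already defers brightness_set_blocking() to its own workqueue when the caller cannot sleep, so a driver whose only job is a sleepable regmap write can register the blocking op directly. A sketch of that shape; my_led, MY_LED_CTRL and the bit field are illustrative, not the driver's names:

#include <linux/bitops.h>
#include <linux/leds.h>
#include <linux/regmap.h>

#define MY_LED_CTRL	0x74	/* illustrative register */

struct my_led {
	struct led_classdev cdev;
	struct regmap *regmap;
	unsigned int bit;
};

static int my_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	struct my_led *led = container_of(cdev, struct my_led, cdev);

	/* may sleep: the LED core queues this itself when needed */
	return regmap_update_bits(led->regmap, MY_LED_CTRL,
				  BIT(led->bit), value ? BIT(led->bit) : 0);
}

/* at probe time: led->cdev.brightness_set_blocking = my_led_set; */
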
5192 diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
5193 index 403452ef00e6..3d1cb7bf5e35 100644
5194 --- a/drivers/input/keyboard/matrix_keypad.c
5195 +++ b/drivers/input/keyboard/matrix_keypad.c
5196 @@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
5197 keypad->stopped = true;
5198 spin_unlock_irq(&keypad->lock);
5199
5200 - flush_work(&keypad->work.work);
5201 + flush_delayed_work(&keypad->work);
5202 /*
5203 * matrix_keypad_scan() will leave IRQs enabled;
5204 * we should disable them now.
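
The matrix_keypad fix swaps flush_work(&keypad->work.work) for flush_delayed_work(&keypad->work): flushing only the inner work item ignores a timer that has not fired yet, so the scan could still run after "stop". flush_delayed_work() kicks a pending timer first and then waits for the handler. A minimal sketch; the scan_work name is illustrative and the work is assumed initialized elsewhere with INIT_DELAYED_WORK():

#include <linux/workqueue.h>

static struct delayed_work scan_work;

static void stop_scanning(void)
{
	/* flush_work(&scan_work.work) would miss a still-pending timer;
	 * flush_delayed_work() fires it early, then waits for completion */
	flush_delayed_work(&scan_work);
}
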
5205 diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
5206 index babcfb165e4f..3b85631fde91 100644
5207 --- a/drivers/input/keyboard/st-keyscan.c
5208 +++ b/drivers/input/keyboard/st-keyscan.c
5209 @@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev)
5210
5211 input_dev->id.bustype = BUS_HOST;
5212
5213 + keypad_data->input_dev = input_dev;
5214 +
5215 error = keypad_matrix_key_parse_dt(keypad_data);
5216 if (error)
5217 return error;
5218 @@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev)
5219
5220 input_set_drvdata(input_dev, keypad_data);
5221
5222 - keypad_data->input_dev = input_dev;
5223 -
5224 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5225 keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
5226 if (IS_ERR(keypad_data->base))
5227 diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
5228 index 55da191ae550..dbb6d9e1b947 100644
5229 --- a/drivers/input/misc/pwm-vibra.c
5230 +++ b/drivers/input/misc/pwm-vibra.c
5231 @@ -34,6 +34,7 @@ struct pwm_vibrator {
5232 struct work_struct play_work;
5233 u16 level;
5234 u32 direction_duty_cycle;
5235 + bool vcc_on;
5236 };
5237
5238 static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
5239 @@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
5240 struct pwm_state state;
5241 int err;
5242
5243 - err = regulator_enable(vibrator->vcc);
5244 - if (err) {
5245 - dev_err(pdev, "failed to enable regulator: %d", err);
5246 - return err;
5247 + if (!vibrator->vcc_on) {
5248 + err = regulator_enable(vibrator->vcc);
5249 + if (err) {
5250 + dev_err(pdev, "failed to enable regulator: %d", err);
5251 + return err;
5252 + }
5253 + vibrator->vcc_on = true;
5254 }
5255
5256 pwm_get_state(vibrator->pwm, &state);
5257 @@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
5258
5259 static void pwm_vibrator_stop(struct pwm_vibrator *vibrator)
5260 {
5261 - regulator_disable(vibrator->vcc);
5262 -
5263 if (vibrator->pwm_dir)
5264 pwm_disable(vibrator->pwm_dir);
5265 pwm_disable(vibrator->pwm);
5266 +
5267 + if (vibrator->vcc_on) {
5268 + regulator_disable(vibrator->vcc);
5269 + vibrator->vcc_on = false;
5270 + }
5271 }
5272
5273 static void pwm_vibrator_play_work(struct work_struct *work)
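
The pwm-vibra fix adds a vcc_on flag because the regulator core keeps a use count: an unmatched regulator_disable() underflows it, and start/stop here can be called repeatedly from the play work and the close path. A sketch of keeping the calls strictly paired; the struct and function names are illustrative:

#include <linux/regulator/consumer.h>

struct vib {
	struct regulator *vcc;
	bool vcc_on;
};

static int vib_set_power(struct vib *v, bool on)
{
	int err;

	if (on == v->vcc_on)
		return 0;	/* already in the requested state */

	if (on) {
		err = regulator_enable(v->vcc);
		if (err)
			return err;
	} else {
		regulator_disable(v->vcc);
	}
	v->vcc_on = on;
	return 0;
}
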
5274 diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
5275 index c62cceb97bb1..5e8d8384aa2a 100644
5276 --- a/drivers/input/serio/ps2-gpio.c
5277 +++ b/drivers/input/serio/ps2-gpio.c
5278 @@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio)
5279 {
5280 struct ps2_gpio_data *drvdata = serio->port_data;
5281
5282 + flush_delayed_work(&drvdata->tx_work);
5283 disable_irq(drvdata->irq);
5284 }
5285
5286 diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
5287 index 0e65f609352e..83364fedbf0a 100644
5288 --- a/drivers/irqchip/irq-brcmstb-l2.c
5289 +++ b/drivers/irqchip/irq-brcmstb-l2.c
5290 @@ -129,8 +129,9 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
5291 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
5292 struct irq_chip_type *ct = irq_data_get_chip_type(d);
5293 struct brcmstb_l2_intc_data *b = gc->private;
5294 + unsigned long flags;
5295
5296 - irq_gc_lock(gc);
5297 + irq_gc_lock_irqsave(gc, flags);
5298 /* Save the current mask */
5299 b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
5300
5301 @@ -139,7 +140,7 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
5302 irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
5303 irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
5304 }
5305 - irq_gc_unlock(gc);
5306 + irq_gc_unlock_irqrestore(gc, flags);
5307 }
5308
5309 static void brcmstb_l2_intc_resume(struct irq_data *d)
5310 @@ -147,8 +148,9 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
5311 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
5312 struct irq_chip_type *ct = irq_data_get_chip_type(d);
5313 struct brcmstb_l2_intc_data *b = gc->private;
5314 + unsigned long flags;
5315
5316 - irq_gc_lock(gc);
5317 + irq_gc_lock_irqsave(gc, flags);
5318 if (ct->chip.irq_ack) {
5319 /* Clear unmasked non-wakeup interrupts */
5320 irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
5321 @@ -158,7 +160,7 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
5322 /* Restore the saved mask */
5323 irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
5324 irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
5325 - irq_gc_unlock(gc);
5326 + irq_gc_unlock_irqrestore(gc, flags);
5327 }
5328
5329 static int __init brcmstb_l2_intc_of_init(struct device_node *np,
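
The brcmstb-l2 suspend/resume callbacks can be entered with interrupts enabled, and irq_gc_lock() is a bare raw spinlock: taking it without masking IRQs risks deadlocking against the interrupt path on the same CPU, hence the _irqsave/_irqrestore variants. The shape of the fix, using the real generic-chip helpers; the register accesses are elided:

#include <linux/irq.h>

static void l2_intc_suspend(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	irq_gc_lock_irqsave(gc, flags);		/* safe from any context */
	/* ... save and rewrite mask registers ... */
	irq_gc_unlock_irqrestore(gc, flags);
}
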
5330 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
5331 index 15579cba1a88..78970cdf2ef6 100644
5332 --- a/drivers/irqchip/irq-gic-v3-its.c
5333 +++ b/drivers/irqchip/irq-gic-v3-its.c
5334 @@ -1893,6 +1893,8 @@ static int its_alloc_tables(struct its_node *its)
5335 indirect = its_parse_indirect_baser(its, baser,
5336 psz, &order,
5337 its->device_ids);
5338 + break;
5339 +
5340 case GITS_BASER_TYPE_VCPU:
5341 indirect = its_parse_indirect_baser(its, baser,
5342 psz, &order,
5343 diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
5344 index 8ab077ff58f4..96bcabfebc23 100644
5345 --- a/drivers/mailbox/bcm-flexrm-mailbox.c
5346 +++ b/drivers/mailbox/bcm-flexrm-mailbox.c
5347 @@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan)
5348
5349 /* Clear ring flush state */
5350 timeout = 1000; /* timeout of 1s */
5351 - writel_relaxed(0x0, ring + RING_CONTROL);
5352 + writel_relaxed(0x0, ring->regs + RING_CONTROL);
5353 do {
5354 - if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
5355 + if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
5356 FLUSH_DONE_MASK))
5357 break;
5358 mdelay(1);
5359 diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
5360 index 22944aa7d8e5..4ca3e3d3f9c7 100644
5361 --- a/drivers/md/bcache/request.c
5362 +++ b/drivers/md/bcache/request.c
5363 @@ -392,10 +392,11 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
5364
5365 /*
5366 * Flag for bypass if the IO is for read-ahead or background,
5367 - * unless the read-ahead request is for metadata (eg, for gfs2).
5368 + * unless the read-ahead request is for metadata
5369 + * (eg, for gfs2 or xfs).
5370 */
5371 if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
5372 - !(bio->bi_opf & REQ_META))
5373 + !(bio->bi_opf & (REQ_META|REQ_PRIO)))
5374 goto skip;
5375
5376 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
5377 @@ -877,7 +878,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
5378 }
5379
5380 if (!(bio->bi_opf & REQ_RAHEAD) &&
5381 - !(bio->bi_opf & REQ_META) &&
5382 + !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
5383 s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
5384 reada = min_t(sector_t, dc->readahead >> 9,
5385 get_capacity(bio->bi_disk) - bio_end_sector(bio));
5386 diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
5387 index d2b9fdbc8994..e75dc33339f6 100644
5388 --- a/drivers/md/bcache/writeback.h
5389 +++ b/drivers/md/bcache/writeback.h
5390 @@ -63,6 +63,9 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
5391 in_use > CUTOFF_WRITEBACK_SYNC)
5392 return false;
5393
5394 + if (bio_op(bio) == REQ_OP_DISCARD)
5395 + return false;
5396 +
5397 if (dc->partial_stripes_expensive &&
5398 bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
5399 bio_sectors(bio)))
5400 diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
5401 index e1fa6baf4e8e..96d5fb3f6199 100644
5402 --- a/drivers/md/dm-integrity.c
5403 +++ b/drivers/md/dm-integrity.c
5404 @@ -1357,8 +1357,8 @@ again:
5405 checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
5406 if (unlikely(r)) {
5407 if (r > 0) {
5408 - DMERR("Checksum failed at sector 0x%llx",
5409 - (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
5410 + DMERR_LIMIT("Checksum failed at sector 0x%llx",
5411 + (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
5412 r = -EILSEQ;
5413 atomic64_inc(&ic->number_of_mismatches);
5414 }
5415 @@ -1550,8 +1550,8 @@ retry_kmap:
5416
5417 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
5418 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
5419 - DMERR("Checksum failed when reading from journal, at sector 0x%llx",
5420 - (unsigned long long)logical_sector);
5421 + DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
5422 + (unsigned long long)logical_sector);
5423 }
5424 }
5425 #endif
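
The dm-integrity change switches the two checksum complaints to DMERR_LIMIT, device-mapper's rate-limited error printk: a failing disk can trip this once per sector, and unthrottled logging would swamp the console. The same idea with the generic helper, as a sketch:

#include <linux/printk.h>

static void report_bad_sector(unsigned long long sector)
{
	/* rate-limited: suppressed after a burst, with a summary later */
	pr_err_ratelimited("checksum failed at sector 0x%llx\n", sector);
}
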
5426 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
5427 index 9df1334608b7..25e97de36717 100644
5428 --- a/drivers/md/raid10.c
5429 +++ b/drivers/md/raid10.c
5430 @@ -3959,6 +3959,8 @@ static int raid10_run(struct mddev *mddev)
5431 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5432 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5433 "reshape");
5434 + if (!mddev->sync_thread)
5435 + goto out_free_conf;
5436 }
5437
5438 return 0;
5439 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
5440 index 45a3551d3afd..ae38895c44b2 100644
5441 --- a/drivers/md/raid5.c
5442 +++ b/drivers/md/raid5.c
5443 @@ -7390,6 +7390,8 @@ static int raid5_run(struct mddev *mddev)
5444 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5445 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5446 "reshape");
5447 + if (!mddev->sync_thread)
5448 + goto abort;
5449 }
5450
5451 /* Ok, everything is just fine now */
5452 diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
5453 index 886a2d8d5c6c..9d4a81bb0e59 100644
5454 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
5455 +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
5456 @@ -145,7 +145,6 @@ static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
5457 return;
5458
5459 check_once = true;
5460 - WARN_ON(1);
5461
5462 pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
5463 if (vb->vb2_queue->allow_zero_bytesused)
5464 diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
5465 index 10d584ce538d..9ee1c1360ab8 100644
5466 --- a/drivers/media/dvb-frontends/lgdt330x.c
5467 +++ b/drivers/media/dvb-frontends/lgdt330x.c
5468 @@ -783,7 +783,7 @@ static int lgdt3303_read_status(struct dvb_frontend *fe,
5469
5470 if ((buf[0] & 0x02) == 0x00)
5471 *status |= FE_HAS_SYNC;
5472 - if ((buf[0] & 0xfd) == 0x01)
5473 + if ((buf[0] & 0x01) == 0x01)
5474 *status |= FE_HAS_VITERBI | FE_HAS_LOCK;
5475 break;
5476 default:
5477 diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
5478 index 8e7a2a59cd32..d5c0ffc55d46 100644
5479 --- a/drivers/media/i2c/ov5640.c
5480 +++ b/drivers/media/i2c/ov5640.c
5481 @@ -1759,7 +1759,7 @@ static void ov5640_reset(struct ov5640_dev *sensor)
5482 usleep_range(1000, 2000);
5483
5484 gpiod_set_value_cansleep(sensor->reset_gpio, 0);
5485 - usleep_range(5000, 10000);
5486 + usleep_range(20000, 25000);
5487 }
5488
5489 static int ov5640_set_power_on(struct ov5640_dev *sensor)
5490 diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
5491 index 4b2e3de7856e..c4fc8e7d365a 100644
5492 --- a/drivers/media/platform/vimc/Makefile
5493 +++ b/drivers/media/platform/vimc/Makefile
5494 @@ -5,6 +5,7 @@ vimc_common-objs := vimc-common.o
5495 vimc_debayer-objs := vimc-debayer.o
5496 vimc_scaler-objs := vimc-scaler.o
5497 vimc_sensor-objs := vimc-sensor.o
5498 +vimc_streamer-objs := vimc-streamer.o
5499
5500 obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc-debayer.o \
5501 - vimc_scaler.o vimc_sensor.o
5502 + vimc_scaler.o vimc_sensor.o vimc_streamer.o
5503 diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
5504 index ec68feaac378..65d657daf66f 100644
5505 --- a/drivers/media/platform/vimc/vimc-capture.c
5506 +++ b/drivers/media/platform/vimc/vimc-capture.c
5507 @@ -24,6 +24,7 @@
5508 #include <media/videobuf2-vmalloc.h>
5509
5510 #include "vimc-common.h"
5511 +#include "vimc-streamer.h"
5512
5513 #define VIMC_CAP_DRV_NAME "vimc-capture"
5514
5515 @@ -44,7 +45,7 @@ struct vimc_cap_device {
5516 spinlock_t qlock;
5517 struct mutex lock;
5518 u32 sequence;
5519 - struct media_pipeline pipe;
5520 + struct vimc_stream stream;
5521 };
5522
5523 static const struct v4l2_pix_format fmt_default = {
5524 @@ -248,14 +249,13 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
5525 vcap->sequence = 0;
5526
5527 /* Start the media pipeline */
5528 - ret = media_pipeline_start(entity, &vcap->pipe);
5529 + ret = media_pipeline_start(entity, &vcap->stream.pipe);
5530 if (ret) {
5531 vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
5532 return ret;
5533 }
5534
5535 - /* Enable streaming from the pipe */
5536 - ret = vimc_pipeline_s_stream(&vcap->vdev.entity, 1);
5537 + ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
5538 if (ret) {
5539 media_pipeline_stop(entity);
5540 vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
5541 @@ -273,8 +273,7 @@ static void vimc_cap_stop_streaming(struct vb2_queue *vq)
5542 {
5543 struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
5544
5545 - /* Disable streaming from the pipe */
5546 - vimc_pipeline_s_stream(&vcap->vdev.entity, 0);
5547 + vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0);
5548
5549 /* Stop the media pipeline */
5550 media_pipeline_stop(&vcap->vdev.entity);
5551 @@ -355,8 +354,8 @@ static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
5552 kfree(vcap);
5553 }
5554
5555 -static void vimc_cap_process_frame(struct vimc_ent_device *ved,
5556 - struct media_pad *sink, const void *frame)
5557 +static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
5558 + const void *frame)
5559 {
5560 struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
5561 ved);
5562 @@ -370,7 +369,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
5563 typeof(*vimc_buf), list);
5564 if (!vimc_buf) {
5565 spin_unlock(&vcap->qlock);
5566 - return;
5567 + return ERR_PTR(-EAGAIN);
5568 }
5569
5570 /* Remove this entry from the list */
5571 @@ -391,6 +390,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
5572 vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0,
5573 vcap->format.sizeimage);
5574 vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);
5575 + return NULL;
5576 }
5577
5578 static int vimc_cap_comp_bind(struct device *comp, struct device *master,
5579 diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
5580 index 617415c224fe..204aa6f554e4 100644
5581 --- a/drivers/media/platform/vimc/vimc-common.c
5582 +++ b/drivers/media/platform/vimc/vimc-common.c
5583 @@ -207,41 +207,6 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
5584 }
5585 EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
5586
5587 -int vimc_propagate_frame(struct media_pad *src, const void *frame)
5588 -{
5589 - struct media_link *link;
5590 -
5591 - if (!(src->flags & MEDIA_PAD_FL_SOURCE))
5592 - return -EINVAL;
5593 -
5594 - /* Send this frame to all sink pads that are direct linked */
5595 - list_for_each_entry(link, &src->entity->links, list) {
5596 - if (link->source == src &&
5597 - (link->flags & MEDIA_LNK_FL_ENABLED)) {
5598 - struct vimc_ent_device *ved = NULL;
5599 - struct media_entity *entity = link->sink->entity;
5600 -
5601 - if (is_media_entity_v4l2_subdev(entity)) {
5602 - struct v4l2_subdev *sd =
5603 - container_of(entity, struct v4l2_subdev,
5604 - entity);
5605 - ved = v4l2_get_subdevdata(sd);
5606 - } else if (is_media_entity_v4l2_video_device(entity)) {
5607 - struct video_device *vdev =
5608 - container_of(entity,
5609 - struct video_device,
5610 - entity);
5611 - ved = video_get_drvdata(vdev);
5612 - }
5613 - if (ved && ved->process_frame)
5614 - ved->process_frame(ved, link->sink, frame);
5615 - }
5616 - }
5617 -
5618 - return 0;
5619 -}
5620 -EXPORT_SYMBOL_GPL(vimc_propagate_frame);
5621 -
5622 /* Helper function to allocate and initialize pads */
5623 struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
5624 {
5625 diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
5626 index 2e9981b18166..6ed969d9efbb 100644
5627 --- a/drivers/media/platform/vimc/vimc-common.h
5628 +++ b/drivers/media/platform/vimc/vimc-common.h
5629 @@ -113,23 +113,12 @@ struct vimc_pix_map {
5630 struct vimc_ent_device {
5631 struct media_entity *ent;
5632 struct media_pad *pads;
5633 - void (*process_frame)(struct vimc_ent_device *ved,
5634 - struct media_pad *sink, const void *frame);
5635 + void * (*process_frame)(struct vimc_ent_device *ved,
5636 + const void *frame);
5637 void (*vdev_get_format)(struct vimc_ent_device *ved,
5638 struct v4l2_pix_format *fmt);
5639 };
5640
5641 -/**
5642 - * vimc_propagate_frame - propagate a frame through the topology
5643 - *
5644 - * @src: the source pad where the frame is being originated
5645 - * @frame: the frame to be propagated
5646 - *
5647 - * This function will call the process_frame callback from the vimc_ent_device
5648 - * struct of the nodes directly connected to the @src pad
5649 - */
5650 -int vimc_propagate_frame(struct media_pad *src, const void *frame);
5651 -
5652 /**
5653 * vimc_pads_init - initialize pads
5654 *
5655 diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
5656 index 77887f66f323..7d77c63b99d2 100644
5657 --- a/drivers/media/platform/vimc/vimc-debayer.c
5658 +++ b/drivers/media/platform/vimc/vimc-debayer.c
5659 @@ -321,7 +321,6 @@ static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb,
5660 static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
5661 {
5662 struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
5663 - int ret;
5664
5665 if (enable) {
5666 const struct vimc_pix_map *vpix;
5667 @@ -351,22 +350,10 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
5668 if (!vdeb->src_frame)
5669 return -ENOMEM;
5670
5671 - /* Turn the stream on in the subdevices directly connected */
5672 - ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 1);
5673 - if (ret) {
5674 - vfree(vdeb->src_frame);
5675 - vdeb->src_frame = NULL;
5676 - return ret;
5677 - }
5678 } else {
5679 if (!vdeb->src_frame)
5680 return 0;
5681
5682 - /* Disable streaming from the pipe */
5683 - ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 0);
5684 - if (ret)
5685 - return ret;
5686 -
5687 vfree(vdeb->src_frame);
5688 vdeb->src_frame = NULL;
5689 }
5690 @@ -480,9 +467,8 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
5691 }
5692 }
5693
5694 -static void vimc_deb_process_frame(struct vimc_ent_device *ved,
5695 - struct media_pad *sink,
5696 - const void *sink_frame)
5697 +static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
5698 + const void *sink_frame)
5699 {
5700 struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
5701 ved);
5702 @@ -491,7 +477,7 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved,
5703
5704 /* If the stream in this node is not active, just return */
5705 if (!vdeb->src_frame)
5706 - return;
5707 + return ERR_PTR(-EINVAL);
5708
5709 for (i = 0; i < vdeb->sink_fmt.height; i++)
5710 for (j = 0; j < vdeb->sink_fmt.width; j++) {
5711 @@ -499,12 +485,8 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved,
5712 vdeb->set_rgb_src(vdeb, i, j, rgb);
5713 }
5714
5715 - /* Propagate the frame through all source pads */
5716 - for (i = 1; i < vdeb->sd.entity.num_pads; i++) {
5717 - struct media_pad *pad = &vdeb->sd.entity.pads[i];
5718 + return vdeb->src_frame;
5719
5720 - vimc_propagate_frame(pad, vdeb->src_frame);
5721 - }
5722 }
5723
5724 static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
5725 diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
5726 index b0952ee86296..39b2a73dfcc1 100644
5727 --- a/drivers/media/platform/vimc/vimc-scaler.c
5728 +++ b/drivers/media/platform/vimc/vimc-scaler.c
5729 @@ -217,7 +217,6 @@ static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
5730 static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
5731 {
5732 struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
5733 - int ret;
5734
5735 if (enable) {
5736 const struct vimc_pix_map *vpix;
5737 @@ -245,22 +244,10 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
5738 if (!vsca->src_frame)
5739 return -ENOMEM;
5740
5741 - /* Turn the stream on in the subdevices directly connected */
5742 - ret = vimc_pipeline_s_stream(&vsca->sd.entity, 1);
5743 - if (ret) {
5744 - vfree(vsca->src_frame);
5745 - vsca->src_frame = NULL;
5746 - return ret;
5747 - }
5748 } else {
5749 if (!vsca->src_frame)
5750 return 0;
5751
5752 - /* Disable streaming from the pipe */
5753 - ret = vimc_pipeline_s_stream(&vsca->sd.entity, 0);
5754 - if (ret)
5755 - return ret;
5756 -
5757 vfree(vsca->src_frame);
5758 vsca->src_frame = NULL;
5759 }
5760 @@ -346,26 +333,19 @@ static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
5761 vimc_sca_scale_pix(vsca, i, j, sink_frame);
5762 }
5763
5764 -static void vimc_sca_process_frame(struct vimc_ent_device *ved,
5765 - struct media_pad *sink,
5766 - const void *sink_frame)
5767 +static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
5768 + const void *sink_frame)
5769 {
5770 struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
5771 ved);
5772 - unsigned int i;
5773
5774 /* If the stream in this node is not active, just return */
5775 if (!vsca->src_frame)
5776 - return;
5777 + return ERR_PTR(-EINVAL);
5778
5779 vimc_sca_fill_src_frame(vsca, sink_frame);
5780
5781 - /* Propagate the frame through all source pads */
5782 - for (i = 1; i < vsca->sd.entity.num_pads; i++) {
5783 - struct media_pad *pad = &vsca->sd.entity.pads[i];
5784 -
5785 - vimc_propagate_frame(pad, vsca->src_frame);
5786 - }
5787 + return vsca->src_frame;
5788 };
5789
5790 static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
5791 diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
5792 index b2b89315e7ba..9e0d70e9f119 100644
5793 --- a/drivers/media/platform/vimc/vimc-sensor.c
5794 +++ b/drivers/media/platform/vimc/vimc-sensor.c
5795 @@ -16,8 +16,6 @@
5796 */
5797
5798 #include <linux/component.h>
5799 -#include <linux/freezer.h>
5800 -#include <linux/kthread.h>
5801 #include <linux/module.h>
5802 #include <linux/mod_devicetable.h>
5803 #include <linux/platform_device.h>
5804 @@ -201,38 +199,27 @@ static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = {
5805 .set_fmt = vimc_sen_set_fmt,
5806 };
5807
5808 -static int vimc_sen_tpg_thread(void *data)
5809 +static void *vimc_sen_process_frame(struct vimc_ent_device *ved,
5810 + const void *sink_frame)
5811 {
5812 - struct vimc_sen_device *vsen = data;
5813 - unsigned int i;
5814 -
5815 - set_freezable();
5816 - set_current_state(TASK_UNINTERRUPTIBLE);
5817 -
5818 - for (;;) {
5819 - try_to_freeze();
5820 - if (kthread_should_stop())
5821 - break;
5822 -
5823 - tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
5824 + struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device,
5825 + ved);
5826 + const struct vimc_pix_map *vpix;
5827 + unsigned int frame_size;
5828
5829 - /* Send the frame to all source pads */
5830 - for (i = 0; i < vsen->sd.entity.num_pads; i++)
5831 - vimc_propagate_frame(&vsen->sd.entity.pads[i],
5832 - vsen->frame);
5833 + /* Calculate the frame size */
5834 + vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
5835 + frame_size = vsen->mbus_format.width * vpix->bpp *
5836 + vsen->mbus_format.height;
5837
5838 - /* 60 frames per second */
5839 - schedule_timeout(HZ/60);
5840 - }
5841 -
5842 - return 0;
5843 + tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
5844 + return vsen->frame;
5845 }
5846
5847 static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
5848 {
5849 struct vimc_sen_device *vsen =
5850 container_of(sd, struct vimc_sen_device, sd);
5851 - int ret;
5852
5853 if (enable) {
5854 const struct vimc_pix_map *vpix;
5855 @@ -258,26 +245,8 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
5856 /* configure the test pattern generator */
5857 vimc_sen_tpg_s_format(vsen);
5858
5859 - /* Initialize the image generator thread */
5860 - vsen->kthread_sen = kthread_run(vimc_sen_tpg_thread, vsen,
5861 - "%s-sen", vsen->sd.v4l2_dev->name);
5862 - if (IS_ERR(vsen->kthread_sen)) {
5863 - dev_err(vsen->dev, "%s: kernel_thread() failed\n",
5864 - vsen->sd.name);
5865 - vfree(vsen->frame);
5866 - vsen->frame = NULL;
5867 - return PTR_ERR(vsen->kthread_sen);
5868 - }
5869 } else {
5870 - if (!vsen->kthread_sen)
5871 - return 0;
5872 -
5873 - /* Stop image generator */
5874 - ret = kthread_stop(vsen->kthread_sen);
5875 - if (ret)
5876 - return ret;
5877
5878 - vsen->kthread_sen = NULL;
5879 vfree(vsen->frame);
5880 vsen->frame = NULL;
5881 return 0;
5882 @@ -393,6 +362,7 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master,
5883 if (ret)
5884 goto err_free_hdl;
5885
5886 + vsen->ved.process_frame = vimc_sen_process_frame;
5887 dev_set_drvdata(comp, &vsen->ved);
5888 vsen->dev = comp;
5889
5890 diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
5891 new file mode 100644
5892 index 000000000000..fcc897fb247b
5893 --- /dev/null
5894 +++ b/drivers/media/platform/vimc/vimc-streamer.c
5895 @@ -0,0 +1,188 @@
5896 +// SPDX-License-Identifier: GPL-2.0+
5897 +/*
5898 + * vimc-streamer.c Virtual Media Controller Driver
5899 + *
5900 + * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
5901 + *
5902 + */
5903 +
5904 +#include <linux/init.h>
5905 +#include <linux/module.h>
5906 +#include <linux/freezer.h>
5907 +#include <linux/kthread.h>
5908 +
5909 +#include "vimc-streamer.h"
5910 +
5911 +/**
5912 + * vimc_get_source_entity - get the entity connected with the first sink pad
5913 + *
5914 + * @ent: reference media_entity
5915 + *
5916 + * Helper function that returns the media entity containing the source pad
5917 + * linked with the first sink pad from the given media entity pad list.
5918 + */
5919 +static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
5920 +{
5921 + struct media_pad *pad;
5922 + int i;
5923 +
5924 + for (i = 0; i < ent->num_pads; i++) {
5925 + if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
5926 + continue;
5927 + pad = media_entity_remote_pad(&ent->pads[i]);
5928 + return pad ? pad->entity : NULL;
5929 + }
5930 + return NULL;
5931 +}
5932 +
5933 +/*
5934 + * vimc_streamer_pipeline_terminate - Disable stream in all ved in stream
5935 + *
5936 + * @stream: the pointer to the stream structure with the pipeline to be
5937 + * disabled.
5938 + *
5939 + * Calls s_stream to disable the stream in each entity of the pipeline
5940 + *
5941 + */
5942 +static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
5943 +{
5944 + struct media_entity *entity;
5945 + struct v4l2_subdev *sd;
5946 +
5947 + while (stream->pipe_size) {
5948 + stream->pipe_size--;
5949 + entity = stream->ved_pipeline[stream->pipe_size]->ent;
5950 + entity = vimc_get_source_entity(entity);
5951 + stream->ved_pipeline[stream->pipe_size] = NULL;
5952 +
5953 + if (!is_media_entity_v4l2_subdev(entity))
5954 + continue;
5955 +
5956 + sd = media_entity_to_v4l2_subdev(entity);
5957 + v4l2_subdev_call(sd, video, s_stream, 0);
5958 + }
5959 +}
5960 +
5961 +/*
5962 + * vimc_streamer_pipeline_init - initializes the stream structure
5963 + *
5964 + * @stream: the pointer to the stream structure to be initialized
5965 + * @ved: the pointer to the vimc entity initializing the stream
5966 + *
5967 + * Initializes the stream structure. Walks through the entity graph to
5968 + * construct the pipeline used later on the streamer thread.
5969 + * Calls s_stream to enable stream in all entities of the pipeline.
5970 + */
5971 +static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
5972 + struct vimc_ent_device *ved)
5973 +{
5974 + struct media_entity *entity;
5975 + struct video_device *vdev;
5976 + struct v4l2_subdev *sd;
5977 + int ret = 0;
5978 +
5979 + stream->pipe_size = 0;
5980 + while (stream->pipe_size < VIMC_STREAMER_PIPELINE_MAX_SIZE) {
5981 + if (!ved) {
5982 + vimc_streamer_pipeline_terminate(stream);
5983 + return -EINVAL;
5984 + }
5985 + stream->ved_pipeline[stream->pipe_size++] = ved;
5986 +
5987 + entity = vimc_get_source_entity(ved->ent);
5988 +		/* Check if the end of the pipeline was reached */
5989 + if (!entity)
5990 + return 0;
5991 +
5992 + if (is_media_entity_v4l2_subdev(entity)) {
5993 + sd = media_entity_to_v4l2_subdev(entity);
5994 + ret = v4l2_subdev_call(sd, video, s_stream, 1);
5995 + if (ret && ret != -ENOIOCTLCMD) {
5996 + vimc_streamer_pipeline_terminate(stream);
5997 + return ret;
5998 + }
5999 + ved = v4l2_get_subdevdata(sd);
6000 + } else {
6001 + vdev = container_of(entity,
6002 + struct video_device,
6003 + entity);
6004 + ved = video_get_drvdata(vdev);
6005 + }
6006 + }
6007 +
6008 + vimc_streamer_pipeline_terminate(stream);
6009 + return -EINVAL;
6010 +}
6011 +
6012 +static int vimc_streamer_thread(void *data)
6013 +{
6014 + struct vimc_stream *stream = data;
6015 + int i;
6016 +
6017 + set_freezable();
6018 + set_current_state(TASK_UNINTERRUPTIBLE);
6019 +
6020 + for (;;) {
6021 + try_to_freeze();
6022 + if (kthread_should_stop())
6023 + break;
6024 +
6025 + for (i = stream->pipe_size - 1; i >= 0; i--) {
6026 + stream->frame = stream->ved_pipeline[i]->process_frame(
6027 + stream->ved_pipeline[i],
6028 + stream->frame);
6029 + if (!stream->frame)
6030 + break;
6031 + if (IS_ERR(stream->frame))
6032 + break;
6033 + }
6034 +		// pace the loop at 60 frames per second
6035 + schedule_timeout(HZ / 60);
6036 + }
6037 +
6038 + return 0;
6039 +}
6040 +
6041 +int vimc_streamer_s_stream(struct vimc_stream *stream,
6042 + struct vimc_ent_device *ved,
6043 + int enable)
6044 +{
6045 + int ret;
6046 +
6047 + if (!stream || !ved)
6048 + return -EINVAL;
6049 +
6050 + if (enable) {
6051 + if (stream->kthread)
6052 + return 0;
6053 +
6054 + ret = vimc_streamer_pipeline_init(stream, ved);
6055 + if (ret)
6056 + return ret;
6057 +
6058 + stream->kthread = kthread_run(vimc_streamer_thread, stream,
6059 + "vimc-streamer thread");
6060 +
6061 + if (IS_ERR(stream->kthread))
6062 + return PTR_ERR(stream->kthread);
6063 +
6064 + } else {
6065 + if (!stream->kthread)
6066 + return 0;
6067 +
6068 + ret = kthread_stop(stream->kthread);
6069 + if (ret)
6070 + return ret;
6071 +
6072 + stream->kthread = NULL;
6073 +
6074 + vimc_streamer_pipeline_terminate(stream);
6075 + }
6076 +
6077 + return 0;
6078 +}
6079 +EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
6080 +
6081 +MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer");
6082 +MODULE_AUTHOR("Lucas A. M. Magalhães <lucmaga@gmail.com>");
6083 +MODULE_LICENSE("GPL");
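
The new vimc-streamer.c replaces the per-sensor TPG kthread with one thread per stream: ved_pipeline[] is built from the capture device back toward the sensor, and each cycle runs the source's process_frame() first, then hands the result down the chain, stopping early when an entity returns NULL or an error pointer. A userspace sketch of that pull loop; struct ent and the function names are illustrative:

#include <stddef.h>

struct ent {
	/* returns the frame to pass downstream, or NULL to drop it */
	void *(*process_frame)(struct ent *self, const void *in);
};

/* pipeline[0] is the sink (capture), pipeline[n - 1] the source */
static void pump_one_frame(struct ent **pipeline, int n)
{
	void *frame = NULL;
	int i;

	for (i = n - 1; i >= 0; i--) {
		frame = pipeline[i]->process_frame(pipeline[i], frame);
		if (!frame)
			break;	/* entity not ready; try again next cycle */
	}
}
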
6084 diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/platform/vimc/vimc-streamer.h
6085 new file mode 100644
6086 index 000000000000..752af2e2d5a2
6087 --- /dev/null
6088 +++ b/drivers/media/platform/vimc/vimc-streamer.h
6089 @@ -0,0 +1,38 @@
6090 +/* SPDX-License-Identifier: GPL-2.0+ */
6091 +/*
6092 + * vimc-streamer.h Virtual Media Controller Driver
6093 + *
6094 + * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
6095 + *
6096 + */
6097 +
6098 +#ifndef _VIMC_STREAMER_H_
6099 +#define _VIMC_STREAMER_H_
6100 +
6101 +#include <media/media-device.h>
6102 +
6103 +#include "vimc-common.h"
6104 +
6105 +#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16
6106 +
6107 +struct vimc_stream {
6108 + struct media_pipeline pipe;
6109 + struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE];
6110 + unsigned int pipe_size;
6111 + u8 *frame;
6112 + struct task_struct *kthread;
6113 +};
6114 +
6115 +/**
6116 + * vimc_streamer_s_stream - start/stop the stream
6117 + *
6118 + * @stream: the pointer to the stream to start or stop
6119 + * @ved: The last entity of the streamer pipeline
6120 + * @enable: any non-zero value starts the stream, zero stops it
6121 + *
6122 + */
6123 +int vimc_streamer_s_stream(struct vimc_stream *stream,
6124 + struct vimc_ent_device *ved,
6125 + int enable);
6126 +
6127 +#endif //_VIMC_STREAMER_H_
6128 diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
6129 index 86a99f461fd8..ffffb66d51a0 100644
6130 --- a/drivers/media/usb/uvc/uvc_video.c
6131 +++ b/drivers/media/usb/uvc/uvc_video.c
6132 @@ -676,6 +676,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
6133 if (!uvc_hw_timestamps_param)
6134 return;
6135
6136 + /*
6137 + * We will get called from __vb2_queue_cancel() if there are buffers
6138 + * done but not dequeued by the user, but the sample array has already
6139 + * been released at that time. Just bail out in that case.
6140 + */
6141 + if (!clock->samples)
6142 + return;
6143 +
6144 spin_lock_irqsave(&clock->lock, flags);
6145
6146 if (clock->count < clock->size)
6147 diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
6148 index a530972c5a7e..e0173bf4b0dc 100644
6149 --- a/drivers/mfd/sm501.c
6150 +++ b/drivers/mfd/sm501.c
6151 @@ -1145,6 +1145,9 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
6152 lookup = devm_kzalloc(&pdev->dev,
6153 sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
6154 GFP_KERNEL);
6155 + if (!lookup)
6156 + return -ENOMEM;
6157 +
6158 lookup->dev_id = "i2c-gpio";
6159 if (iic->pin_sda < 32)
6160 lookup->table[0].chip_label = "SM501-LOW";
6161 diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
6162 index 3bc0c15d4d85..b83a373e3a8d 100644
6163 --- a/drivers/misc/cxl/guest.c
6164 +++ b/drivers/misc/cxl/guest.c
6165 @@ -267,6 +267,7 @@ static int guest_reset(struct cxl *adapter)
6166 int i, rc;
6167
6168 pr_devel("Adapter reset request\n");
6169 + spin_lock(&adapter->afu_list_lock);
6170 for (i = 0; i < adapter->slices; i++) {
6171 if ((afu = adapter->afu[i])) {
6172 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
6173 @@ -283,6 +284,7 @@ static int guest_reset(struct cxl *adapter)
6174 pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
6175 }
6176 }
6177 + spin_unlock(&adapter->afu_list_lock);
6178 return rc;
6179 }
6180
6181 diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
6182 index b66d832d3233..787a69a2a726 100644
6183 --- a/drivers/misc/cxl/pci.c
6184 +++ b/drivers/misc/cxl/pci.c
6185 @@ -1807,7 +1807,7 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
6186 /* There should only be one entry, but go through the list
6187 * anyway
6188 */
6189 - if (afu->phb == NULL)
6190 + if (afu == NULL || afu->phb == NULL)
6191 return result;
6192
6193 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
6194 @@ -1834,7 +1834,8 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
6195 {
6196 struct cxl *adapter = pci_get_drvdata(pdev);
6197 struct cxl_afu *afu;
6198 - pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result;
6199 + pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
6200 + pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
6201 int i;
6202
6203 /* At this point, we could still have an interrupt pending.
6204 @@ -1845,6 +1846,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
6205
6206 /* If we're permanently dead, give up. */
6207 if (state == pci_channel_io_perm_failure) {
6208 + spin_lock(&adapter->afu_list_lock);
6209 for (i = 0; i < adapter->slices; i++) {
6210 afu = adapter->afu[i];
6211 /*
6212 @@ -1853,6 +1855,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
6213 */
6214 cxl_vphb_error_detected(afu, state);
6215 }
6216 + spin_unlock(&adapter->afu_list_lock);
6217 return PCI_ERS_RESULT_DISCONNECT;
6218 }
6219
6220 @@ -1934,11 +1937,17 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
6221 * * In slot_reset, free the old resources and allocate new ones.
6222 * * In resume, clear the flag to allow things to start.
6223 */
6224 +
6225 + /* Make sure no one else changes the afu list */
6226 + spin_lock(&adapter->afu_list_lock);
6227 +
6228 for (i = 0; i < adapter->slices; i++) {
6229 afu = adapter->afu[i];
6230
6231 - afu_result = cxl_vphb_error_detected(afu, state);
6232 + if (afu == NULL)
6233 + continue;
6234
6235 + afu_result = cxl_vphb_error_detected(afu, state);
6236 cxl_context_detach_all(afu);
6237 cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
6238 pci_deconfigure_afu(afu);
6239 @@ -1950,6 +1959,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
6240 (result == PCI_ERS_RESULT_NEED_RESET))
6241 result = PCI_ERS_RESULT_NONE;
6242 }
6243 + spin_unlock(&adapter->afu_list_lock);
6244
6245 /* should take the context lock here */
6246 if (cxl_adapter_context_lock(adapter) != 0)
6247 @@ -1982,14 +1992,18 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
6248 */
6249 cxl_adapter_context_unlock(adapter);
6250
6251 + spin_lock(&adapter->afu_list_lock);
6252 for (i = 0; i < adapter->slices; i++) {
6253 afu = adapter->afu[i];
6254
6255 + if (afu == NULL)
6256 + continue;
6257 +
6258 if (pci_configure_afu(afu, adapter, pdev))
6259 - goto err;
6260 + goto err_unlock;
6261
6262 if (cxl_afu_select_best_mode(afu))
6263 - goto err;
6264 + goto err_unlock;
6265
6266 if (afu->phb == NULL)
6267 continue;
6268 @@ -2001,16 +2015,16 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
6269 ctx = cxl_get_context(afu_dev);
6270
6271 if (ctx && cxl_release_context(ctx))
6272 - goto err;
6273 + goto err_unlock;
6274
6275 ctx = cxl_dev_context_init(afu_dev);
6276 if (IS_ERR(ctx))
6277 - goto err;
6278 + goto err_unlock;
6279
6280 afu_dev->dev.archdata.cxl_ctx = ctx;
6281
6282 if (cxl_ops->afu_check_and_enable(afu))
6283 - goto err;
6284 + goto err_unlock;
6285
6286 afu_dev->error_state = pci_channel_io_normal;
6287
6288 @@ -2031,8 +2045,13 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
6289 result = PCI_ERS_RESULT_DISCONNECT;
6290 }
6291 }
6292 +
6293 + spin_unlock(&adapter->afu_list_lock);
6294 return result;
6295
6296 +err_unlock:
6297 + spin_unlock(&adapter->afu_list_lock);
6298 +
6299 err:
6300 /* All the bits that happen in both error_detected and cxl_remove
6301 * should be idempotent, so we don't need to worry about leaving a mix
6302 @@ -2053,10 +2072,11 @@ static void cxl_pci_resume(struct pci_dev *pdev)
6303 * This is not the place to be checking if everything came back up
6304 * properly, because there's no return value: do that in slot_reset.
6305 */
6306 + spin_lock(&adapter->afu_list_lock);
6307 for (i = 0; i < adapter->slices; i++) {
6308 afu = adapter->afu[i];
6309
6310 - if (afu->phb == NULL)
6311 + if (afu == NULL || afu->phb == NULL)
6312 continue;
6313
6314 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
6315 @@ -2065,6 +2085,7 @@ static void cxl_pci_resume(struct pci_dev *pdev)
6316 afu_dev->driver->err_handler->resume(afu_dev);
6317 }
6318 }
6319 + spin_unlock(&adapter->afu_list_lock);
6320 }
6321
6322 static const struct pci_error_handlers cxl_err_handler = {
6323 diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
6324 index fc3872fe7b25..c383322ec2ba 100644
6325 --- a/drivers/misc/mei/bus.c
6326 +++ b/drivers/misc/mei/bus.c
6327 @@ -541,17 +541,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
6328 goto out;
6329 }
6330
6331 - if (!mei_cl_bus_module_get(cldev)) {
6332 - dev_err(&cldev->dev, "get hw module failed");
6333 - ret = -ENODEV;
6334 - goto out;
6335 - }
6336 -
6337 ret = mei_cl_connect(cl, cldev->me_cl, NULL);
6338 - if (ret < 0) {
6339 + if (ret < 0)
6340 dev_err(&cldev->dev, "cannot connect\n");
6341 - mei_cl_bus_module_put(cldev);
6342 - }
6343
6344 out:
6345 mutex_unlock(&bus->device_lock);
6346 @@ -614,7 +606,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
6347 if (err < 0)
6348 dev_err(bus->dev, "Could not disconnect from the ME client\n");
6349
6350 - mei_cl_bus_module_put(cldev);
6351 out:
6352 /* Flush queues and remove any pending read */
6353 mei_cl_flush_queues(cl, NULL);
6354 @@ -725,9 +716,16 @@ static int mei_cl_device_probe(struct device *dev)
6355 if (!id)
6356 return -ENODEV;
6357
6358 + if (!mei_cl_bus_module_get(cldev)) {
6359 + dev_err(&cldev->dev, "get hw module failed");
6360 + return -ENODEV;
6361 + }
6362 +
6363 ret = cldrv->probe(cldev, id);
6364 - if (ret)
6365 + if (ret) {
6366 + mei_cl_bus_module_put(cldev);
6367 return ret;
6368 + }
6369
6370 __module_get(THIS_MODULE);
6371 return 0;
6372 @@ -755,6 +753,7 @@ static int mei_cl_device_remove(struct device *dev)
6373
6374 mei_cldev_unregister_callbacks(cldev);
6375
6376 + mei_cl_bus_module_put(cldev);
6377 module_put(THIS_MODULE);
6378 dev->driver = NULL;
6379 return ret;
6380 diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
6381 index e56f3e72d57a..d39cc2909474 100644
6382 --- a/drivers/misc/mei/hbm.c
6383 +++ b/drivers/misc/mei/hbm.c
6384 @@ -986,29 +986,36 @@ static void mei_hbm_config_features(struct mei_device *dev)
6385 dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
6386 dev->hbm_f_pg_supported = 1;
6387
6388 + dev->hbm_f_dc_supported = 0;
6389 if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
6390 dev->hbm_f_dc_supported = 1;
6391
6392 + dev->hbm_f_ie_supported = 0;
6393 if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
6394 dev->hbm_f_ie_supported = 1;
6395
6396 /* disconnect on connect timeout instead of link reset */
6397 + dev->hbm_f_dot_supported = 0;
6398 if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
6399 dev->hbm_f_dot_supported = 1;
6400
6401 /* Notification Event Support */
6402 + dev->hbm_f_ev_supported = 0;
6403 if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
6404 dev->hbm_f_ev_supported = 1;
6405
6406 /* Fixed Address Client Support */
6407 + dev->hbm_f_fa_supported = 0;
6408 if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
6409 dev->hbm_f_fa_supported = 1;
6410
6411 /* OS ver message Support */
6412 + dev->hbm_f_os_supported = 0;
6413 if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
6414 dev->hbm_f_os_supported = 1;
6415
6416 /* DMA Ring Support */
6417 + dev->hbm_f_dr_supported = 0;
6418 if (dev->version.major_version > HBM_MAJOR_VERSION_DR ||
6419 (dev->version.major_version == HBM_MAJOR_VERSION_DR &&
6420 dev->version.minor_version >= HBM_MINOR_VERSION_DR))
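Each hbm.c hunk above prepends an explicit reset of a cached hbm_f_*_supported flag before the version test re-derives it; without the reset, a link reset that renegotiates a lower HBM version could leave a stale capability bit set. A compact model of the pattern (simplified stand-in types):

    #include <stdio.h>

    struct dev { int major; int dc_supported; };

    static void config_features(struct dev *d)
    {
        d->dc_supported = 0;            /* the added reset */
        if (d->major >= 2)
            d->dc_supported = 1;
    }

    int main(void)
    {
        struct dev d = { .major = 2 };

        config_features(&d);            /* dc_supported = 1 */
        d.major = 1;                    /* link reset renegotiates lower */
        config_features(&d);
        printf("dc=%d\n", d.dc_supported); /* 0, not a stale 1 */
        return 0;
    }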
6421 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
6422 index d4f9bfbaf023..6600b3466dfb 100644
6423 --- a/drivers/mmc/core/core.c
6424 +++ b/drivers/mmc/core/core.c
6425 @@ -2378,9 +2378,9 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
6426 return card->pref_erase;
6427
6428 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
6429 - if (max_discard && mmc_can_trim(card)) {
6430 + if (mmc_can_trim(card)) {
6431 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
6432 - if (max_trim < max_discard)
6433 + if (max_trim < max_discard || max_discard == 0)
6434 max_discard = max_trim;
6435 } else if (max_discard < card->erase_size) {
6436 max_discard = 0;
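The core.c hunk changes mmc_calc_max_discard() so the trim limit is considered whenever the card supports trim, and is chosen either when it is smaller than the discard limit or when the discard limit is zero; previously a zero max_discard skipped the trim branch entirely. The selection logic, lifted into a standalone sketch helper (not the mmc core API):

    #include <stdio.h>

    static unsigned int pick_max_discard(unsigned int max_discard,
                                         unsigned int max_trim, int can_trim)
    {
        if (can_trim && (max_trim < max_discard || max_discard == 0))
            return max_trim;
        return max_discard;
    }

    int main(void)
    {
        /* the old logic returned 0 here; the fix falls back to trim */
        printf("%u\n", pick_max_discard(0, 512, 1)); /* -> 512 */
        return 0;
    }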
6437 diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
6438 index 753973dc1655..8dae12b841b3 100644
6439 --- a/drivers/mmc/host/sdhci-esdhc-imx.c
6440 +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
6441 @@ -981,6 +981,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
6442 case MMC_TIMING_UHS_SDR25:
6443 case MMC_TIMING_UHS_SDR50:
6444 case MMC_TIMING_UHS_SDR104:
6445 + case MMC_TIMING_MMC_HS:
6446 case MMC_TIMING_MMC_HS200:
6447 writel(m, host->ioaddr + ESDHC_MIX_CTRL);
6448 break;
6449 diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
6450 index ae219b8a7754..2646faffd36e 100644
6451 --- a/drivers/net/can/flexcan.c
6452 +++ b/drivers/net/can/flexcan.c
6453 @@ -140,7 +140,7 @@
6454 #define FLEXCAN_TX_MB 63
6455 #define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
6456 #define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST (FLEXCAN_TX_MB - 1)
6457 -#define FLEXCAN_IFLAG_MB(x) BIT(x & 0x1f)
6458 +#define FLEXCAN_IFLAG_MB(x) BIT((x) & 0x1f)
6459 #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
6460 #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
6461 #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
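FLEXCAN_IFLAG_MB() gains parentheses around its argument. Because '&' binds more loosely than arithmetic but more tightly than '|', an unparenthesized expression argument can be grouped wrongly inside the macro. A quick demonstration (generic BIT() stand-in, not the kernel header):

    #include <stdio.h>

    #define BIT(n)          (1ULL << (n))
    #define IFLAG_MB_OLD(x) BIT(x & 0x1f)     /* buggy: no parens */
    #define IFLAG_MB_NEW(x) BIT((x) & 0x1f)   /* fixed */

    int main(void)
    {
        /* '|' binds looser than '&': the old form masks only the rhs */
        printf("old: %#llx\n", IFLAG_MB_OLD(32 | 1)); /* BIT(32 | 1) = bit 33 */
        printf("new: %#llx\n", IFLAG_MB_NEW(32 | 1)); /* BIT(33 & 0x1f) = bit 1 */
        return 0;
    }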
6462 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
6463 index fc8b48adf38b..2fa2caf7a746 100644
6464 --- a/drivers/net/dsa/bcm_sf2.c
6465 +++ b/drivers/net/dsa/bcm_sf2.c
6466 @@ -692,7 +692,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
6467 * port, the other ones have already been disabled during
6468 * bcm_sf2_sw_setup
6469 */
6470 - for (port = 0; port < DSA_MAX_PORTS; port++) {
6471 + for (port = 0; port < ds->num_ports; port++) {
6472 if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
6473 bcm_sf2_port_disable(ds, port, NULL);
6474 }
6475 @@ -724,10 +724,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
6476 {
6477 struct net_device *p = ds->ports[port].cpu_dp->master;
6478 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
6479 - struct ethtool_wolinfo pwol;
6480 + struct ethtool_wolinfo pwol = { };
6481
6482 /* Get the parent device WoL settings */
6483 - p->ethtool_ops->get_wol(p, &pwol);
6484 + if (p->ethtool_ops->get_wol)
6485 + p->ethtool_ops->get_wol(p, &pwol);
6486
6487 /* Advertise the parent device supported settings */
6488 wol->supported = pwol.supported;
6489 @@ -748,9 +749,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
6490 struct net_device *p = ds->ports[port].cpu_dp->master;
6491 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
6492 s8 cpu_port = ds->ports[port].cpu_dp->index;
6493 - struct ethtool_wolinfo pwol;
6494 + struct ethtool_wolinfo pwol = { };
6495
6496 - p->ethtool_ops->get_wol(p, &pwol);
6497 + if (p->ethtool_ops->get_wol)
6498 + p->ethtool_ops->get_wol(p, &pwol);
6499 if (wol->wolopts & ~pwol.supported)
6500 return -EINVAL;
6501
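Both WoL hunks apply the same two defenses: pwol is zero-initialized so its fields have safe defaults, and the parent device's get_wol callback is treated as optional rather than called unconditionally through a possibly NULL pointer. A sketch with simplified stand-in types:

    #include <stdio.h>

    struct wolinfo { unsigned supported, wolopts; };
    struct eth_ops { void (*get_wol)(struct wolinfo *w); };

    static void query_wol(const struct eth_ops *ops)
    {
        struct wolinfo pwol = { };      /* safe defaults if op is absent */

        if (ops->get_wol)               /* the callback may be NULL */
            ops->get_wol(&pwol);
        printf("supported=%u\n", pwol.supported);
    }

    int main(void)
    {
        struct eth_ops none = { .get_wol = NULL };

        query_wol(&none);               /* no crash, reports 0 */
        return 0;
    }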
6502 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
6503 index bb41becb6609..31ff1e0d1baa 100644
6504 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
6505 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
6506 @@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6507 {
6508 struct net_device *netdev;
6509 struct atl2_adapter *adapter;
6510 - static int cards_found;
6511 + static int cards_found = 0;
6512 unsigned long mmio_start;
6513 int mmio_len;
6514 int err;
6515
6516 - cards_found = 0;
6517 -
6518 err = pci_enable_device(pdev);
6519 if (err)
6520 return err;
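The atl2 hunk fixes a self-defeating counter: assigning cards_found = 0 at the top of probe wiped the running count on every call, whereas a static initializer runs exactly once at program start. Illustration:

    #include <stdio.h>

    static int probe_buggy(void)
    {
        static int cards_found;
        cards_found = 0;            /* wipes the running count */
        return ++cards_found;       /* always 1 */
    }

    static int probe_fixed(void)
    {
        static int cards_found = 0; /* initialized exactly once */
        return ++cards_found;       /* 1, 2, 3, ... */
    }

    int main(void)
    {
        probe_buggy();
        printf("buggy: %d\n", probe_buggy());  /* 1 */
        probe_fixed();
        printf("fixed: %d\n", probe_fixed());  /* 2 */
        return 0;
    }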
6521 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
6522 index fc16b2b0d0e9..0bdbc72605e1 100644
6523 --- a/drivers/net/ethernet/broadcom/bcmsysport.c
6524 +++ b/drivers/net/ethernet/broadcom/bcmsysport.c
6525 @@ -134,6 +134,10 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
6526
6527 priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
6528 reg = rxchk_readl(priv, RXCHK_CONTROL);
6529 + /* Clear L2 header checks, which would prevent BPDUs
6530 + * from being received.
6531 + */
6532 + reg &= ~RXCHK_L2_HDR_DIS;
6533 if (priv->rx_chk_en)
6534 reg |= RXCHK_EN;
6535 else
6536 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
6537 index 1fdaf86bbe8f..0bd93bb7d1a2 100644
6538 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
6539 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
6540 @@ -3542,7 +3542,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
6541 if (len)
6542 break;
6543 /* on first few passes, just barely sleep */
6544 - if (i < DFLT_HWRM_CMD_TIMEOUT)
6545 + if (i < HWRM_SHORT_TIMEOUT_COUNTER)
6546 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
6547 HWRM_SHORT_MAX_TIMEOUT);
6548 else
6549 @@ -3565,7 +3565,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
6550 dma_rmb();
6551 if (*valid)
6552 break;
6553 - udelay(1);
6554 + usleep_range(1, 5);
6555 }
6556
6557 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
6558 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
6559 index bde384630a75..cf2d4a6583d5 100644
6560 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
6561 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
6562 @@ -548,7 +548,7 @@ struct rx_tpa_end_cmp_ext {
6563 (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
6564 ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
6565
6566 -#define HWRM_VALID_BIT_DELAY_USEC 20
6567 +#define HWRM_VALID_BIT_DELAY_USEC 150
6568
6569 #define BNXT_RX_EVENT 1
6570 #define BNXT_AGG_EVENT 2
6571 diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
6572 index 6c8dcb65ff03..90497a27df18 100644
6573 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c
6574 +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
6575 @@ -1039,7 +1039,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
6576 case NIC_MBOX_MSG_CFG_DONE:
6577 /* Last message of VF config msg sequence */
6578 nic_enable_vf(nic, vf, true);
6579 - goto unlock;
6580 + break;
6581 case NIC_MBOX_MSG_SHUTDOWN:
6582 /* First msg in VF teardown sequence */
6583 if (vf >= nic->num_vf_en)
6584 diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
6585 index 88f8a8fa93cd..9800738448ec 100644
6586 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
6587 +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
6588 @@ -172,6 +172,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic)
6589 return 1;
6590 }
6591
6592 +static void nicvf_send_cfg_done(struct nicvf *nic)
6593 +{
6594 + union nic_mbx mbx = {};
6595 +
6596 + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
6597 + if (nicvf_send_msg_to_pf(nic, &mbx)) {
6598 + netdev_err(nic->netdev,
6599 + "PF didn't respond to CFG DONE msg\n");
6600 + }
6601 +}
6602 +
6603 static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
6604 {
6605 if (bgx->rx)
6606 @@ -1416,7 +1427,6 @@ int nicvf_open(struct net_device *netdev)
6607 struct nicvf *nic = netdev_priv(netdev);
6608 struct queue_set *qs = nic->qs;
6609 struct nicvf_cq_poll *cq_poll = NULL;
6610 - union nic_mbx mbx = {};
6611
6612 netif_carrier_off(netdev);
6613
6614 @@ -1512,8 +1522,7 @@ int nicvf_open(struct net_device *netdev)
6615 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
6616
6617 /* Send VF config done msg to PF */
6618 - mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
6619 - nicvf_write_to_mbx(nic, &mbx);
6620 + nicvf_send_cfg_done(nic);
6621
6622 return 0;
6623 cleanup:
6624 @@ -1941,7 +1950,8 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
6625
6626 /* flush DMAC filters and reset RX mode */
6627 mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
6628 - nicvf_send_msg_to_pf(nic, &mbx);
6629 + if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
6630 + goto free_mc;
6631
6632 if (mode & BGX_XCAST_MCAST_FILTER) {
6633 /* once enabling filtering, we need to signal to PF to add
6634 @@ -1949,7 +1959,8 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
6635 */
6636 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
6637 mbx.xcast.data.mac = 0;
6638 - nicvf_send_msg_to_pf(nic, &mbx);
6639 + if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
6640 + goto free_mc;
6641 }
6642
6643 /* check if we have any specific MACs to be added to PF DMAC filter */
6644 @@ -1958,9 +1969,9 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
6645 for (idx = 0; idx < mc_addrs->count; idx++) {
6646 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
6647 mbx.xcast.data.mac = mc_addrs->mc[idx];
6648 - nicvf_send_msg_to_pf(nic, &mbx);
6649 + if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
6650 + goto free_mc;
6651 }
6652 - kfree(mc_addrs);
6653 }
6654
6655 /* and finally set rx mode for PF accordingly */
6656 @@ -1968,6 +1979,8 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
6657 mbx.xcast.data.mode = mode;
6658
6659 nicvf_send_msg_to_pf(nic, &mbx);
6660 +free_mc:
6661 + kfree(mc_addrs);
6662 }
6663
6664 static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
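The nicvf hunks convert the multicast-sync path to single-exit cleanup: every mailbox failure now jumps to the free_mc label, and the lone kfree(mc_addrs) there replaces the mid-loop free, so no early bail-out leaks the list. The shape of the pattern (illustrative names):

    #include <stdlib.h>

    static int send_msg(int step) { return step == 2 ? -1 : 0; }

    static void set_rx_mode(void)
    {
        int *mc_list = malloc(16 * sizeof(*mc_list));

        if (!mc_list)
            return;
        if (send_msg(1) < 0)
            goto free_mc;
        if (send_msg(2) < 0)        /* fails: still reaches the free */
            goto free_mc;
        send_msg(3);
    free_mc:
        free(mc_list);              /* exactly one owner of the free */
    }

    int main(void)
    {
        set_rx_mode();
        return 0;
    }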
6665 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
6666 index 3b9e74be5fbd..b8155f5e71b4 100644
6667 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
6668 +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
6669 @@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
6670 dsaf_dev = dev_get_drvdata(&pdev->dev);
6671 if (!dsaf_dev) {
6672 dev_err(&pdev->dev, "dsaf_dev is NULL\n");
6673 + put_device(&pdev->dev);
6674 return -ENODEV;
6675 }
6676
6677 @@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
6678 if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
6679 dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
6680 dsaf_dev->ae_dev.name);
6681 + put_device(&pdev->dev);
6682 return -ENODEV;
6683 }
6684
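The added put_device() calls appear to balance a reference taken by the earlier device lookup in hns_dsaf_roce_reset(); the early -ENODEV returns previously skipped the drop. A minimal refcount model (hypothetical helpers, not the driver's API):

    #include <stdio.h>

    static int ref;
    static void get_dev(void) { ref++; }
    static void put_dev(void) { ref--; }

    static int reset(int have_drvdata)
    {
        get_dev();                  /* lookup takes a reference */
        if (!have_drvdata) {
            put_dev();              /* the added drop on error */
            return -1;
        }
        put_dev();
        return 0;
    }

    int main(void)
    {
        reset(0);
        printf("ref=%d\n", ref);    /* 0: no leak on the error path */
        return 0;
    }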
6685 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
6686 index 6cdd58d9d461..410d5d3aa393 100644
6687 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
6688 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
6689 @@ -3924,8 +3924,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
6690 else
6691 mrqc = IXGBE_MRQC_VMDQRSS64EN;
6692
6693 - /* Enable L3/L4 for Tx Switched packets */
6694 - mrqc |= IXGBE_MRQC_L3L4TXSWEN;
6695 + /* Enable L3/L4 for Tx Switched packets only for X550,
6696 + * older devices do not support this feature
6697 + */
6698 + if (hw->mac.type >= ixgbe_mac_X550)
6699 + mrqc |= IXGBE_MRQC_L3L4TXSWEN;
6700 } else {
6701 if (tcs > 4)
6702 mrqc = IXGBE_MRQC_RTRSS8TCEN;
6703 diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
6704 index 62f204f32316..59007d6cd36d 100644
6705 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c
6706 +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
6707 @@ -2886,7 +2886,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
6708
6709 ret = mv643xx_eth_shared_of_probe(pdev);
6710 if (ret)
6711 - return ret;
6712 + goto err_put_clk;
6713 pd = dev_get_platdata(&pdev->dev);
6714
6715 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
6716 @@ -2894,6 +2894,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
6717 infer_hw_params(msp);
6718
6719 return 0;
6720 +
6721 +err_put_clk:
6722 + if (!IS_ERR(msp->clk))
6723 + clk_disable_unprepare(msp->clk);
6724 + return ret;
6725 }
6726
6727 static int mv643xx_eth_shared_remove(struct platform_device *pdev)
6728 diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
6729 index a78a39244b79..2ba0d89aaf3c 100644
6730 --- a/drivers/net/ethernet/marvell/mvneta.c
6731 +++ b/drivers/net/ethernet/marvell/mvneta.c
6732 @@ -2147,7 +2147,7 @@ err_drop_frame:
6733 if (unlikely(!skb))
6734 goto err_drop_frame_ret_pool;
6735
6736 - dma_sync_single_range_for_cpu(dev->dev.parent,
6737 + dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
6738 rx_desc->buf_phys_addr,
6739 MVNETA_MH_SIZE + NET_SKB_PAD,
6740 rx_bytes,
6741 diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
6742 index eff57f7d056a..4e18d95e548f 100644
6743 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
6744 +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
6745 @@ -1288,15 +1288,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
6746
6747 static int
6748 wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
6749 - enum alu_op alu_op, bool skip)
6750 + enum alu_op alu_op)
6751 {
6752 const struct bpf_insn *insn = &meta->insn;
6753
6754 - if (skip) {
6755 - meta->skip = true;
6756 - return 0;
6757 - }
6758 -
6759 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
6760 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
6761
6762 @@ -2306,7 +2301,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6763
6764 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6765 {
6766 - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
6767 + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
6768 }
6769
6770 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6771 @@ -2316,7 +2311,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6772
6773 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6774 {
6775 - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
6776 + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
6777 }
6778
6779 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6780 @@ -2326,7 +2321,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6781
6782 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6783 {
6784 - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
6785 + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
6786 }
6787
6788 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6789 @@ -2336,7 +2331,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6790
6791 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6792 {
6793 - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
6794 + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
6795 }
6796
6797 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6798 @@ -2346,7 +2341,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6799
6800 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6801 {
6802 - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
6803 + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
6804 }
6805
6806 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
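The jit.c hunks delete wrp_alu32_imm()'s skip shortcut along with every caller's skip heuristic (!imm, !~imm). The shortcut was unsound: a 32-bit BPF ALU op is not a no-op even when the immediate leaves the low word unchanged, because the upper 32 bits of the destination must still be zeroed, which the retained wrp_immed() of the high half does. A semantics sketch in plain C:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t alu32_or(uint64_t dst, uint32_t imm)
    {
        return (uint32_t)dst | imm;   /* result is zero-extended */
    }

    int main(void)
    {
        uint64_t dst = 0xdeadbeef00000001ull;

        /* 'skip' would have kept dst as-is; the correct result clears
         * the upper word even though imm == 0.
         */
        printf("%#llx\n", (unsigned long long)alu32_or(dst, 0)); /* 0x1 */
        return 0;
    }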
6807 diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
6808 index e860bdf0f752..b7471e48db7b 100644
6809 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
6810 +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
6811 @@ -1689,6 +1689,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
6812
6813 eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
6814
6815 + if (!ether_addr_equal(ethh->h_dest,
6816 + p_hwfn->p_rdma_info->iwarp.mac_addr)) {
6817 + DP_VERBOSE(p_hwfn,
6818 + QED_MSG_RDMA,
6819 + "Got unexpected mac %pM instead of %pM\n",
6820 + ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
6821 + return -EINVAL;
6822 + }
6823 +
6824 ether_addr_copy(remote_mac_addr, ethh->h_source);
6825 ether_addr_copy(local_mac_addr, ethh->h_dest);
6826
6827 @@ -2606,7 +2615,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
6828 struct qed_iwarp_info *iwarp_info;
6829 struct qed_ll2_acquire_data data;
6830 struct qed_ll2_cbs cbs;
6831 - u32 mpa_buff_size;
6832 + u32 buff_size;
6833 u16 n_ooo_bufs;
6834 int rc = 0;
6835 int i;
6836 @@ -2633,7 +2642,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
6837
6838 memset(&data, 0, sizeof(data));
6839 data.input.conn_type = QED_LL2_TYPE_IWARP;
6840 - data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
6841 + data.input.mtu = params->max_mtu;
6842 data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
6843 data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
6844 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
6845 @@ -2655,9 +2664,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
6846 goto err;
6847 }
6848
6849 + buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
6850 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
6851 QED_IWARP_LL2_SYN_RX_SIZE,
6852 - QED_IWARP_MAX_SYN_PKT_SIZE,
6853 + buff_size,
6854 iwarp_info->ll2_syn_handle);
6855 if (rc)
6856 goto err;
6857 @@ -2711,10 +2721,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
6858 if (rc)
6859 goto err;
6860
6861 - mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
6862 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
6863 data.input.rx_num_desc,
6864 - mpa_buff_size,
6865 + buff_size,
6866 iwarp_info->ll2_mpa_handle);
6867 if (rc)
6868 goto err;
6869 @@ -2727,7 +2736,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
6870
6871 iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
6872
6873 - iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
6874 + iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
6875 if (!iwarp_info->mpa_intermediate_buf)
6876 goto err;
6877
6878 diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
6879 index b8f612d00241..7ac959038324 100644
6880 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
6881 +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
6882 @@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
6883
6884 #define QED_IWARP_LL2_SYN_TX_SIZE (128)
6885 #define QED_IWARP_LL2_SYN_RX_SIZE (256)
6886 -#define QED_IWARP_MAX_SYN_PKT_SIZE (128)
6887
6888 #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256)
6889 #define QED_IWARP_MAX_OOO (16)
6890 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
6891 index 6e381354f658..74bebbdb4b15 100644
6892 --- a/drivers/net/usb/qmi_wwan.c
6893 +++ b/drivers/net/usb/qmi_wwan.c
6894 @@ -1208,8 +1208,8 @@ static const struct usb_device_id products[] = {
6895 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
6896 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
6897 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
6898 - {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */
6899 - {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */
6900 + {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */
6901 + {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */
6902 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
6903 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
6904 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
6905 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
6906 index 4ca6592f5b3a..7cd428c0af43 100644
6907 --- a/drivers/net/wireless/mac80211_hwsim.c
6908 +++ b/drivers/net/wireless/mac80211_hwsim.c
6909 @@ -3454,7 +3454,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
6910 goto out_err;
6911 }
6912
6913 - genlmsg_reply(skb, info);
6914 + res = genlmsg_reply(skb, info);
6915 break;
6916 }
6917
6918 diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
6919 index 789337ea676a..6ede6168bd85 100644
6920 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
6921 +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
6922 @@ -433,8 +433,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
6923 skb_tail_pointer(skb),
6924 MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
6925
6926 - cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
6927 -
6928 lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n",
6929 cardp->rx_urb);
6930 ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
6931 diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
6932 index 1d28cd656536..1eeb7be6aa34 100644
6933 --- a/drivers/nvdimm/label.c
6934 +++ b/drivers/nvdimm/label.c
6935 @@ -625,7 +625,7 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
6936
6937 static int __pmem_label_update(struct nd_region *nd_region,
6938 struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
6939 - int pos)
6940 + int pos, unsigned long flags)
6941 {
6942 struct nd_namespace_common *ndns = &nspm->nsio.common;
6943 struct nd_interleave_set *nd_set = nd_region->nd_set;
6944 @@ -666,7 +666,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
6945 memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
6946 if (nspm->alt_name)
6947 memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
6948 - nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
6949 + nd_label->flags = __cpu_to_le32(flags);
6950 nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
6951 nd_label->position = __cpu_to_le16(pos);
6952 nd_label->isetcookie = __cpu_to_le64(cookie);
6953 @@ -1120,13 +1120,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
6954 int nd_pmem_namespace_label_update(struct nd_region *nd_region,
6955 struct nd_namespace_pmem *nspm, resource_size_t size)
6956 {
6957 - int i;
6958 + int i, rc;
6959
6960 for (i = 0; i < nd_region->ndr_mappings; i++) {
6961 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
6962 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
6963 struct resource *res;
6964 - int rc, count = 0;
6965 + int count = 0;
6966
6967 if (size == 0) {
6968 rc = del_labels(nd_mapping, nspm->uuid);
6969 @@ -1144,7 +1144,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
6970 if (rc < 0)
6971 return rc;
6972
6973 - rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
6974 + rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
6975 + NSLABEL_FLAG_UPDATING);
6976 + if (rc)
6977 + return rc;
6978 + }
6979 +
6980 + if (size == 0)
6981 + return 0;
6982 +
6983 + /* Clear the UPDATING flag per UEFI 2.7 expectations */
6984 + for (i = 0; i < nd_region->ndr_mappings; i++) {
6985 + struct nd_mapping *nd_mapping = &nd_region->mapping[i];
6986 +
6987 + rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
6988 if (rc)
6989 return rc;
6990 }
6991 diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
6992 index 4a4266250c28..54d79837f7c6 100644
6993 --- a/drivers/nvdimm/namespace_devs.c
6994 +++ b/drivers/nvdimm/namespace_devs.c
6995 @@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
6996 bool pmem_should_map_pages(struct device *dev)
6997 {
6998 struct nd_region *nd_region = to_nd_region(dev->parent);
6999 + struct nd_namespace_common *ndns = to_ndns(dev);
7000 struct nd_namespace_io *nsio;
7001
7002 if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
7003 @@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev)
7004 if (is_nd_pfn(dev) || is_nd_btt(dev))
7005 return false;
7006
7007 + if (ndns->force_raw)
7008 + return false;
7009 +
7010 nsio = to_nd_namespace_io(dev);
7011 if (region_intersects(nsio->res.start, resource_size(&nsio->res),
7012 IORESOURCE_SYSTEM_RAM,
7013 diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
7014 index 7fe84bfe0878..3ee995a3bfc9 100644
7015 --- a/drivers/nvdimm/pfn_devs.c
7016 +++ b/drivers/nvdimm/pfn_devs.c
7017 @@ -534,7 +534,7 @@ static unsigned long init_altmap_base(resource_size_t base)
7018
7019 static unsigned long init_altmap_reserve(resource_size_t base)
7020 {
7021 - unsigned long reserve = PHYS_PFN(SZ_8K);
7022 + unsigned long reserve = PFN_UP(SZ_8K);
7023 unsigned long base_pfn = PHYS_PFN(base);
7024
7025 reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
7026 @@ -619,7 +619,7 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trun
7027 if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
7028 IORES_DESC_NONE) == REGION_MIXED
7029 || !IS_ALIGNED(end, nd_pfn->align)
7030 - || nd_region_conflict(nd_region, start, size + adjust))
7031 + || nd_region_conflict(nd_region, start, size))
7032 *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
7033 }
7034
7035 diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
7036 index 380916bff9e0..dee5b9e35ffd 100644
7037 --- a/drivers/parport/parport_pc.c
7038 +++ b/drivers/parport/parport_pc.c
7039 @@ -1377,7 +1377,7 @@ static struct superio_struct *find_superio(struct parport *p)
7040 {
7041 int i;
7042 for (i = 0; i < NR_SUPERIOS; i++)
7043 - if (superios[i].io != p->base)
7044 + if (superios[i].io == p->base)
7045 return &superios[i];
7046 return NULL;
7047 }
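find_superio() had its comparison inverted: with '!=' the loop returned the first entry whose io base did not match, i.e. almost always the wrong one. The corrected lookup in miniature:

    #include <stdio.h>

    struct superio { unsigned io; };

    static struct superio superios[3] = { {0x278}, {0x378}, {0x3bc} };

    static struct superio *find_superio(unsigned base)
    {
        int i;

        for (i = 0; i < 3; i++)
            if (superios[i].io == base)   /* was '!=': wrong entry */
                return &superios[i];
        return NULL;
    }

    int main(void)
    {
        printf("%#x\n", find_superio(0x378)->io); /* 0x378 */
        return 0;
    }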
7048 diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
7049 index 0fa9e8fdce66..b56e22262a77 100644
7050 --- a/drivers/pci/controller/dwc/pcie-designware-host.c
7051 +++ b/drivers/pci/controller/dwc/pcie-designware-host.c
7052 @@ -439,7 +439,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
7053 if (ret)
7054 pci->num_viewport = 2;
7055
7056 - if (IS_ENABLED(CONFIG_PCI_MSI)) {
7057 + if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
7058 /*
7059 * If a specific SoC driver needs to change the
7060 * default number of vectors, it needs to implement
7061 diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
7062 index f03279fc87cd..1908dd2978d3 100644
7063 --- a/drivers/pci/pcie/dpc.c
7064 +++ b/drivers/pci/pcie/dpc.c
7065 @@ -153,6 +153,28 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
7066 pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
7067 }
7068
7069 +static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
7070 + struct aer_err_info *info)
7071 +{
7072 + int pos = dev->aer_cap;
7073 + u32 status, mask, sev;
7074 +
7075 + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
7076 + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
7077 + status &= ~mask;
7078 + if (!status)
7079 + return 0;
7080 +
7081 + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
7082 + status &= sev;
7083 + if (status)
7084 + info->severity = AER_FATAL;
7085 + else
7086 + info->severity = AER_NONFATAL;
7087 +
7088 + return 1;
7089 +}
7090 +
7091 static irqreturn_t dpc_handler(int irq, void *context)
7092 {
7093 struct aer_err_info info;
7094 @@ -180,9 +202,12 @@ static irqreturn_t dpc_handler(int irq, void *context)
7095 /* show RP PIO error detail information */
7096 if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
7097 dpc_process_rp_pio_error(dpc);
7098 - else if (reason == 0 && aer_get_device_error_info(pdev, &info)) {
7099 + else if (reason == 0 &&
7100 + dpc_get_aer_uncorrect_severity(pdev, &info) &&
7101 + aer_get_device_error_info(pdev, &info)) {
7102 aer_print_error(pdev, &info);
7103 pci_cleanup_aer_uncorrect_error_status(pdev);
7104 + pci_aer_clear_fatal_status(pdev);
7105 }
7106
7107 /* We configure DPC so it only triggers on ERR_FATAL */
7108 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
7109 index 201f9e5ff55c..4a4c16bfc0d3 100644
7110 --- a/drivers/pci/probe.c
7111 +++ b/drivers/pci/probe.c
7112 @@ -2038,11 +2038,8 @@ static void pci_configure_ltr(struct pci_dev *dev)
7113 {
7114 #ifdef CONFIG_PCIEASPM
7115 struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
7116 - u32 cap;
7117 struct pci_dev *bridge;
7118 -
7119 - if (!host->native_ltr)
7120 - return;
7121 + u32 cap, ctl;
7122
7123 if (!pci_is_pcie(dev))
7124 return;
7125 @@ -2051,22 +2048,35 @@ static void pci_configure_ltr(struct pci_dev *dev)
7126 if (!(cap & PCI_EXP_DEVCAP2_LTR))
7127 return;
7128
7129 - /*
7130 - * Software must not enable LTR in an Endpoint unless the Root
7131 - * Complex and all intermediate Switches indicate support for LTR.
7132 - * PCIe r3.1, sec 6.18.
7133 - */
7134 - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
7135 - dev->ltr_path = 1;
7136 - else {
7137 + pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl);
7138 + if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
7139 + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
7140 + dev->ltr_path = 1;
7141 + return;
7142 + }
7143 +
7144 bridge = pci_upstream_bridge(dev);
7145 if (bridge && bridge->ltr_path)
7146 dev->ltr_path = 1;
7147 +
7148 + return;
7149 }
7150
7151 - if (dev->ltr_path)
7152 + if (!host->native_ltr)
7153 + return;
7154 +
7155 + /*
7156 + * Software must not enable LTR in an Endpoint unless the Root
7157 + * Complex and all intermediate Switches indicate support for LTR.
7158 + * PCIe r4.0, sec 6.18.
7159 + */
7160 + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
7161 + ((bridge = pci_upstream_bridge(dev)) &&
7162 + bridge->ltr_path)) {
7163 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
7164 PCI_EXP_DEVCTL2_LTR_EN);
7165 + dev->ltr_path = 1;
7166 + }
7167 #endif
7168 }
7169
7170 diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
7171 index 91cffc051055..ead4beb5f55f 100644
7172 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c
7173 +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
7174 @@ -665,7 +665,7 @@ static const char * const sd_a_groups[] = {
7175
7176 static const char * const sdxc_a_groups[] = {
7177 "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
7178 - "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a"
7179 + "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a"
7180 };
7181
7182 static const char * const pcm_a_groups[] = {
7183 diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
7184 index e4905bef2663..37be541e057d 100644
7185 --- a/drivers/power/supply/cpcap-charger.c
7186 +++ b/drivers/power/supply/cpcap-charger.c
7187 @@ -458,6 +458,7 @@ static void cpcap_usb_detect(struct work_struct *work)
7188 goto out_err;
7189 }
7190
7191 + power_supply_changed(ddata->usb);
7192 return;
7193
7194 out_err:
7195 diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
7196 index b94e3a721721..cd93cf53e23c 100644
7197 --- a/drivers/regulator/max77620-regulator.c
7198 +++ b/drivers/regulator/max77620-regulator.c
7199 @@ -1,7 +1,7 @@
7200 /*
7201 * Maxim MAX77620 Regulator driver
7202 *
7203 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
7204 + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
7205 *
7206 * Author: Mallikarjun Kasoju <mkasoju@nvidia.com>
7207 * Laxman Dewangan <ldewangan@nvidia.com>
7208 @@ -803,6 +803,14 @@ static int max77620_regulator_probe(struct platform_device *pdev)
7209 rdesc = &rinfo[id].desc;
7210 pmic->rinfo[id] = &max77620_regs_info[id];
7211 pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL;
7212 + pmic->reg_pdata[id].active_fps_src = -1;
7213 + pmic->reg_pdata[id].active_fps_pd_slot = -1;
7214 + pmic->reg_pdata[id].active_fps_pu_slot = -1;
7215 + pmic->reg_pdata[id].suspend_fps_src = -1;
7216 + pmic->reg_pdata[id].suspend_fps_pd_slot = -1;
7217 + pmic->reg_pdata[id].suspend_fps_pu_slot = -1;
7218 + pmic->reg_pdata[id].power_ok = -1;
7219 + pmic->reg_pdata[id].ramp_rate_setting = -1;
7220
7221 ret = max77620_read_slew_rate(pmic, id);
7222 if (ret < 0)
7223 diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
7224 index 095d25f3d2ea..58a1fe583a6c 100644
7225 --- a/drivers/regulator/s2mpa01.c
7226 +++ b/drivers/regulator/s2mpa01.c
7227 @@ -298,13 +298,13 @@ static const struct regulator_desc regulators[] = {
7228 regulator_desc_ldo(2, STEP_50_MV),
7229 regulator_desc_ldo(3, STEP_50_MV),
7230 regulator_desc_ldo(4, STEP_50_MV),
7231 - regulator_desc_ldo(5, STEP_50_MV),
7232 + regulator_desc_ldo(5, STEP_25_MV),
7233 regulator_desc_ldo(6, STEP_25_MV),
7234 regulator_desc_ldo(7, STEP_50_MV),
7235 regulator_desc_ldo(8, STEP_50_MV),
7236 regulator_desc_ldo(9, STEP_50_MV),
7237 regulator_desc_ldo(10, STEP_50_MV),
7238 - regulator_desc_ldo(11, STEP_25_MV),
7239 + regulator_desc_ldo(11, STEP_50_MV),
7240 regulator_desc_ldo(12, STEP_50_MV),
7241 regulator_desc_ldo(13, STEP_50_MV),
7242 regulator_desc_ldo(14, STEP_50_MV),
7243 @@ -315,11 +315,11 @@ static const struct regulator_desc regulators[] = {
7244 regulator_desc_ldo(19, STEP_50_MV),
7245 regulator_desc_ldo(20, STEP_50_MV),
7246 regulator_desc_ldo(21, STEP_50_MV),
7247 - regulator_desc_ldo(22, STEP_25_MV),
7248 - regulator_desc_ldo(23, STEP_25_MV),
7249 + regulator_desc_ldo(22, STEP_50_MV),
7250 + regulator_desc_ldo(23, STEP_50_MV),
7251 regulator_desc_ldo(24, STEP_50_MV),
7252 regulator_desc_ldo(25, STEP_50_MV),
7253 - regulator_desc_ldo(26, STEP_50_MV),
7254 + regulator_desc_ldo(26, STEP_25_MV),
7255 regulator_desc_buck1_4(1),
7256 regulator_desc_buck1_4(2),
7257 regulator_desc_buck1_4(3),
7258 diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
7259 index 5bb6f4ca48db..c584bd1ffa9c 100644
7260 --- a/drivers/regulator/s2mps11.c
7261 +++ b/drivers/regulator/s2mps11.c
7262 @@ -363,7 +363,7 @@ static const struct regulator_desc s2mps11_regulators[] = {
7263 regulator_desc_s2mps11_ldo(32, STEP_50_MV),
7264 regulator_desc_s2mps11_ldo(33, STEP_50_MV),
7265 regulator_desc_s2mps11_ldo(34, STEP_50_MV),
7266 - regulator_desc_s2mps11_ldo(35, STEP_50_MV),
7267 + regulator_desc_s2mps11_ldo(35, STEP_25_MV),
7268 regulator_desc_s2mps11_ldo(36, STEP_50_MV),
7269 regulator_desc_s2mps11_ldo(37, STEP_50_MV),
7270 regulator_desc_s2mps11_ldo(38, STEP_50_MV),
7271 @@ -373,8 +373,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
7272 regulator_desc_s2mps11_buck1_4(4),
7273 regulator_desc_s2mps11_buck5,
7274 regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
7275 - regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
7276 - regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
7277 + regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
7278 + regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
7279 regulator_desc_s2mps11_buck9,
7280 regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
7281 };
7282 diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
7283 index 4e7b55a14b1a..6e294b4d3635 100644
7284 --- a/drivers/s390/block/dasd_eckd.c
7285 +++ b/drivers/s390/block/dasd_eckd.c
7286 @@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
7287 usrparm.psf_data &= 0x7fffffffULL;
7288 usrparm.rssd_result &= 0x7fffffffULL;
7289 }
7290 + /* at least 2 bytes are accessed and should be allocated */
7291 + if (usrparm.psf_data_len < 2) {
7292 + DBF_DEV_EVENT(DBF_WARNING, device,
7293 + "Symmetrix ioctl invalid data length %d",
7294 + usrparm.psf_data_len);
7295 + rc = -EINVAL;
7296 + goto out;
7297 + }
7298 /* alloc I/O data area */
7299 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
7300 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
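The dasd_eckd.c hunk validates the user-supplied psf_data_len before the allocation, rejecting anything below the two bytes the handler goes on to access; checking before allocating also avoids a pointless kzalloc on bad input. A userspace sketch of the ordering (simplified stand-ins):

    #include <stdlib.h>
    #include <errno.h>

    static int symm_io(size_t psf_data_len)
    {
        char *buf;

        if (psf_data_len < 2)       /* handler touches at least 2 bytes */
            return -EINVAL;
        buf = calloc(1, psf_data_len);
        if (!buf)
            return -ENOMEM;
        /* ... buf[0] and buf[1] are now safe to access ... */
        free(buf);
        return 0;
    }

    int main(void)
    {
        return symm_io(1) == -EINVAL ? 0 : 1;
    }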
7301 diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
7302 index b67dc4974f23..ec54538f7ae1 100644
7303 --- a/drivers/s390/virtio/virtio_ccw.c
7304 +++ b/drivers/s390/virtio/virtio_ccw.c
7305 @@ -272,6 +272,8 @@ static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
7306 {
7307 struct virtio_ccw_vq_info *info;
7308
7309 + if (!vcdev->airq_info)
7310 + return;
7311 list_for_each_entry(info, &vcdev->virtqueues, node)
7312 drop_airq_indicator(info->vq, vcdev->airq_info);
7313 }
7314 @@ -413,7 +415,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
7315 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
7316 if (ret)
7317 return ret;
7318 - return vcdev->config_block->num;
7319 + return vcdev->config_block->num ?: -ENOENT;
7320 }
7321
7322 static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
7323 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
7324 index 04443577d48b..1046947064a0 100644
7325 --- a/drivers/scsi/aacraid/linit.c
7326 +++ b/drivers/scsi/aacraid/linit.c
7327 @@ -413,13 +413,16 @@ static int aac_slave_configure(struct scsi_device *sdev)
7328 if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
7329 devtype = aac->hba_map[chn][tid].devtype;
7330
7331 - if (devtype == AAC_DEVTYPE_NATIVE_RAW)
7332 + if (devtype == AAC_DEVTYPE_NATIVE_RAW) {
7333 depth = aac->hba_map[chn][tid].qd_limit;
7334 - else if (devtype == AAC_DEVTYPE_ARC_RAW)
7335 + set_timeout = 1;
7336 + goto common_config;
7337 + }
7338 + if (devtype == AAC_DEVTYPE_ARC_RAW) {
7339 set_qd_dev_type = true;
7340 -
7341 - set_timeout = 1;
7342 - goto common_config;
7343 + set_timeout = 1;
7344 + goto common_config;
7345 + }
7346 }
7347
7348 if (aac->jbod && (sdev->type == TYPE_DISK))
7349 diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
7350 index f78d2e5c1471..4ad61cfa69c0 100644
7351 --- a/drivers/scsi/libiscsi.c
7352 +++ b/drivers/scsi/libiscsi.c
7353 @@ -1449,7 +1449,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
7354 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
7355 return -ENODATA;
7356
7357 + spin_lock_bh(&conn->session->back_lock);
7358 + if (conn->task == NULL) {
7359 + spin_unlock_bh(&conn->session->back_lock);
7360 + return -ENODATA;
7361 + }
7362 __iscsi_get_task(task);
7363 + spin_unlock_bh(&conn->session->back_lock);
7364 spin_unlock_bh(&conn->session->frwd_lock);
7365 rc = conn->session->tt->xmit_task(task);
7366 spin_lock_bh(&conn->session->frwd_lock);
7367 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
7368 index 5352c9bbcaf7..f84f9bf15027 100644
7369 --- a/drivers/scsi/qla2xxx/qla_init.c
7370 +++ b/drivers/scsi/qla2xxx/qla_init.c
7371 @@ -643,11 +643,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
7372 break;
7373 case DSC_LS_PORT_UNAVAIL:
7374 default:
7375 - if (fcport->loop_id != FC_NO_LOOP_ID)
7376 - qla2x00_clear_loop_id(fcport);
7377 -
7378 - fcport->loop_id = loop_id;
7379 - fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
7380 + if (fcport->loop_id == FC_NO_LOOP_ID) {
7381 + qla2x00_find_new_loop_id(vha, fcport);
7382 + fcport->fw_login_state =
7383 + DSC_LS_PORT_UNAVAIL;
7384 + }
7385 + ql_dbg(ql_dbg_disc, vha, 0x20e5,
7386 + "%s %d %8phC\n", __func__, __LINE__,
7387 + fcport->port_name);
7388 qla24xx_fcport_handle_login(vha, fcport);
7389 break;
7390 }
7391 @@ -1719,13 +1722,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
7392
7393 /* Issue Marker IOCB */
7394 qla2x00_marker(vha, vha->hw->req_q_map[0],
7395 - vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
7396 + vha->hw->rsp_q_map[0], fcport->loop_id, lun,
7397 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
7398 }
7399
7400 done_free_sp:
7401 sp->free(sp);
7402 - sp->fcport->flags &= ~FCF_ASYNC_SENT;
7403 + fcport->flags &= ~FCF_ASYNC_SENT;
7404 done:
7405 return rval;
7406 }
7407 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
7408 index 58b78702c6c9..a3a5162fa60e 100644
7409 --- a/drivers/scsi/sd.c
7410 +++ b/drivers/scsi/sd.c
7411 @@ -3066,6 +3066,55 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
7412 sdkp->security = 1;
7413 }
7414
7415 +/*
7416 + * Determine the device's preferred I/O size for reads and writes
7417 + * unless the reported value is unreasonably small, large, not a
7418 + * multiple of the physical block size, or simply garbage.
7419 + */
7420 +static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
7421 + unsigned int dev_max)
7422 +{
7423 + struct scsi_device *sdp = sdkp->device;
7424 + unsigned int opt_xfer_bytes =
7425 + logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
7426 +
7427 + if (sdkp->opt_xfer_blocks > dev_max) {
7428 + sd_first_printk(KERN_WARNING, sdkp,
7429 + "Optimal transfer size %u logical blocks " \
7430 + "> dev_max (%u logical blocks)\n",
7431 + sdkp->opt_xfer_blocks, dev_max);
7432 + return false;
7433 + }
7434 +
7435 + if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
7436 + sd_first_printk(KERN_WARNING, sdkp,
7437 + "Optimal transfer size %u logical blocks " \
7438 + "> sd driver limit (%u logical blocks)\n",
7439 + sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
7440 + return false;
7441 + }
7442 +
7443 + if (opt_xfer_bytes < PAGE_SIZE) {
7444 + sd_first_printk(KERN_WARNING, sdkp,
7445 + "Optimal transfer size %u bytes < " \
7446 + "PAGE_SIZE (%u bytes)\n",
7447 + opt_xfer_bytes, (unsigned int)PAGE_SIZE);
7448 + return false;
7449 + }
7450 +
7451 + if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
7452 + sd_first_printk(KERN_WARNING, sdkp,
7453 + "Optimal transfer size %u bytes not a " \
7454 + "multiple of physical block size (%u bytes)\n",
7455 + opt_xfer_bytes, sdkp->physical_block_size);
7456 + return false;
7457 + }
7458 +
7459 + sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
7460 + opt_xfer_bytes);
7461 + return true;
7462 +}
7463 +
7464 /**
7465 * sd_revalidate_disk - called the first time a new disk is seen,
7466 * performs disk spin up, read_capacity, etc.
7467 @@ -3144,15 +3193,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
7468 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
7469 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
7470
7471 - /*
7472 - * Determine the device's preferred I/O size for reads and writes
7473 - * unless the reported value is unreasonably small, large, or
7474 - * garbage.
7475 - */
7476 - if (sdkp->opt_xfer_blocks &&
7477 - sdkp->opt_xfer_blocks <= dev_max &&
7478 - sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
7479 - logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
7480 + if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
7481 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
7482 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
7483 } else
7484 diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
7485 index 1c72db94270e..3d331a864b2f 100644
7486 --- a/drivers/scsi/virtio_scsi.c
7487 +++ b/drivers/scsi/virtio_scsi.c
7488 @@ -621,7 +621,6 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
7489 return FAILED;
7490
7491 memset(cmd, 0, sizeof(*cmd));
7492 - cmd->sc = sc;
7493 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
7494 .type = VIRTIO_SCSI_T_TMF,
7495 .subtype = cpu_to_virtio32(vscsi->vdev,
7496 @@ -680,7 +679,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
7497 return FAILED;
7498
7499 memset(cmd, 0, sizeof(*cmd));
7500 - cmd->sc = sc;
7501 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
7502 .type = VIRTIO_SCSI_T_TMF,
7503 .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
7504 diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
7505 index c7beb6841289..ab8f731a3426 100644
7506 --- a/drivers/soc/qcom/rpmh.c
7507 +++ b/drivers/soc/qcom/rpmh.c
7508 @@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
7509 struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
7510 msg);
7511 struct completion *compl = rpm_msg->completion;
7512 + bool free = rpm_msg->needs_free;
7513
7514 rpm_msg->err = r;
7515
7516 @@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
7517 complete(compl);
7518
7519 exit:
7520 - if (rpm_msg->needs_free)
7521 + if (free)
7522 kfree(rpm_msg);
7523 }
7524
7525 @@ -348,11 +349,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
7526 {
7527 struct batch_cache_req *req;
7528 struct rpmh_request *rpm_msgs;
7529 - DECLARE_COMPLETION_ONSTACK(compl);
7530 + struct completion *compls;
7531 struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
7532 unsigned long time_left;
7533 int count = 0;
7534 - int ret, i, j;
7535 + int ret, i;
7536 + void *ptr;
7537
7538 if (!cmd || !n)
7539 return -EINVAL;
7540 @@ -362,10 +364,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
7541 if (!count)
7542 return -EINVAL;
7543
7544 - req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
7545 + ptr = kzalloc(sizeof(*req) +
7546 + count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
7547 GFP_ATOMIC);
7548 - if (!req)
7549 + if (!ptr)
7550 return -ENOMEM;
7551 +
7552 + req = ptr;
7553 + compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
7554 +
7555 req->count = count;
7556 rpm_msgs = req->rpm_msgs;
7557
7558 @@ -380,25 +387,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
7559 }
7560
7561 for (i = 0; i < count; i++) {
7562 - rpm_msgs[i].completion = &compl;
7563 + struct completion *compl = &compls[i];
7564 +
7565 + init_completion(compl);
7566 + rpm_msgs[i].completion = compl;
7567 ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
7568 if (ret) {
7569 pr_err("Error(%d) sending RPMH message addr=%#x\n",
7570 ret, rpm_msgs[i].msg.cmds[0].addr);
7571 - for (j = i; j < count; j++)
7572 - rpmh_tx_done(&rpm_msgs[j].msg, ret);
7573 break;
7574 }
7575 }
7576
7577 time_left = RPMH_TIMEOUT_MS;
7578 - for (i = 0; i < count; i++) {
7579 - time_left = wait_for_completion_timeout(&compl, time_left);
7580 + while (i--) {
7581 + time_left = wait_for_completion_timeout(&compls[i], time_left);
7582 if (!time_left) {
7583 /*
7584 * Better hope they never finish because they'll signal
7585 - * the completion on our stack and that's bad once
7586 - * we've returned from the function.
7587 + * the completion that we're going to free once
7588 + * we've returned from this function.
7589 */
7590 WARN_ON(1);
7591 ret = -ETIMEDOUT;
7592 @@ -407,7 +415,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
7593 }
7594
7595 exit:
7596 - kfree(req);
7597 + kfree(ptr);
7598
7599 return ret;
7600 }
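The rpmh.c hunks fix two lifetime hazards. rpmh_tx_done() now latches needs_free before calling complete(), because the waiter may free rpm_msg the moment it is signalled. And rpmh_write_batch() replaces the single on-stack completion with one completion per message, co-allocated in the same block as the request, so the interrupt path never signals stack memory after the function has returned; the wait loop then reaps only the messages actually sent. A layout sketch of the co-allocation (simplified stand-in types):

    #include <stdio.h>
    #include <stdlib.h>

    struct msg  { int payload; };
    struct comp { int done; };
    struct req  { int count; struct msg msgs[]; };

    int main(void)
    {
        int count = 3;
        struct req *req;
        struct comp *compls;
        void *ptr;

        /* one block: header, then msgs[], then one completion per msg */
        ptr = calloc(1, sizeof(*req) +
                        count * (sizeof(struct msg) + sizeof(struct comp)));
        if (!ptr)
            return 1;
        req    = ptr;
        compls = (struct comp *)((char *)ptr + sizeof(*req) +
                                 count * sizeof(struct msg));
        req->count = count;
        compls[count - 1].done = 1;     /* completer writes heap, not stack */
        printf("msgs=%d last done=%d\n", req->count, compls[count - 1].done);
        free(ptr);                      /* one allocation, one free */
        return 0;
    }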
7601 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
7602 index 14f4ea59caff..b624f6fb04ce 100644
7603 --- a/drivers/spi/spi-pxa2xx.c
7604 +++ b/drivers/spi/spi-pxa2xx.c
7605 @@ -1612,6 +1612,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
7606 platform_info->enable_dma = false;
7607 } else {
7608 master->can_dma = pxa2xx_spi_can_dma;
7609 + master->max_dma_len = MAX_DMA_LEN;
7610 }
7611 }
7612
7613 diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
7614 index 5f19016bbf10..b9fb6493cd6b 100644
7615 --- a/drivers/spi/spi-ti-qspi.c
7616 +++ b/drivers/spi/spi-ti-qspi.c
7617 @@ -490,8 +490,8 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi)
7618 ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
7619 if (qspi->ctrl_base) {
7620 regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
7621 - MEM_CS_EN(spi->chip_select),
7622 - MEM_CS_MASK);
7623 + MEM_CS_MASK,
7624 + MEM_CS_EN(spi->chip_select));
7625 }
7626 qspi->mmap_enabled = true;
7627 }
7628 @@ -503,7 +503,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi)
7629 ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
7630 if (qspi->ctrl_base)
7631 regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
7632 - 0, MEM_CS_MASK);
7633 + MEM_CS_MASK, 0);
7634 qspi->mmap_enabled = false;
7635 }
7636
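Both spi-ti-qspi.c hunks fix an argument-order bug: regmap_update_bits() takes the mask before the value, so the old calls passed the enable value as the mask. The disable call in particular became a no-op, since a mask of 0 updates nothing. A standalone model of the read-modify-write:

    #include <stdio.h>

    static unsigned regval;

    /* same contract as regmap_update_bits(map, reg, mask, val) */
    static void update_bits(unsigned mask, unsigned val)
    {
        regval = (regval & ~mask) | (val & mask);
    }

    int main(void)
    {
        const unsigned MEM_CS_MASK = 0x700;  /* illustrative field */

        regval = 0x100;                      /* chip-select enabled */
        update_bits(0, MEM_CS_MASK);         /* swapped args: mask 0 */
        printf("swapped disable: %#x\n", regval); /* still 0x100 */

        update_bits(MEM_CS_MASK, 0);         /* correct order */
        printf("correct disable: %#x\n", regval); /* 0 */
        return 0;
    }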
7637 diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
7638 index 28f41caba05d..fb442499f806 100644
7639 --- a/drivers/staging/media/imx/imx-ic-prpencvf.c
7640 +++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
7641 @@ -680,12 +680,23 @@ static int prp_start(struct prp_priv *priv)
7642 goto out_free_nfb4eof_irq;
7643 }
7644
7645 + /* start upstream */
7646 + ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
7647 + ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
7648 + if (ret) {
7649 + v4l2_err(&ic_priv->sd,
7650 + "upstream stream on failed: %d\n", ret);
7651 + goto out_free_eof_irq;
7652 + }
7653 +
7654 /* start the EOF timeout timer */
7655 mod_timer(&priv->eof_timeout_timer,
7656 jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
7657
7658 return 0;
7659
7660 +out_free_eof_irq:
7661 + devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
7662 out_free_nfb4eof_irq:
7663 devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
7664 out_unsetup:
7665 @@ -717,6 +728,12 @@ static void prp_stop(struct prp_priv *priv)
7666 if (ret == 0)
7667 v4l2_warn(&ic_priv->sd, "wait last EOF timeout\n");
7668
7669 + /* stop upstream */
7670 + ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
7671 + if (ret && ret != -ENOIOCTLCMD)
7672 + v4l2_warn(&ic_priv->sd,
7673 + "upstream stream off failed: %d\n", ret);
7674 +
7675 devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
7676 devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
7677
7678 @@ -1148,15 +1165,6 @@ static int prp_s_stream(struct v4l2_subdev *sd, int enable)
7679 if (ret)
7680 goto out;
7681
7682 - /* start/stop upstream */
7683 - ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable);
7684 - ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
7685 - if (ret) {
7686 - if (enable)
7687 - prp_stop(priv);
7688 - goto out;
7689 - }
7690 -
7691 update_count:
7692 priv->stream_count += enable ? 1 : -1;
7693 if (priv->stream_count < 0)
7694 diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
7695 index cd2c291e1e94..e22f1239a318 100644
7696 --- a/drivers/staging/media/imx/imx-media-csi.c
7697 +++ b/drivers/staging/media/imx/imx-media-csi.c
7698 @@ -626,7 +626,7 @@ out_put_ipu:
7699 return ret;
7700 }
7701
7702 -static void csi_idmac_stop(struct csi_priv *priv)
7703 +static void csi_idmac_wait_last_eof(struct csi_priv *priv)
7704 {
7705 unsigned long flags;
7706 int ret;
7707 @@ -643,7 +643,10 @@ static void csi_idmac_stop(struct csi_priv *priv)
7708 &priv->last_eof_comp, msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
7709 if (ret == 0)
7710 v4l2_warn(&priv->sd, "wait last EOF timeout\n");
7711 +}
7712
7713 +static void csi_idmac_stop(struct csi_priv *priv)
7714 +{
7715 devm_free_irq(priv->dev, priv->eof_irq, priv);
7716 devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
7717
7718 @@ -719,10 +722,16 @@ static int csi_start(struct csi_priv *priv)
7719
7720 output_fi = &priv->frame_interval[priv->active_output_pad];
7721
7722 + /* start upstream */
7723 + ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
7724 + ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
7725 + if (ret)
7726 + return ret;
7727 +
7728 if (priv->dest == IPU_CSI_DEST_IDMAC) {
7729 ret = csi_idmac_start(priv);
7730 if (ret)
7731 - return ret;
7732 + goto stop_upstream;
7733 }
7734
7735 ret = csi_setup(priv);
7736 @@ -750,11 +759,26 @@ fim_off:
7737 idmac_stop:
7738 if (priv->dest == IPU_CSI_DEST_IDMAC)
7739 csi_idmac_stop(priv);
7740 +stop_upstream:
7741 + v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
7742 return ret;
7743 }
7744
7745 static void csi_stop(struct csi_priv *priv)
7746 {
7747 + if (priv->dest == IPU_CSI_DEST_IDMAC)
7748 + csi_idmac_wait_last_eof(priv);
7749 +
7750 + /*
7751 + * Disable the CSI asap, after syncing with the last EOF.
7752 + * Doing so after the IDMA channel is disabled has shown to
7753 + * create hard system-wide hangs.
7754 + */
7755 + ipu_csi_disable(priv->csi);
7756 +
7757 + /* stop upstream */
7758 + v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
7759 +
7760 if (priv->dest == IPU_CSI_DEST_IDMAC) {
7761 csi_idmac_stop(priv);
7762
7763 @@ -762,8 +786,6 @@ static void csi_stop(struct csi_priv *priv)
7764 if (priv->fim)
7765 imx_media_fim_set_stream(priv->fim, NULL, false);
7766 }
7767 -
7768 - ipu_csi_disable(priv->csi);
7769 }
7770
7771 static const struct csi_skip_desc csi_skip[12] = {
7772 @@ -924,23 +946,13 @@ static int csi_s_stream(struct v4l2_subdev *sd, int enable)
7773 goto update_count;
7774
7775 if (enable) {
7776 - /* upstream must be started first, before starting CSI */
7777 - ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
7778 - ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
7779 - if (ret)
7780 - goto out;
7781 -
7782 dev_dbg(priv->dev, "stream ON\n");
7783 ret = csi_start(priv);
7784 - if (ret) {
7785 - v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
7786 + if (ret)
7787 goto out;
7788 - }
7789 } else {
7790 dev_dbg(priv->dev, "stream OFF\n");
7791 - /* CSI must be stopped first, then stop upstream */
7792 csi_stop(priv);
7793 - v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
7794 }
7795
7796 update_count:
7797 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
7798 index cc756a123fd8..03e9cb156df9 100644
7799 --- a/drivers/target/iscsi/iscsi_target.c
7800 +++ b/drivers/target/iscsi/iscsi_target.c
7801 @@ -4045,9 +4045,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
7802 struct se_cmd *se_cmd = &cmd->se_cmd;
7803
7804 if (se_cmd->se_tfo != NULL) {
7805 - spin_lock(&se_cmd->t_state_lock);
7806 + spin_lock_irq(&se_cmd->t_state_lock);
7807 se_cmd->transport_state |= CMD_T_FABRIC_STOP;
7808 - spin_unlock(&se_cmd->t_state_lock);
7809 + spin_unlock_irq(&se_cmd->t_state_lock);
7810 }
7811 }
7812 spin_unlock_bh(&conn->cmd_lock);
7813 diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
7814 index 877fd7f8a8ed..98125de2f0a6 100644
7815 --- a/drivers/tty/serial/8250/8250_of.c
7816 +++ b/drivers/tty/serial/8250/8250_of.c
7817 @@ -130,6 +130,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
7818 port->flags |= UPF_IOREMAP;
7819 }
7820
7821 + /* Compatibility with the deprecated pxa driver and 8250_pxa drivers. */
7822 + if (of_device_is_compatible(np, "mrvl,mmp-uart"))
7823 + port->regshift = 2;
7824 +
7825 /* Check for registers offset within the devices address range */
7826 if (of_property_read_u32(np, "reg-shift", &prop) == 0)
7827 port->regshift = prop;
7828 diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
7829 index 48bd694a5fa1..bbe5cba21522 100644
7830 --- a/drivers/tty/serial/8250/8250_pci.c
7831 +++ b/drivers/tty/serial/8250/8250_pci.c
7832 @@ -2027,6 +2027,111 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
7833 .setup = pci_default_setup,
7834 .exit = pci_plx9050_exit,
7835 },
7836 + {
7837 + .vendor = PCI_VENDOR_ID_ACCESIO,
7838 + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
7839 + .subvendor = PCI_ANY_ID,
7840 + .subdevice = PCI_ANY_ID,
7841 + .setup = pci_pericom_setup,
7842 + },
7843 + {
7844 + .vendor = PCI_VENDOR_ID_ACCESIO,
7845 + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
7846 + .subvendor = PCI_ANY_ID,
7847 + .subdevice = PCI_ANY_ID,
7848 + .setup = pci_pericom_setup,
7849 + },
7850 + {
7851 + .vendor = PCI_VENDOR_ID_ACCESIO,
7852 + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
7853 + .subvendor = PCI_ANY_ID,
7854 + .subdevice = PCI_ANY_ID,
7855 + .setup = pci_pericom_setup,
7856 + },
7857 + {
7858 + .vendor = PCI_VENDOR_ID_ACCESIO,
7859 + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
7860 + .subvendor = PCI_ANY_ID,
7861 + .subdevice = PCI_ANY_ID,
7862 + .setup = pci_pericom_setup,
7863 + },
7864 + {
7865 + .vendor = PCI_VENDOR_ID_ACCESIO,
7866 + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
7867 + .subvendor = PCI_ANY_ID,
7868 + .subdevice = PCI_ANY_ID,
7869 + .setup = pci_pericom_setup,
7870 + },
7871 + {
7872 + .vendor = PCI_VENDOR_ID_ACCESIO,
7873 + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
7874 + .subvendor = PCI_ANY_ID,
7875 + .subdevice = PCI_ANY_ID,
7876 + .setup = pci_pericom_setup,
7877 + },
7878 + {
7879 + .vendor = PCI_VENDOR_ID_ACCESIO,
7880 + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
7881 + .subvendor = PCI_ANY_ID,
7882 + .subdevice = PCI_ANY_ID,
7883 + .setup = pci_pericom_setup,
7884 + },
7885 + {
7886 + .vendor = PCI_VENDOR_ID_ACCESIO,
7887 + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
7888 + .subvendor = PCI_ANY_ID,
7889 + .subdevice = PCI_ANY_ID,
7890 + .setup = pci_pericom_setup,
7891 + },
7892 + {
7893 + .vendor = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
7894 + .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
7895 + .subvendor = PCI_ANY_ID,
7896 + .subdevice = PCI_ANY_ID,
7897 + .setup = pci_pericom_setup,
7898 + },
7899 + {
7900 + .vendor = PCI_VENDOR_ID_ACCESIO,
7901 + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
7902 + .subvendor = PCI_ANY_ID,
7903 + .subdevice = PCI_ANY_ID,
7904 + .setup = pci_pericom_setup,
7905 + },
7906 + {
7907 + .vendor = PCI_VENDOR_ID_ACCESIO,
7908 + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
7909 + .subvendor = PCI_ANY_ID,
7910 + .subdevice = PCI_ANY_ID,
7911 + .setup = pci_pericom_setup,
7912 + },
7913 + {
7914 + .vendor = PCI_VENDOR_ID_ACCESIO,
7915 + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
7916 + .subvendor = PCI_ANY_ID,
7917 + .subdevice = PCI_ANY_ID,
7918 + .setup = pci_pericom_setup,
7919 + },
7920 + {
7921 + .vendor = PCI_VENDOR_ID_ACCESIO,
7922 + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
7923 + .subvendor = PCI_ANY_ID,
7924 + .subdevice = PCI_ANY_ID,
7925 + .setup = pci_pericom_setup,
7926 + },
7927 + {
7928 + .vendor = PCI_VENDOR_ID_ACCESIO,
7929 + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
7930 + .subvendor = PCI_ANY_ID,
7931 + .subdevice = PCI_ANY_ID,
7932 + .setup = pci_pericom_setup,
7933 + },
7934 + {
7935 + .vendor = PCI_VENDOR_ID_ACCESIO,
7936 + .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
7937 + .subvendor = PCI_ANY_ID,
7938 + .subdevice = PCI_ANY_ID,
7939 + .setup = pci_pericom_setup,
7940 + },
7941 /*
7942 * SBS Technologies, Inc., PMC-OCTALPRO 232
7943 */
7944 @@ -4575,10 +4680,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
7945 */
7946 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
7947 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7948 - pbn_pericom_PI7C9X7954 },
7949 + pbn_pericom_PI7C9X7952 },
7950 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
7951 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7952 - pbn_pericom_PI7C9X7954 },
7953 + pbn_pericom_PI7C9X7952 },
7954 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
7955 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7956 pbn_pericom_PI7C9X7954 },
7957 @@ -4587,10 +4692,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
7958 pbn_pericom_PI7C9X7954 },
7959 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
7960 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7961 - pbn_pericom_PI7C9X7954 },
7962 + pbn_pericom_PI7C9X7952 },
7963 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
7964 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7965 - pbn_pericom_PI7C9X7954 },
7966 + pbn_pericom_PI7C9X7952 },
7967 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
7968 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7969 pbn_pericom_PI7C9X7954 },
7970 @@ -4599,10 +4704,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
7971 pbn_pericom_PI7C9X7954 },
7972 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
7973 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7974 - pbn_pericom_PI7C9X7954 },
7975 + pbn_pericom_PI7C9X7952 },
7976 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
7977 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7978 - pbn_pericom_PI7C9X7954 },
7979 + pbn_pericom_PI7C9X7952 },
7980 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
7981 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7982 pbn_pericom_PI7C9X7954 },
7983 @@ -4611,13 +4716,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
7984 pbn_pericom_PI7C9X7954 },
7985 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
7986 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7987 - pbn_pericom_PI7C9X7954 },
7988 + pbn_pericom_PI7C9X7951 },
7989 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
7990 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7991 - pbn_pericom_PI7C9X7954 },
7992 + pbn_pericom_PI7C9X7952 },
7993 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
7994 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7995 - pbn_pericom_PI7C9X7954 },
7996 + pbn_pericom_PI7C9X7952 },
7997 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
7998 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
7999 pbn_pericom_PI7C9X7954 },
8000 @@ -4626,16 +4731,16 @@ static const struct pci_device_id serial_pci_tbl[] = {
8001 pbn_pericom_PI7C9X7954 },
8002 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
8003 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8004 - pbn_pericom_PI7C9X7954 },
8005 + pbn_pericom_PI7C9X7952 },
8006 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
8007 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8008 pbn_pericom_PI7C9X7954 },
8009 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
8010 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8011 - pbn_pericom_PI7C9X7954 },
8012 + pbn_pericom_PI7C9X7952 },
8013 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
8014 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8015 - pbn_pericom_PI7C9X7954 },
8016 + pbn_pericom_PI7C9X7952 },
8017 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
8018 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8019 pbn_pericom_PI7C9X7954 },
8020 @@ -4644,13 +4749,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
8021 pbn_pericom_PI7C9X7954 },
8022 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
8023 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8024 - pbn_pericom_PI7C9X7954 },
8025 + pbn_pericom_PI7C9X7952 },
8026 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
8027 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8028 - pbn_pericom_PI7C9X7958 },
8029 + pbn_pericom_PI7C9X7954 },
8030 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
8031 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8032 - pbn_pericom_PI7C9X7958 },
8033 + pbn_pericom_PI7C9X7954 },
8034 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
8035 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8036 pbn_pericom_PI7C9X7958 },
8037 @@ -4659,19 +4764,19 @@ static const struct pci_device_id serial_pci_tbl[] = {
8038 pbn_pericom_PI7C9X7958 },
8039 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
8040 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8041 - pbn_pericom_PI7C9X7958 },
8042 + pbn_pericom_PI7C9X7954 },
8043 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
8044 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8045 pbn_pericom_PI7C9X7958 },
8046 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
8047 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8048 - pbn_pericom_PI7C9X7958 },
8049 + pbn_pericom_PI7C9X7954 },
8050 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
8051 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8052 pbn_pericom_PI7C9X7958 },
8053 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
8054 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
8055 - pbn_pericom_PI7C9X7958 },
8056 + pbn_pericom_PI7C9X7954 },
8057 /*
8058 * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
8059 */
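Annotation: the quirk additions and pbn_pericom_* reassignments above follow the Pericom PI7C9X795x part naming, where the trailing digit is the UART count:

    PI7C9X7951 - 1 port  (pbn_pericom_PI7C9X7951)
    PI7C9X7952 - 2 ports (pbn_pericom_PI7C9X7952)
    PI7C9X7954 - 4 ports (pbn_pericom_PI7C9X7954)
    PI7C9X7958 - 8 ports (pbn_pericom_PI7C9X7958)

Each ACCES board is moved to the table entry matching its actual port count (for example, the 2-port PCIE_COM_2SDB drops from the 4-port 7954 entry to the 2-port 7952 one), and the new pci_serial_quirks entries route the four-port boards through pci_pericom_setup() so they get the Pericom-specific initialization (notably the higher base clock) instead of pci_default_setup().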
8060 diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
8061 index 87d8dd90d605..0e3627289047 100644
8062 --- a/drivers/tty/serial/xilinx_uartps.c
8063 +++ b/drivers/tty/serial/xilinx_uartps.c
8064 @@ -362,7 +362,13 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
8065 cdns_uart_handle_tx(dev_id);
8066 isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
8067 }
8068 - if (isrstatus & CDNS_UART_IXR_RXMASK)
8069 +
8070 + /*
8071 + * Skip RX processing if RX is disabled as RXEMPTY will never be set
8072 + * as read bytes will not be removed from the FIFO.
8073 + */
8074 + if (isrstatus & CDNS_UART_IXR_RXMASK &&
8075 + !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
8076 cdns_uart_handle_rx(dev_id, isrstatus);
8077
8078 spin_unlock(&port->lock);
8079 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
8080 index da335899527b..b9a9a07f1ee9 100644
8081 --- a/drivers/tty/vt/vt.c
8082 +++ b/drivers/tty/vt/vt.c
8083 @@ -935,8 +935,11 @@ static void flush_scrollback(struct vc_data *vc)
8084 {
8085 WARN_CONSOLE_UNLOCKED();
8086
8087 + set_origin(vc);
8088 if (vc->vc_sw->con_flush_scrollback)
8089 vc->vc_sw->con_flush_scrollback(vc);
8090 + else
8091 + vc->vc_sw->con_switch(vc);
8092 }
8093
8094 /*
8095 @@ -1506,8 +1509,10 @@ static void csi_J(struct vc_data *vc, int vpar)
8096 count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
8097 start = (unsigned short *)vc->vc_origin;
8098 break;
8099 + case 3: /* include scrollback */
8100 + flush_scrollback(vc);
8101 + /* fallthrough */
8102 case 2: /* erase whole display */
8103 - case 3: /* (and scrollback buffer later) */
8104 vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
8105 count = vc->vc_cols * vc->vc_rows;
8106 start = (unsigned short *)vc->vc_origin;
8107 @@ -1516,13 +1521,7 @@ static void csi_J(struct vc_data *vc, int vpar)
8108 return;
8109 }
8110 scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
8111 - if (vpar == 3) {
8112 - set_origin(vc);
8113 - flush_scrollback(vc);
8114 - if (con_is_visible(vc))
8115 - update_screen(vc);
8116 - } else if (con_should_update(vc))
8117 - do_update_region(vc, (unsigned long) start, count);
8118 + update_region(vc, (unsigned long) start, count);
8119 vc->vc_need_wrap = 0;
8120 }
8121
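Annotation: after this change, flush_scrollback() always re-anchors the screen with set_origin() and, when a console driver has no con_flush_scrollback hook, falls back to con_switch() to force a full redraw; csi_J(3) ("ESC [ 3 J") then flushes the scrollback first and falls through into the ordinary whole-display erase, so the special vpar == 3 tail in csi_J() can go away. From user space the sequence can be exercised with a few lines of C (a sketch; any terminal honoring CSI 3 J will do):

    #include <stdio.h>

    int main(void)
    {
        /* CSI 3 J: erase the display including the scrollback buffer */
        fputs("\033[3J", stdout);
        fflush(stdout);
        return 0;
    }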
8122 diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
8123 index 772851bee99b..12025358bb3c 100644
8124 --- a/drivers/usb/chipidea/ci_hdrc_tegra.c
8125 +++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
8126 @@ -130,6 +130,7 @@ static int tegra_udc_remove(struct platform_device *pdev)
8127 {
8128 struct tegra_udc *udc = platform_get_drvdata(pdev);
8129
8130 + ci_hdrc_remove_device(udc->dev);
8131 usb_phy_set_suspend(udc->phy, 1);
8132 clk_disable_unprepare(udc->clk);
8133
8134 diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
8135 index c84c8c189e90..eb8046f87a54 100644
8136 --- a/drivers/usb/typec/tps6598x.c
8137 +++ b/drivers/usb/typec/tps6598x.c
8138 @@ -110,6 +110,20 @@ tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
8139 return 0;
8140 }
8141
8142 +static int tps6598x_block_write(struct tps6598x *tps, u8 reg,
8143 + void *val, size_t len)
8144 +{
8145 + u8 data[TPS_MAX_LEN + 1];
8146 +
8147 + if (!tps->i2c_protocol)
8148 + return regmap_raw_write(tps->regmap, reg, val, len);
8149 +
8150 + data[0] = len;
8151 + memcpy(&data[1], val, len);
8152 +
8153 + return regmap_raw_write(tps->regmap, reg, data, sizeof(data));
8154 +}
8155 +
8156 static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val)
8157 {
8158 return tps6598x_block_read(tps, reg, val, sizeof(u16));
8159 @@ -127,23 +141,23 @@ static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val)
8160
8161 static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val)
8162 {
8163 - return regmap_raw_write(tps->regmap, reg, &val, sizeof(u16));
8164 + return tps6598x_block_write(tps, reg, &val, sizeof(u16));
8165 }
8166
8167 static inline int tps6598x_write32(struct tps6598x *tps, u8 reg, u32 val)
8168 {
8169 - return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32));
8170 + return tps6598x_block_write(tps, reg, &val, sizeof(u32));
8171 }
8172
8173 static inline int tps6598x_write64(struct tps6598x *tps, u8 reg, u64 val)
8174 {
8175 - return regmap_raw_write(tps->regmap, reg, &val, sizeof(u64));
8176 + return tps6598x_block_write(tps, reg, &val, sizeof(u64));
8177 }
8178
8179 static inline int
8180 tps6598x_write_4cc(struct tps6598x *tps, u8 reg, const char *val)
8181 {
8182 - return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32));
8183 + return tps6598x_block_write(tps, reg, &val, sizeof(u32));
8184 }
8185
8186 static int tps6598x_read_partner_identity(struct tps6598x *tps)
8187 @@ -229,8 +243,8 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
8188 return -EBUSY;
8189
8190 if (in_len) {
8191 - ret = regmap_raw_write(tps->regmap, TPS_REG_DATA1,
8192 - in_data, in_len);
8193 + ret = tps6598x_block_write(tps, TPS_REG_DATA1,
8194 + in_data, in_len);
8195 if (ret)
8196 return ret;
8197 }
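Annotation: the new tps6598x_block_write() is the write-side twin of the existing tps6598x_block_read(): when the controller sits behind a plain I2C adapter (tps->i2c_protocol), register payloads must be framed as SMBus-style block transfers with a leading length byte, which a bare regmap_raw_write() does not provide. As a sketch of the framing, a 32-bit write of 0x11223344 on a little-endian host would hand regmap a buffer beginning:

    data[0]    = 0x04           /* length byte */
    data[1..4] = 44 33 22 11    /* payload, least significant byte first */

Note that in I2C mode this version always transfers the full TPS_MAX_LEN + 1 byte buffer, so bytes past len go out uninitialized.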
8198 diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
8199 index 5a0db6dec8d1..aaee1e6584e6 100644
8200 --- a/fs/9p/v9fs_vfs.h
8201 +++ b/fs/9p/v9fs_vfs.h
8202 @@ -40,6 +40,9 @@
8203 */
8204 #define P9_LOCK_TIMEOUT (30*HZ)
8205
8206 +/* flags for v9fs_stat2inode() & v9fs_stat2inode_dotl() */
8207 +#define V9FS_STAT2INODE_KEEP_ISIZE 1
8208 +
8209 extern struct file_system_type v9fs_fs_type;
8210 extern const struct address_space_operations v9fs_addr_operations;
8211 extern const struct file_operations v9fs_file_operations;
8212 @@ -61,8 +64,10 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
8213 struct inode *inode, umode_t mode, dev_t);
8214 void v9fs_evict_inode(struct inode *inode);
8215 ino_t v9fs_qid2ino(struct p9_qid *qid);
8216 -void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
8217 -void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *);
8218 +void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
8219 + struct super_block *sb, unsigned int flags);
8220 +void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
8221 + unsigned int flags);
8222 int v9fs_dir_release(struct inode *inode, struct file *filp);
8223 int v9fs_file_open(struct inode *inode, struct file *file);
8224 void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
8225 @@ -83,4 +88,18 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
8226 }
8227
8228 int v9fs_open_to_dotl_flags(int flags);
8229 +
8230 +static inline void v9fs_i_size_write(struct inode *inode, loff_t i_size)
8231 +{
8232 + /*
8233 + * 32-bit archs need the lock: concurrent updates could break
8234 + * the i_size seqcount and make i_size_read() loop forever.
8235 + * 64-bit updates are atomic and can skip the locking.
8236 + */
8237 + if (sizeof(i_size) > sizeof(long))
8238 + spin_lock(&inode->i_lock);
8239 + i_size_write(inode, i_size);
8240 + if (sizeof(i_size) > sizeof(long))
8241 + spin_unlock(&inode->i_lock);
8242 +}
8243 #endif
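Annotation: the v9fs_i_size_write() helper exists because i_size_write() is only a plain atomic store when loff_t fits in a machine word. On 32-bit SMP kernels it updates a seqcount, and two unserialized writers can interleave and leave the count odd, at which point i_size_read() spins forever. The sizeof(i_size) > sizeof(long) test is a compile-time constant, so the locking is folded away where it is not needed: on 64-bit builds sizeof(loff_t) == sizeof(long) == 8, both branches are dead code, and the helper reduces to a bare i_size_write(); on 32-bit builds sizeof(loff_t) == 8 > sizeof(long) == 4 and every update is serialized on inode->i_lock.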
8244 diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
8245 index ab3d5f5dbb00..c87e6d6ec069 100644
8246 --- a/fs/9p/vfs_file.c
8247 +++ b/fs/9p/vfs_file.c
8248 @@ -442,7 +442,11 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
8249 i_size = i_size_read(inode);
8250 if (iocb->ki_pos > i_size) {
8251 inode_add_bytes(inode, iocb->ki_pos - i_size);
8252 - i_size_write(inode, iocb->ki_pos);
8253 + /*
8254 + * Need to serialize against i_size_write() in
8255 + * v9fs_stat2inode()
8256 + */
8257 + v9fs_i_size_write(inode, iocb->ki_pos);
8258 }
8259 return retval;
8260 }
8261 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
8262 index 85ff859d3af5..72b779bc0942 100644
8263 --- a/fs/9p/vfs_inode.c
8264 +++ b/fs/9p/vfs_inode.c
8265 @@ -538,7 +538,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
8266 if (retval)
8267 goto error;
8268
8269 - v9fs_stat2inode(st, inode, sb);
8270 + v9fs_stat2inode(st, inode, sb, 0);
8271 v9fs_cache_inode_get_cookie(inode);
8272 unlock_new_inode(inode);
8273 return inode;
8274 @@ -1092,7 +1092,7 @@ v9fs_vfs_getattr(const struct path *path, struct kstat *stat,
8275 if (IS_ERR(st))
8276 return PTR_ERR(st);
8277
8278 - v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb);
8279 + v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
8280 generic_fillattr(d_inode(dentry), stat);
8281
8282 p9stat_free(st);
8283 @@ -1170,12 +1170,13 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
8284 * @stat: Plan 9 metadata (mistat) structure
8285 * @inode: inode to populate
8286 * @sb: superblock of filesystem
8287 + * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
8288 *
8289 */
8290
8291 void
8292 v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
8293 - struct super_block *sb)
8294 + struct super_block *sb, unsigned int flags)
8295 {
8296 umode_t mode;
8297 char ext[32];
8298 @@ -1216,10 +1217,11 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
8299 mode = p9mode2perm(v9ses, stat);
8300 mode |= inode->i_mode & ~S_IALLUGO;
8301 inode->i_mode = mode;
8302 - i_size_write(inode, stat->length);
8303
8304 + if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
8305 + v9fs_i_size_write(inode, stat->length);
8306 /* not real number of blocks, but 512 byte ones ... */
8307 - inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
8308 + inode->i_blocks = (stat->length + 512 - 1) >> 9;
8309 v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
8310 }
8311
8312 @@ -1416,9 +1418,9 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
8313 {
8314 int umode;
8315 dev_t rdev;
8316 - loff_t i_size;
8317 struct p9_wstat *st;
8318 struct v9fs_session_info *v9ses;
8319 + unsigned int flags;
8320
8321 v9ses = v9fs_inode2v9ses(inode);
8322 st = p9_client_stat(fid);
8323 @@ -1431,16 +1433,13 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
8324 if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
8325 goto out;
8326
8327 - spin_lock(&inode->i_lock);
8328 /*
8329 * We don't want to refresh inode->i_size,
8330 * because we may have cached data
8331 */
8332 - i_size = inode->i_size;
8333 - v9fs_stat2inode(st, inode, inode->i_sb);
8334 - if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
8335 - inode->i_size = i_size;
8336 - spin_unlock(&inode->i_lock);
8337 + flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
8338 + V9FS_STAT2INODE_KEEP_ISIZE : 0;
8339 + v9fs_stat2inode(st, inode, inode->i_sb, flags);
8340 out:
8341 p9stat_free(st);
8342 kfree(st);
8343 diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
8344 index 4823e1c46999..a950a927a626 100644
8345 --- a/fs/9p/vfs_inode_dotl.c
8346 +++ b/fs/9p/vfs_inode_dotl.c
8347 @@ -143,7 +143,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
8348 if (retval)
8349 goto error;
8350
8351 - v9fs_stat2inode_dotl(st, inode);
8352 + v9fs_stat2inode_dotl(st, inode, 0);
8353 v9fs_cache_inode_get_cookie(inode);
8354 retval = v9fs_get_acl(inode, fid);
8355 if (retval)
8356 @@ -496,7 +496,7 @@ v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat,
8357 if (IS_ERR(st))
8358 return PTR_ERR(st);
8359
8360 - v9fs_stat2inode_dotl(st, d_inode(dentry));
8361 + v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
8362 generic_fillattr(d_inode(dentry), stat);
8363 /* Change block size to what the server returned */
8364 stat->blksize = st->st_blksize;
8365 @@ -607,11 +607,13 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
8366 * v9fs_stat2inode_dotl - populate an inode structure with stat info
8367 * @stat: stat structure
8368 * @inode: inode to populate
8369 + * @flags: ctrl flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
8370 *
8371 */
8372
8373 void
8374 -v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
8375 +v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
8376 + unsigned int flags)
8377 {
8378 umode_t mode;
8379 struct v9fs_inode *v9inode = V9FS_I(inode);
8380 @@ -631,7 +633,8 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
8381 mode |= inode->i_mode & ~S_IALLUGO;
8382 inode->i_mode = mode;
8383
8384 - i_size_write(inode, stat->st_size);
8385 + if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
8386 + v9fs_i_size_write(inode, stat->st_size);
8387 inode->i_blocks = stat->st_blocks;
8388 } else {
8389 if (stat->st_result_mask & P9_STATS_ATIME) {
8390 @@ -661,8 +664,9 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
8391 }
8392 if (stat->st_result_mask & P9_STATS_RDEV)
8393 inode->i_rdev = new_decode_dev(stat->st_rdev);
8394 - if (stat->st_result_mask & P9_STATS_SIZE)
8395 - i_size_write(inode, stat->st_size);
8396 + if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
8397 + stat->st_result_mask & P9_STATS_SIZE)
8398 + v9fs_i_size_write(inode, stat->st_size);
8399 if (stat->st_result_mask & P9_STATS_BLOCKS)
8400 inode->i_blocks = stat->st_blocks;
8401 }
8402 @@ -928,9 +932,9 @@ v9fs_vfs_get_link_dotl(struct dentry *dentry,
8403
8404 int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
8405 {
8406 - loff_t i_size;
8407 struct p9_stat_dotl *st;
8408 struct v9fs_session_info *v9ses;
8409 + unsigned int flags;
8410
8411 v9ses = v9fs_inode2v9ses(inode);
8412 st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
8413 @@ -942,16 +946,13 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
8414 if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
8415 goto out;
8416
8417 - spin_lock(&inode->i_lock);
8418 /*
8419 * We don't want to refresh inode->i_size,
8420 * because we may have cached data
8421 */
8422 - i_size = inode->i_size;
8423 - v9fs_stat2inode_dotl(st, inode);
8424 - if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
8425 - inode->i_size = i_size;
8426 - spin_unlock(&inode->i_lock);
8427 + flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
8428 + V9FS_STAT2INODE_KEEP_ISIZE : 0;
8429 + v9fs_stat2inode_dotl(st, inode, flags);
8430 out:
8431 kfree(st);
8432 return 0;
8433 diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
8434 index 48ce50484e80..eeab9953af89 100644
8435 --- a/fs/9p/vfs_super.c
8436 +++ b/fs/9p/vfs_super.c
8437 @@ -172,7 +172,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
8438 goto release_sb;
8439 }
8440 d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
8441 - v9fs_stat2inode_dotl(st, d_inode(root));
8442 + v9fs_stat2inode_dotl(st, d_inode(root), 0);
8443 kfree(st);
8444 } else {
8445 struct p9_wstat *st = NULL;
8446 @@ -183,7 +183,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
8447 }
8448
8449 d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
8450 - v9fs_stat2inode(st, d_inode(root), sb);
8451 + v9fs_stat2inode(st, d_inode(root), sb, 0);
8452
8453 p9stat_free(st);
8454 kfree(st);
8455 diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
8456 index 3b66c957ea6f..5810463dc6d2 100644
8457 --- a/fs/btrfs/acl.c
8458 +++ b/fs/btrfs/acl.c
8459 @@ -9,6 +9,7 @@
8460 #include <linux/posix_acl_xattr.h>
8461 #include <linux/posix_acl.h>
8462 #include <linux/sched.h>
8463 +#include <linux/sched/mm.h>
8464 #include <linux/slab.h>
8465
8466 #include "ctree.h"
8467 @@ -72,8 +73,16 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
8468 }
8469
8470 if (acl) {
8471 + unsigned int nofs_flag;
8472 +
8473 size = posix_acl_xattr_size(acl->a_count);
8474 + /*
8475 + * We're holding a transaction handle, so use a NOFS memory
8476 + * allocation context to avoid deadlock if reclaim happens.
8477 + */
8478 + nofs_flag = memalloc_nofs_save();
8479 value = kmalloc(size, GFP_KERNEL);
8480 + memalloc_nofs_restore(nofs_flag);
8481 if (!value) {
8482 ret = -ENOMEM;
8483 goto out;
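Annotation: both btrfs hunks in this batch (here and in disk-io.c just below) use the scoped NOFS API instead of changing the allocation itself to GFP_NOFS: memalloc_nofs_save() flags the whole task, so every allocation inside the region, including ones buried in helpers, implicitly loses __GFP_FS and cannot recurse into filesystem reclaim while the transaction handle is held. The general pattern, as a minimal sketch:

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    static void *alloc_under_transaction(size_t size)
    {
        unsigned int nofs_flag;
        void *p;

        nofs_flag = memalloc_nofs_save();   /* enter NOFS scope */
        p = kmalloc(size, GFP_KERNEL);      /* behaves as GFP_NOFS here */
        memalloc_nofs_restore(nofs_flag);   /* leave NOFS scope */
        return p;
    }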
8484 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
8485 index d96d1390068a..b4f61a3d560a 100644
8486 --- a/fs/btrfs/disk-io.c
8487 +++ b/fs/btrfs/disk-io.c
8488 @@ -17,6 +17,7 @@
8489 #include <linux/semaphore.h>
8490 #include <linux/error-injection.h>
8491 #include <linux/crc32c.h>
8492 +#include <linux/sched/mm.h>
8493 #include <asm/unaligned.h>
8494 #include "ctree.h"
8495 #include "disk-io.h"
8496 @@ -1236,10 +1237,17 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
8497 struct btrfs_root *tree_root = fs_info->tree_root;
8498 struct btrfs_root *root;
8499 struct btrfs_key key;
8500 + unsigned int nofs_flag;
8501 int ret = 0;
8502 uuid_le uuid = NULL_UUID_LE;
8503
8504 + /*
8505 + * We're holding a transaction handle, so use a NOFS memory allocation
8506 + * context to avoid deadlock if reclaim happens.
8507 + */
8508 + nofs_flag = memalloc_nofs_save();
8509 root = btrfs_alloc_root(fs_info, GFP_KERNEL);
8510 + memalloc_nofs_restore(nofs_flag);
8511 if (!root)
8512 return ERR_PTR(-ENOMEM);
8513
8514 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
8515 index 79f82f2ec4d5..90b0a6eff535 100644
8516 --- a/fs/btrfs/extent_io.c
8517 +++ b/fs/btrfs/extent_io.c
8518 @@ -3002,11 +3002,11 @@ static int __do_readpage(struct extent_io_tree *tree,
8519 */
8520 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
8521 prev_em_start && *prev_em_start != (u64)-1 &&
8522 - *prev_em_start != em->orig_start)
8523 + *prev_em_start != em->start)
8524 force_bio_submit = true;
8525
8526 if (prev_em_start)
8527 - *prev_em_start = em->orig_start;
8528 + *prev_em_start = em->start;
8529
8530 free_extent_map(em);
8531 em = NULL;
8532 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
8533 index 285f64f2de5f..c13f62182513 100644
8534 --- a/fs/btrfs/volumes.c
8535 +++ b/fs/btrfs/volumes.c
8536 @@ -6425,10 +6425,10 @@ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
8537 }
8538
8539 if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
8540 - (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
8541 + (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
8542 (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
8543 (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
8544 - (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
8545 + (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
8546 ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
8547 num_stripes != 1)) {
8548 btrfs_err(fs_info,
8549 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
8550 index 9dcaed031843..80f33582059e 100644
8551 --- a/fs/cifs/cifsglob.h
8552 +++ b/fs/cifs/cifsglob.h
8553 @@ -235,6 +235,8 @@ struct smb_version_operations {
8554 int * (*get_credits_field)(struct TCP_Server_Info *, const int);
8555 unsigned int (*get_credits)(struct mid_q_entry *);
8556 __u64 (*get_next_mid)(struct TCP_Server_Info *);
8557 + void (*revert_current_mid)(struct TCP_Server_Info *server,
8558 + const unsigned int val);
8559 /* data offset from read response message */
8560 unsigned int (*read_data_offset)(char *);
8561 /*
8562 @@ -756,6 +758,22 @@ get_next_mid(struct TCP_Server_Info *server)
8563 return cpu_to_le16(mid);
8564 }
8565
8566 +static inline void
8567 +revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
8568 +{
8569 + if (server->ops->revert_current_mid)
8570 + server->ops->revert_current_mid(server, val);
8571 +}
8572 +
8573 +static inline void
8574 +revert_current_mid_from_hdr(struct TCP_Server_Info *server,
8575 + const struct smb2_sync_hdr *shdr)
8576 +{
8577 + unsigned int num = le16_to_cpu(shdr->CreditCharge);
8578 +
8579 + return revert_current_mid(server, num > 0 ? num : 1);
8580 +}
8581 +
8582 static inline __u16
8583 get_mid(const struct smb_hdr *smb)
8584 {
8585 @@ -1391,6 +1409,7 @@ struct mid_q_entry {
8586 struct kref refcount;
8587 struct TCP_Server_Info *server; /* server corresponding to this mid */
8588 __u64 mid; /* multiplex id */
8589 + __u16 credits; /* number of credits consumed by this mid */
8590 __u32 pid; /* process id */
8591 __u32 sequence_number; /* for CIFS signing */
8592 unsigned long when_alloc; /* when mid was created */
8593 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
8594 index 23db881daab5..08761a6a039d 100644
8595 --- a/fs/cifs/file.c
8596 +++ b/fs/cifs/file.c
8597 @@ -2871,14 +2871,16 @@ cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
8598 * these pages but not on the region from pos to ppos+len-1.
8599 */
8600 written = cifs_user_writev(iocb, from);
8601 - if (written > 0 && CIFS_CACHE_READ(cinode)) {
8602 + if (CIFS_CACHE_READ(cinode)) {
8603 /*
8604 - * Windows 7 server can delay breaking level2 oplock if a write
8605 - * request comes - break it on the client to prevent reading
8606 - * an old data.
8607 + * We have read level caching and we have just sent a write
8608 + * request to the server thus making data in the cache stale.
8609 + * Zap the cache and set oplock/lease level to NONE to avoid
8610 + * reading stale data from the cache. All subsequent read
8611 + * operations will read new data from the server.
8612 */
8613 cifs_zap_mapping(inode);
8614 - cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
8615 + cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
8616 inode);
8617 cinode->oplock = 0;
8618 }
8619 diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
8620 index 7b8b58fb4d3f..58700d2ba8cd 100644
8621 --- a/fs/cifs/smb2misc.c
8622 +++ b/fs/cifs/smb2misc.c
8623 @@ -517,7 +517,6 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
8624 __u8 lease_state;
8625 struct list_head *tmp;
8626 struct cifsFileInfo *cfile;
8627 - struct TCP_Server_Info *server = tcon->ses->server;
8628 struct cifs_pending_open *open;
8629 struct cifsInodeInfo *cinode;
8630 int ack_req = le32_to_cpu(rsp->Flags &
8631 @@ -537,13 +536,25 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
8632 cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
8633 le32_to_cpu(rsp->NewLeaseState));
8634
8635 - server->ops->set_oplock_level(cinode, lease_state, 0, NULL);
8636 -
8637 if (ack_req)
8638 cfile->oplock_break_cancelled = false;
8639 else
8640 cfile->oplock_break_cancelled = true;
8641
8642 + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
8643 +
8644 + /*
8645 + * Set or clear flags depending on the lease state being READ.
8646 + * HANDLE caching flag should be added when the client starts
8647 + * to defer closing remote file handles with HANDLE leases.
8648 + */
8649 + if (lease_state & SMB2_LEASE_READ_CACHING_HE)
8650 + set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
8651 + &cinode->flags);
8652 + else
8653 + clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
8654 + &cinode->flags);
8655 +
8656 queue_work(cifsoplockd_wq, &cfile->oplock_break);
8657 kfree(lw);
8658 return true;
8659 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
8660 index 237d7281ada3..d4d7d61a6fe2 100644
8661 --- a/fs/cifs/smb2ops.c
8662 +++ b/fs/cifs/smb2ops.c
8663 @@ -204,6 +204,15 @@ smb2_get_next_mid(struct TCP_Server_Info *server)
8664 return mid;
8665 }
8666
8667 +static void
8668 +smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
8669 +{
8670 + spin_lock(&GlobalMid_Lock);
8671 + if (server->CurrentMid >= val)
8672 + server->CurrentMid -= val;
8673 + spin_unlock(&GlobalMid_Lock);
8674 +}
8675 +
8676 static struct mid_q_entry *
8677 smb2_find_mid(struct TCP_Server_Info *server, char *buf)
8678 {
8679 @@ -2300,6 +2309,15 @@ smb2_downgrade_oplock(struct TCP_Server_Info *server,
8680 server->ops->set_oplock_level(cinode, 0, 0, NULL);
8681 }
8682
8683 +static void
8684 +smb21_downgrade_oplock(struct TCP_Server_Info *server,
8685 + struct cifsInodeInfo *cinode, bool set_level2)
8686 +{
8687 + server->ops->set_oplock_level(cinode,
8688 + set_level2 ? SMB2_LEASE_READ_CACHING_HE :
8689 + 0, 0, NULL);
8690 +}
8691 +
8692 static void
8693 smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
8694 unsigned int epoch, bool *purge_cache)
8695 @@ -3247,6 +3265,7 @@ struct smb_version_operations smb20_operations = {
8696 .get_credits = smb2_get_credits,
8697 .wait_mtu_credits = cifs_wait_mtu_credits,
8698 .get_next_mid = smb2_get_next_mid,
8699 + .revert_current_mid = smb2_revert_current_mid,
8700 .read_data_offset = smb2_read_data_offset,
8701 .read_data_length = smb2_read_data_length,
8702 .map_error = map_smb2_to_linux_error,
8703 @@ -3341,6 +3360,7 @@ struct smb_version_operations smb21_operations = {
8704 .get_credits = smb2_get_credits,
8705 .wait_mtu_credits = smb2_wait_mtu_credits,
8706 .get_next_mid = smb2_get_next_mid,
8707 + .revert_current_mid = smb2_revert_current_mid,
8708 .read_data_offset = smb2_read_data_offset,
8709 .read_data_length = smb2_read_data_length,
8710 .map_error = map_smb2_to_linux_error,
8711 @@ -3351,7 +3371,7 @@ struct smb_version_operations smb21_operations = {
8712 .print_stats = smb2_print_stats,
8713 .is_oplock_break = smb2_is_valid_oplock_break,
8714 .handle_cancelled_mid = smb2_handle_cancelled_mid,
8715 - .downgrade_oplock = smb2_downgrade_oplock,
8716 + .downgrade_oplock = smb21_downgrade_oplock,
8717 .need_neg = smb2_need_neg,
8718 .negotiate = smb2_negotiate,
8719 .negotiate_wsize = smb2_negotiate_wsize,
8720 @@ -3436,6 +3456,7 @@ struct smb_version_operations smb30_operations = {
8721 .get_credits = smb2_get_credits,
8722 .wait_mtu_credits = smb2_wait_mtu_credits,
8723 .get_next_mid = smb2_get_next_mid,
8724 + .revert_current_mid = smb2_revert_current_mid,
8725 .read_data_offset = smb2_read_data_offset,
8726 .read_data_length = smb2_read_data_length,
8727 .map_error = map_smb2_to_linux_error,
8728 @@ -3447,7 +3468,7 @@ struct smb_version_operations smb30_operations = {
8729 .dump_share_caps = smb2_dump_share_caps,
8730 .is_oplock_break = smb2_is_valid_oplock_break,
8731 .handle_cancelled_mid = smb2_handle_cancelled_mid,
8732 - .downgrade_oplock = smb2_downgrade_oplock,
8733 + .downgrade_oplock = smb21_downgrade_oplock,
8734 .need_neg = smb2_need_neg,
8735 .negotiate = smb2_negotiate,
8736 .negotiate_wsize = smb2_negotiate_wsize,
8737 @@ -3540,6 +3561,7 @@ struct smb_version_operations smb311_operations = {
8738 .get_credits = smb2_get_credits,
8739 .wait_mtu_credits = smb2_wait_mtu_credits,
8740 .get_next_mid = smb2_get_next_mid,
8741 + .revert_current_mid = smb2_revert_current_mid,
8742 .read_data_offset = smb2_read_data_offset,
8743 .read_data_length = smb2_read_data_length,
8744 .map_error = map_smb2_to_linux_error,
8745 @@ -3551,7 +3573,7 @@ struct smb_version_operations smb311_operations = {
8746 .dump_share_caps = smb2_dump_share_caps,
8747 .is_oplock_break = smb2_is_valid_oplock_break,
8748 .handle_cancelled_mid = smb2_handle_cancelled_mid,
8749 - .downgrade_oplock = smb2_downgrade_oplock,
8750 + .downgrade_oplock = smb21_downgrade_oplock,
8751 .need_neg = smb2_need_neg,
8752 .negotiate = smb2_negotiate,
8753 .negotiate_wsize = smb2_negotiate_wsize,
8754 diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
8755 index 7b351c65ee46..63264db78b89 100644
8756 --- a/fs/cifs/smb2transport.c
8757 +++ b/fs/cifs/smb2transport.c
8758 @@ -576,6 +576,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
8759 struct TCP_Server_Info *server)
8760 {
8761 struct mid_q_entry *temp;
8762 + unsigned int credits = le16_to_cpu(shdr->CreditCharge);
8763
8764 if (server == NULL) {
8765 cifs_dbg(VFS, "Null TCP session in smb2_mid_entry_alloc\n");
8766 @@ -586,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
8767 memset(temp, 0, sizeof(struct mid_q_entry));
8768 kref_init(&temp->refcount);
8769 temp->mid = le64_to_cpu(shdr->MessageId);
8770 + temp->credits = credits > 0 ? credits : 1;
8771 temp->pid = current->pid;
8772 temp->command = shdr->Command; /* Always LE */
8773 temp->when_alloc = jiffies;
8774 @@ -674,13 +676,18 @@ smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
8775 smb2_seq_num_into_buf(ses->server, shdr);
8776
8777 rc = smb2_get_mid_entry(ses, shdr, &mid);
8778 - if (rc)
8779 + if (rc) {
8780 + revert_current_mid_from_hdr(ses->server, shdr);
8781 return ERR_PTR(rc);
8782 + }
8783 +
8784 rc = smb2_sign_rqst(rqst, ses->server);
8785 if (rc) {
8786 + revert_current_mid_from_hdr(ses->server, shdr);
8787 cifs_delete_mid(mid);
8788 return ERR_PTR(rc);
8789 }
8790 +
8791 return mid;
8792 }
8793
8794 @@ -695,11 +702,14 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
8795 smb2_seq_num_into_buf(server, shdr);
8796
8797 mid = smb2_mid_entry_alloc(shdr, server);
8798 - if (mid == NULL)
8799 + if (mid == NULL) {
8800 + revert_current_mid_from_hdr(server, shdr);
8801 return ERR_PTR(-ENOMEM);
8802 + }
8803
8804 rc = smb2_sign_rqst(rqst, server);
8805 if (rc) {
8806 + revert_current_mid_from_hdr(server, shdr);
8807 DeleteMidQEntry(mid);
8808 return ERR_PTR(rc);
8809 }
8810 diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
8811 index 66348b3d28e6..f2938bd95c40 100644
8812 --- a/fs/cifs/transport.c
8813 +++ b/fs/cifs/transport.c
8814 @@ -638,6 +638,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
8815 cifs_in_send_dec(server);
8816
8817 if (rc < 0) {
8818 + revert_current_mid(server, mid->credits);
8819 server->sequence_number -= 2;
8820 cifs_delete_mid(mid);
8821 }
8822 @@ -842,6 +843,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
8823 for (i = 0; i < num_rqst; i++) {
8824 midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
8825 if (IS_ERR(midQ[i])) {
8826 + revert_current_mid(ses->server, i);
8827 for (j = 0; j < i; j++)
8828 cifs_delete_mid(midQ[j]);
8829 mutex_unlock(&ses->server->srv_mutex);
8830 @@ -867,8 +869,10 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
8831 for (i = 0; i < num_rqst; i++)
8832 cifs_save_when_sent(midQ[i]);
8833
8834 - if (rc < 0)
8835 + if (rc < 0) {
8836 + revert_current_mid(ses->server, num_rqst);
8837 ses->server->sequence_number -= 2;
8838 + }
8839
8840 mutex_unlock(&ses->server->srv_mutex);
8841
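Annotation: the mid-handling changes above (cifsglob.h, smb2ops.c, smb2transport.c, transport.c) all protect one invariant: message IDs reserved for a request (one per credit charged, at least one, recorded in mid->credits) must be given back whenever the request is never successfully sent, whether signing fails, the send itself fails, or a later request in a compound fails setup. Otherwise the client's mid sequence runs ahead of what the server has seen and subsequent requests are rejected. Stripped of the SMB2 details, the shape of the pattern (hypothetical names, not the cifs API):

    #include <stdbool.h>
    #include <stdint.h>

    struct seq_ctr { uint64_t next; };

    /* Reserve n sequence numbers up front; revert the charge on failure. */
    static bool send_with_rollback(struct seq_ctr *c, unsigned int n,
                                   bool (*send)(uint64_t first, unsigned int n))
    {
        uint64_t first = c->next;

        c->next += n;                 /* charge before sending */
        if (send(first, n))
            return true;

        if (c->next >= n)             /* failed: give the ids back */
            c->next -= n;
        return false;
    }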
8842 diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
8843 index c53814539070..553a3f3300ae 100644
8844 --- a/fs/devpts/inode.c
8845 +++ b/fs/devpts/inode.c
8846 @@ -455,6 +455,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
8847 s->s_blocksize_bits = 10;
8848 s->s_magic = DEVPTS_SUPER_MAGIC;
8849 s->s_op = &devpts_sops;
8850 + s->s_d_op = &simple_dentry_operations;
8851 s->s_time_gran = 1;
8852
8853 error = -ENOMEM;
8854 diff --git a/fs/ext2/super.c b/fs/ext2/super.c
8855 index 0c38e31ec938..364e647d87c0 100644
8856 --- a/fs/ext2/super.c
8857 +++ b/fs/ext2/super.c
8858 @@ -761,7 +761,8 @@ static loff_t ext2_max_size(int bits)
8859 {
8860 loff_t res = EXT2_NDIR_BLOCKS;
8861 int meta_blocks;
8862 - loff_t upper_limit;
8863 + unsigned int upper_limit;
8864 + unsigned int ppb = 1 << (bits-2);
8865
8866 /* This is calculated to be the largest file size for a
8867 * dense, file such that the total number of
8868 @@ -775,24 +776,34 @@ static loff_t ext2_max_size(int bits)
8869 /* total blocks in file system block size */
8870 upper_limit >>= (bits - 9);
8871
8872 + /* Compute how many blocks we can address by block tree */
8873 + res += 1LL << (bits-2);
8874 + res += 1LL << (2*(bits-2));
8875 + res += 1LL << (3*(bits-2));
8876 + /* Does block tree limit file size? */
8877 + if (res < upper_limit)
8878 + goto check_lfs;
8879
8880 + res = upper_limit;
8881 + /* How many metadata blocks are needed for addressing upper_limit? */
8882 + upper_limit -= EXT2_NDIR_BLOCKS;
8883 /* indirect blocks */
8884 meta_blocks = 1;
8885 + upper_limit -= ppb;
8886 /* double indirect blocks */
8887 - meta_blocks += 1 + (1LL << (bits-2));
8888 - /* tripple indirect blocks */
8889 - meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
8890 -
8891 - upper_limit -= meta_blocks;
8892 - upper_limit <<= bits;
8893 -
8894 - res += 1LL << (bits-2);
8895 - res += 1LL << (2*(bits-2));
8896 - res += 1LL << (3*(bits-2));
8897 + if (upper_limit < ppb * ppb) {
8898 + meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb);
8899 + res -= meta_blocks;
8900 + goto check_lfs;
8901 + }
8902 + meta_blocks += 1 + ppb;
8903 + upper_limit -= ppb * ppb;
8904 + /* tripple indirect blocks for the rest */
8905 + meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb) +
8906 + DIV_ROUND_UP(upper_limit, ppb*ppb);
8907 + res -= meta_blocks;
8908 +check_lfs:
8909 res <<= bits;
8910 - if (res > upper_limit)
8911 - res = upper_limit;
8912 -
8913 if (res > MAX_LFS_FILESIZE)
8914 res = MAX_LFS_FILESIZE;
8915
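Annotation: worked through for 4 KiB blocks (bits = 12): ppb = 1 << (12 - 2) = 1024 block numbers per indirect block, so the block tree can address

    12 + 1024 + 1024^2 + 1024^3 = 1,074,791,436 blocks,

about 4 TiB of data. The rewrite first checks whether that tree limit is already below upper_limit (the bound imposed by the filesystem size); only when the filesystem size is the binding constraint does it count the indirect metadata blocks actually needed to reach upper_limit. The old code always subtracted the worst-case metadata count, which understated the maximum file size and, for 64 KiB blocks, could even drive upper_limit negative.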
8916 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
8917 index 032cf9b92665..2ddf7833350d 100644
8918 --- a/fs/ext4/ext4.h
8919 +++ b/fs/ext4/ext4.h
8920 @@ -435,6 +435,9 @@ struct flex_groups {
8921 /* Flags that are appropriate for non-directories/regular files. */
8922 #define EXT4_OTHER_FLMASK (EXT4_NODUMP_FL | EXT4_NOATIME_FL)
8923
8924 +/* The only flags that should be swapped */
8925 +#define EXT4_FL_SHOULD_SWAP (EXT4_HUGE_FILE_FL | EXT4_EXTENTS_FL)
8926 +
8927 /* Mask out flags that are inappropriate for the given type of inode. */
8928 static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
8929 {
8930 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
8931 index d37dafa1d133..2e76fb55d94a 100644
8932 --- a/fs/ext4/ioctl.c
8933 +++ b/fs/ext4/ioctl.c
8934 @@ -63,18 +63,20 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
8935 loff_t isize;
8936 struct ext4_inode_info *ei1;
8937 struct ext4_inode_info *ei2;
8938 + unsigned long tmp;
8939
8940 ei1 = EXT4_I(inode1);
8941 ei2 = EXT4_I(inode2);
8942
8943 swap(inode1->i_version, inode2->i_version);
8944 - swap(inode1->i_blocks, inode2->i_blocks);
8945 - swap(inode1->i_bytes, inode2->i_bytes);
8946 swap(inode1->i_atime, inode2->i_atime);
8947 swap(inode1->i_mtime, inode2->i_mtime);
8948
8949 memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
8950 - swap(ei1->i_flags, ei2->i_flags);
8951 + tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
8952 + ei1->i_flags = (ei2->i_flags & EXT4_FL_SHOULD_SWAP) |
8953 + (ei1->i_flags & ~EXT4_FL_SHOULD_SWAP);
8954 + ei2->i_flags = tmp | (ei2->i_flags & ~EXT4_FL_SHOULD_SWAP);
8955 swap(ei1->i_disksize, ei2->i_disksize);
8956 ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
8957 ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
8958 @@ -115,28 +117,41 @@ static long swap_inode_boot_loader(struct super_block *sb,
8959 int err;
8960 struct inode *inode_bl;
8961 struct ext4_inode_info *ei_bl;
8962 -
8963 - if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
8964 - IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
8965 - ext4_has_inline_data(inode))
8966 - return -EINVAL;
8967 -
8968 - if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
8969 - !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
8970 - return -EPERM;
8971 + qsize_t size, size_bl, diff;
8972 + blkcnt_t blocks;
8973 + unsigned short bytes;
8974
8975 inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
8976 if (IS_ERR(inode_bl))
8977 return PTR_ERR(inode_bl);
8978 ei_bl = EXT4_I(inode_bl);
8979
8980 - filemap_flush(inode->i_mapping);
8981 - filemap_flush(inode_bl->i_mapping);
8982 -
8983 /* Protect orig inodes against a truncate and make sure,
8984 * that only 1 swap_inode_boot_loader is running. */
8985 lock_two_nondirectories(inode, inode_bl);
8986
8987 + if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
8988 + IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
8989 + ext4_has_inline_data(inode)) {
8990 + err = -EINVAL;
8991 + goto journal_err_out;
8992 + }
8993 +
8994 + if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
8995 + !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN)) {
8996 + err = -EPERM;
8997 + goto journal_err_out;
8998 + }
8999 +
9000 + down_write(&EXT4_I(inode)->i_mmap_sem);
9001 + err = filemap_write_and_wait(inode->i_mapping);
9002 + if (err)
9003 + goto err_out;
9004 +
9005 + err = filemap_write_and_wait(inode_bl->i_mapping);
9006 + if (err)
9007 + goto err_out;
9008 +
9009 /* Wait for all existing dio workers */
9010 inode_dio_wait(inode);
9011 inode_dio_wait(inode_bl);
9012 @@ -147,7 +162,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
9013 handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
9014 if (IS_ERR(handle)) {
9015 err = -EINVAL;
9016 - goto journal_err_out;
9017 + goto err_out;
9018 }
9019
9020 /* Protect extent tree against block allocations via delalloc */
9021 @@ -170,6 +185,13 @@ static long swap_inode_boot_loader(struct super_block *sb,
9022 memset(ei_bl->i_data, 0, sizeof(ei_bl->i_data));
9023 }
9024
9025 + err = dquot_initialize(inode);
9026 + if (err)
9027 + goto err_out1;
9028 +
9029 + size = (qsize_t)(inode->i_blocks) * (1 << 9) + inode->i_bytes;
9030 + size_bl = (qsize_t)(inode_bl->i_blocks) * (1 << 9) + inode_bl->i_bytes;
9031 + diff = size - size_bl;
9032 swap_inode_data(inode, inode_bl);
9033
9034 inode->i_ctime = inode_bl->i_ctime = current_time(inode);
9035 @@ -183,27 +205,51 @@ static long swap_inode_boot_loader(struct super_block *sb,
9036
9037 err = ext4_mark_inode_dirty(handle, inode);
9038 if (err < 0) {
9039 + /* No need to update quota information. */
9040 ext4_warning(inode->i_sb,
9041 "couldn't mark inode #%lu dirty (err %d)",
9042 inode->i_ino, err);
9043 /* Revert all changes: */
9044 swap_inode_data(inode, inode_bl);
9045 ext4_mark_inode_dirty(handle, inode);
9046 - } else {
9047 - err = ext4_mark_inode_dirty(handle, inode_bl);
9048 - if (err < 0) {
9049 - ext4_warning(inode_bl->i_sb,
9050 - "couldn't mark inode #%lu dirty (err %d)",
9051 - inode_bl->i_ino, err);
9052 - /* Revert all changes: */
9053 - swap_inode_data(inode, inode_bl);
9054 - ext4_mark_inode_dirty(handle, inode);
9055 - ext4_mark_inode_dirty(handle, inode_bl);
9056 - }
9057 + goto err_out1;
9058 + }
9059 +
9060 + blocks = inode_bl->i_blocks;
9061 + bytes = inode_bl->i_bytes;
9062 + inode_bl->i_blocks = inode->i_blocks;
9063 + inode_bl->i_bytes = inode->i_bytes;
9064 + err = ext4_mark_inode_dirty(handle, inode_bl);
9065 + if (err < 0) {
9066 + /* No need to update quota information. */
9067 + ext4_warning(inode_bl->i_sb,
9068 + "couldn't mark inode #%lu dirty (err %d)",
9069 + inode_bl->i_ino, err);
9070 + goto revert;
9071 + }
9072 +
9073 + /* Bootloader inode should not be counted into quota information. */
9074 + if (diff > 0)
9075 + dquot_free_space(inode, diff);
9076 + else
9077 + err = dquot_alloc_space(inode, -1 * diff);
9078 +
9079 + if (err < 0) {
9080 +revert:
9081 + /* Revert all changes: */
9082 + inode_bl->i_blocks = blocks;
9083 + inode_bl->i_bytes = bytes;
9084 + swap_inode_data(inode, inode_bl);
9085 + ext4_mark_inode_dirty(handle, inode);
9086 + ext4_mark_inode_dirty(handle, inode_bl);
9087 }
9088 +
9089 +err_out1:
9090 ext4_journal_stop(handle);
9091 ext4_double_up_write_data_sem(inode, inode_bl);
9092
9093 +err_out:
9094 + up_write(&EXT4_I(inode)->i_mmap_sem);
9095 journal_err_out:
9096 unlock_two_nondirectories(inode, inode_bl);
9097 iput(inode_bl);
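Annotation: in the reworked swap_inode_boot_loader(), the two inodes trade their block allocations, but only the user-visible inode is quota-charged; the bootloader inode is not counted. So the sizes are captured before the swap (i_blocks counts 512-byte units) and the difference is settled against quota afterwards:

    size    = inode->i_blocks    * 512 + inode->i_bytes
    size_bl = inode_bl->i_blocks * 512 + inode_bl->i_bytes
    diff    = size - size_bl

    diff > 0:  dquot_free_space(inode, diff)      /* inode shrank */
    diff < 0:  dquot_alloc_space(inode, -diff)    /* inode grew */

The validity checks also move under lock_two_nondirectories(), and the filemap_flush() calls become filemap_write_and_wait() under i_mmap_sem so dirty pages are fully written out before i_data is swapped; every failure past the journal start reverts both inodes.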
9098 diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
9099 index 48421de803b7..3d9b18505c0c 100644
9100 --- a/fs/ext4/resize.c
9101 +++ b/fs/ext4/resize.c
9102 @@ -1960,7 +1960,8 @@ retry:
9103 le16_to_cpu(es->s_reserved_gdt_blocks);
9104 n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
9105 n_blocks_count = (ext4_fsblk_t)n_group *
9106 - EXT4_BLOCKS_PER_GROUP(sb);
9107 + EXT4_BLOCKS_PER_GROUP(sb) +
9108 + le32_to_cpu(es->s_first_data_block);
9109 n_group--; /* set to last group number */
9110 }
9111
9112 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
9113 index c0b66a7a795b..914e725c82c4 100644
9114 --- a/fs/jbd2/transaction.c
9115 +++ b/fs/jbd2/transaction.c
9116 @@ -1219,11 +1219,12 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
9117 struct journal_head *jh;
9118 char *committed_data = NULL;
9119
9120 - JBUFFER_TRACE(jh, "entry");
9121 if (jbd2_write_access_granted(handle, bh, true))
9122 return 0;
9123
9124 jh = jbd2_journal_add_journal_head(bh);
9125 + JBUFFER_TRACE(jh, "entry");
9126 +
9127 /*
9128 * Do this first --- it can drop the journal lock, so we want to
9129 * make sure that obtaining the committed_data is done
9130 @@ -1334,15 +1335,17 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
9131
9132 if (is_handle_aborted(handle))
9133 return -EROFS;
9134 - if (!buffer_jbd(bh)) {
9135 - ret = -EUCLEAN;
9136 - goto out;
9137 - }
9138 + if (!buffer_jbd(bh))
9139 + return -EUCLEAN;
9140 +
9141 /*
9142 * We don't grab jh reference here since the buffer must be part
9143 * of the running transaction.
9144 */
9145 jh = bh2jh(bh);
9146 + jbd_debug(5, "journal_head %p\n", jh);
9147 + JBUFFER_TRACE(jh, "entry");
9148 +
9149 /*
9150 * This and the following assertions are unreliable since we may see jh
9151 * in inconsistent state unless we grab bh_state lock. But this is
9152 @@ -1376,9 +1379,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
9153 }
9154
9155 journal = transaction->t_journal;
9156 - jbd_debug(5, "journal_head %p\n", jh);
9157 - JBUFFER_TRACE(jh, "entry");
9158 -
9159 jbd_lock_bh_state(bh);
9160
9161 if (jh->b_modified == 0) {
9162 @@ -1576,14 +1576,21 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
9163 /* However, if the buffer is still owned by a prior
9164 * (committing) transaction, we can't drop it yet... */
9165 JBUFFER_TRACE(jh, "belongs to older transaction");
9166 - /* ... but we CAN drop it from the new transaction if we
9167 - * have also modified it since the original commit. */
9168 + /* ... but we CAN drop it from the new transaction through
9169 + * marking the buffer as freed and set j_next_transaction to
9170 + * the new transaction, so that not only the commit code
9171 + * knows it should clear dirty bits when it is done with the
9172 + * buffer, but also the buffer can be checkpointed only
9173 + * after the new transaction commits. */
9174
9175 - if (jh->b_next_transaction) {
9176 - J_ASSERT(jh->b_next_transaction == transaction);
9177 + set_buffer_freed(bh);
9178 +
9179 + if (!jh->b_next_transaction) {
9180 spin_lock(&journal->j_list_lock);
9181 - jh->b_next_transaction = NULL;
9182 + jh->b_next_transaction = transaction;
9183 spin_unlock(&journal->j_list_lock);
9184 + } else {
9185 + J_ASSERT(jh->b_next_transaction == transaction);
9186
9187 /*
9188 * only drop a reference if this transaction modified
9189 diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
9190 index ff2716f9322e..0b22c39dad47 100644
9191 --- a/fs/kernfs/mount.c
9192 +++ b/fs/kernfs/mount.c
9193 @@ -196,8 +196,10 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
9194 return dentry;
9195
9196 knparent = find_next_ancestor(kn, NULL);
9197 - if (WARN_ON(!knparent))
9198 + if (WARN_ON(!knparent)) {
9199 + dput(dentry);
9200 return ERR_PTR(-EINVAL);
9201 + }
9202
9203 do {
9204 struct dentry *dtmp;
9205 @@ -206,8 +208,10 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
9206 if (kn == knparent)
9207 return dentry;
9208 kntmp = find_next_ancestor(kn, knparent);
9209 - if (WARN_ON(!kntmp))
9210 + if (WARN_ON(!kntmp)) {
9211 + dput(dentry);
9212 return ERR_PTR(-EINVAL);
9213 + }
9214 dtmp = lookup_one_len_unlocked(kntmp->name, dentry,
9215 strlen(kntmp->name));
9216 dput(dentry);
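Annotation: both kernfs error paths previously returned ERR_PTR(-EINVAL) while still holding the reference taken on dentry earlier in kernfs_node_dentry(), leaking a dentry every time the WARN_ON fired. The fix is the usual drop-before-bail pattern:

    if (WARN_ON(!knparent)) {
        dput(dentry);               /* release the ref we hold */
        return ERR_PTR(-EINVAL);
    }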
9217 diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
9218 index 3f23b6840547..bf34ddaa2ad7 100644
9219 --- a/fs/nfs/nfs4idmap.c
9220 +++ b/fs/nfs/nfs4idmap.c
9221 @@ -44,6 +44,7 @@
9222 #include <linux/keyctl.h>
9223 #include <linux/key-type.h>
9224 #include <keys/user-type.h>
9225 +#include <keys/request_key_auth-type.h>
9226 #include <linux/module.h>
9227
9228 #include "internal.h"
9229 @@ -59,7 +60,7 @@ static struct key_type key_type_id_resolver_legacy;
9230 struct idmap_legacy_upcalldata {
9231 struct rpc_pipe_msg pipe_msg;
9232 struct idmap_msg idmap_msg;
9233 - struct key_construction *key_cons;
9234 + struct key *authkey;
9235 struct idmap *idmap;
9236 };
9237
9238 @@ -384,7 +385,7 @@ static const match_table_t nfs_idmap_tokens = {
9239 { Opt_find_err, NULL }
9240 };
9241
9242 -static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
9243 +static int nfs_idmap_legacy_upcall(struct key *, void *);
9244 static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
9245 size_t);
9246 static void idmap_release_pipe(struct inode *);
9247 @@ -549,11 +550,12 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
9248 static void
9249 nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
9250 {
9251 - struct key_construction *cons = idmap->idmap_upcall_data->key_cons;
9252 + struct key *authkey = idmap->idmap_upcall_data->authkey;
9253
9254 kfree(idmap->idmap_upcall_data);
9255 idmap->idmap_upcall_data = NULL;
9256 - complete_request_key(cons, ret);
9257 + complete_request_key(authkey, ret);
9258 + key_put(authkey);
9259 }
9260
9261 static void
9262 @@ -563,15 +565,14 @@ nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
9263 nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
9264 }
9265
9266 -static int nfs_idmap_legacy_upcall(struct key_construction *cons,
9267 - const char *op,
9268 - void *aux)
9269 +static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
9270 {
9271 struct idmap_legacy_upcalldata *data;
9272 + struct request_key_auth *rka = get_request_key_auth(authkey);
9273 struct rpc_pipe_msg *msg;
9274 struct idmap_msg *im;
9275 struct idmap *idmap = (struct idmap *)aux;
9276 - struct key *key = cons->key;
9277 + struct key *key = rka->target_key;
9278 int ret = -ENOKEY;
9279
9280 if (!aux)
9281 @@ -586,7 +587,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
9282 msg = &data->pipe_msg;
9283 im = &data->idmap_msg;
9284 data->idmap = idmap;
9285 - data->key_cons = cons;
9286 + data->authkey = key_get(authkey);
9287
9288 ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
9289 if (ret < 0)
9290 @@ -604,7 +605,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
9291 out2:
9292 kfree(data);
9293 out1:
9294 - complete_request_key(cons, ret);
9295 + complete_request_key(authkey, ret);
9296 return ret;
9297 }
9298
9299 @@ -651,9 +652,10 @@ out:
9300 static ssize_t
9301 idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
9302 {
9303 + struct request_key_auth *rka;
9304 struct rpc_inode *rpci = RPC_I(file_inode(filp));
9305 struct idmap *idmap = (struct idmap *)rpci->private;
9306 - struct key_construction *cons;
9307 + struct key *authkey;
9308 struct idmap_msg im;
9309 size_t namelen_in;
9310 int ret = -ENOKEY;
9311 @@ -665,7 +667,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
9312 if (idmap->idmap_upcall_data == NULL)
9313 goto out_noupcall;
9314
9315 - cons = idmap->idmap_upcall_data->key_cons;
9316 + authkey = idmap->idmap_upcall_data->authkey;
9317 + rka = get_request_key_auth(authkey);
9318
9319 if (mlen != sizeof(im)) {
9320 ret = -ENOSPC;
9321 @@ -690,9 +693,9 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
9322
9323 ret = nfs_idmap_read_and_verify_message(&im,
9324 &idmap->idmap_upcall_data->idmap_msg,
9325 - cons->key, cons->authkey);
9326 + rka->target_key, authkey);
9327 if (ret >= 0) {
9328 - key_set_timeout(cons->key, nfs_idmap_cache_timeout);
9329 + key_set_timeout(rka->target_key, nfs_idmap_cache_timeout);
9330 ret = mlen;
9331 }
9332
9333 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
9334 index 8220a168282e..e7abcf7629b3 100644
9335 --- a/fs/nfs/nfs4proc.c
9336 +++ b/fs/nfs/nfs4proc.c
9337 @@ -947,6 +947,13 @@ nfs4_sequence_process_interrupted(struct nfs_client *client,
9338
9339 #endif /* !CONFIG_NFS_V4_1 */
9340
9341 +static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
9342 +{
9343 + res->sr_timestamp = jiffies;
9344 + res->sr_status_flags = 0;
9345 + res->sr_status = 1;
9346 +}
9347 +
9348 static
9349 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
9350 struct nfs4_sequence_res *res,
9351 @@ -958,10 +965,6 @@ void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
9352 args->sa_slot = slot;
9353
9354 res->sr_slot = slot;
9355 - res->sr_timestamp = jiffies;
9356 - res->sr_status_flags = 0;
9357 - res->sr_status = 1;
9358 -
9359 }
9360
9361 int nfs4_setup_sequence(struct nfs_client *client,
9362 @@ -1007,6 +1010,7 @@ int nfs4_setup_sequence(struct nfs_client *client,
9363
9364 trace_nfs4_setup_sequence(session, args);
9365 out_start:
9366 + nfs41_sequence_res_init(res);
9367 rpc_call_start(task);
9368 return 0;
9369
9370 diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
9371 index 3dbd15b47c27..0ec6bce3dd69 100644
9372 --- a/fs/nfs/pagelist.c
9373 +++ b/fs/nfs/pagelist.c
9374 @@ -989,6 +989,17 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
9375 }
9376 }
9377
9378 +static void
9379 +nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
9380 + struct nfs_page *req)
9381 +{
9382 + LIST_HEAD(head);
9383 +
9384 + nfs_list_remove_request(req);
9385 + nfs_list_add_request(req, &head);
9386 + desc->pg_completion_ops->error_cleanup(&head);
9387 +}
9388 +
9389 /**
9390 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
9391 * @desc: destination io descriptor
9392 @@ -1026,10 +1037,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
9393 nfs_page_group_unlock(req);
9394 desc->pg_moreio = 1;
9395 nfs_pageio_doio(desc);
9396 - if (desc->pg_error < 0)
9397 - return 0;
9398 - if (mirror->pg_recoalesce)
9399 - return 0;
9400 + if (desc->pg_error < 0 || mirror->pg_recoalesce)
9401 + goto out_cleanup_subreq;
9402 /* retry add_request for this subreq */
9403 nfs_page_group_lock(req);
9404 continue;
9405 @@ -1062,6 +1071,10 @@ err_ptr:
9406 desc->pg_error = PTR_ERR(subreq);
9407 nfs_page_group_unlock(req);
9408 return 0;
9409 +out_cleanup_subreq:
9410 + if (req != subreq)
9411 + nfs_pageio_cleanup_request(desc, subreq);
9412 + return 0;
9413 }
9414
9415 static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
9416 @@ -1080,7 +1093,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
9417 struct nfs_page *req;
9418
9419 req = list_first_entry(&head, struct nfs_page, wb_list);
9420 - nfs_list_remove_request(req);
9421 if (__nfs_pageio_add_request(desc, req))
9422 continue;
9423 if (desc->pg_error < 0) {
9424 @@ -1169,11 +1181,14 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
9425 if (nfs_pgio_has_mirroring(desc))
9426 desc->pg_mirror_idx = midx;
9427 if (!nfs_pageio_add_request_mirror(desc, dupreq))
9428 - goto out_failed;
9429 + goto out_cleanup_subreq;
9430 }
9431
9432 return 1;
9433
9434 +out_cleanup_subreq:
9435 + if (req != dupreq)
9436 + nfs_pageio_cleanup_request(desc, dupreq);
9437 out_failed:
9438 /* remember fatal errors */
9439 if (nfs_error_is_fatal(desc->pg_error))
9440 @@ -1199,7 +1214,7 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
9441 desc->pg_mirror_idx = mirror_idx;
9442 for (;;) {
9443 nfs_pageio_doio(desc);
9444 - if (!mirror->pg_recoalesce)
9445 + if (desc->pg_error < 0 || !mirror->pg_recoalesce)
9446 break;
9447 if (!nfs_do_recoalesce(desc))
9448 break;
9449 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
9450 index d790faff8e47..51d0b7913c04 100644
9451 --- a/fs/nfs/write.c
9452 +++ b/fs/nfs/write.c
9453 @@ -238,9 +238,9 @@ out:
9454 }
9455
9456 /* A writeback failed: mark the page as bad, and invalidate the page cache */
9457 -static void nfs_set_pageerror(struct page *page)
9458 +static void nfs_set_pageerror(struct address_space *mapping)
9459 {
9460 - nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
9461 + nfs_zap_mapping(mapping->host, mapping);
9462 }
9463
9464 /*
9465 @@ -994,7 +994,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
9466 nfs_list_remove_request(req);
9467 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
9468 (hdr->good_bytes < bytes)) {
9469 - nfs_set_pageerror(req->wb_page);
9470 + nfs_set_pageerror(page_file_mapping(req->wb_page));
9471 nfs_context_set_write_error(req->wb_context, hdr->error);
9472 goto remove_req;
9473 }
9474 @@ -1330,7 +1330,8 @@ int nfs_updatepage(struct file *file, struct page *page,
9475 unsigned int offset, unsigned int count)
9476 {
9477 struct nfs_open_context *ctx = nfs_file_open_context(file);
9478 - struct inode *inode = page_file_mapping(page)->host;
9479 + struct address_space *mapping = page_file_mapping(page);
9480 + struct inode *inode = mapping->host;
9481 int status = 0;
9482
9483 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
9484 @@ -1348,7 +1349,7 @@ int nfs_updatepage(struct file *file, struct page *page,
9485
9486 status = nfs_writepage_setup(ctx, page, offset, count);
9487 if (status < 0)
9488 - nfs_set_pageerror(page);
9489 + nfs_set_pageerror(mapping);
9490 else
9491 __set_page_dirty_nobuffers(page);
9492 out:
9493 diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
9494 index 9eb8086ea841..c9cf46e0c040 100644
9495 --- a/fs/nfsd/nfs3proc.c
9496 +++ b/fs/nfsd/nfs3proc.c
9497 @@ -463,8 +463,19 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
9498 &resp->common, nfs3svc_encode_entry);
9499 memcpy(resp->verf, argp->verf, 8);
9500 resp->count = resp->buffer - argp->buffer;
9501 - if (resp->offset)
9502 - xdr_encode_hyper(resp->offset, argp->cookie);
9503 + if (resp->offset) {
9504 + loff_t offset = argp->cookie;
9505 +
9506 + if (unlikely(resp->offset1)) {
9507 + /* we ended up with offset on a page boundary */
9508 + *resp->offset = htonl(offset >> 32);
9509 + *resp->offset1 = htonl(offset & 0xffffffff);
9510 + resp->offset1 = NULL;
9511 + } else {
9512 + xdr_encode_hyper(resp->offset, offset);
9513 + }
9514 + resp->offset = NULL;
9515 + }
9516
9517 RETURN_STATUS(nfserr);
9518 }
9519 @@ -533,6 +544,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
9520 } else {
9521 xdr_encode_hyper(resp->offset, offset);
9522 }
9523 + resp->offset = NULL;
9524 }
9525
9526 RETURN_STATUS(nfserr);
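
Note on the readdir hunk above: it handles an 8-byte XDR cookie whose two 32-bit words land on different pages, so each network-order half is stored through its own pointer instead of one contiguous xdr_encode_hyper(). A minimal userspace sketch of the same split, with stand-in variable names (not the kernel's):

/* An XDR "hyper" is a 64-bit big-endian value: two network-order 32-bit
 * words, high word first. When the words straddle a page boundary the
 * hunk writes each half through its own pointer. Illustrative only. */
#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cookie = 0x0123456789abcdefULL;
	uint32_t word0, word1;		/* stand-ins for *offset / *offset1 */

	word0 = htonl((uint32_t)(cookie >> 32));	/* high half */
	word1 = htonl((uint32_t)(cookie & 0xffffffff));	/* low half */

	/* Reassembling proves the layout matches one contiguous hyper. */
	uint64_t back = ((uint64_t)ntohl(word0) << 32) | ntohl(word1);
	assert(back == cookie);
	printf("hi=%08x lo=%08x\n", ntohl(word0), ntohl(word1));
	return 0;
}
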
9527 diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
9528 index 9b973f4f7d01..83919116d5cb 100644
9529 --- a/fs/nfsd/nfs3xdr.c
9530 +++ b/fs/nfsd/nfs3xdr.c
9531 @@ -921,6 +921,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
9532 } else {
9533 xdr_encode_hyper(cd->offset, offset64);
9534 }
9535 + cd->offset = NULL;
9536 }
9537
9538 /*
9539 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
9540 index 9c6d1d57b598..bec75600e692 100644
9541 --- a/fs/nfsd/nfs4state.c
9542 +++ b/fs/nfsd/nfs4state.c
9543 @@ -1514,16 +1514,16 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
9544 {
9545 u32 slotsize = slot_bytes(ca);
9546 u32 num = ca->maxreqs;
9547 - int avail;
9548 + unsigned long avail, total_avail;
9549
9550 spin_lock(&nfsd_drc_lock);
9551 - avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
9552 - nfsd_drc_max_mem - nfsd_drc_mem_used);
9553 + total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
9554 + avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
9555 /*
9556 * Never use more than a third of the remaining memory,
9557 * unless it's the only way to give this client a slot:
9558 */
9559 - avail = clamp_t(int, avail, slotsize, avail/3);
9560 + avail = clamp_t(int, avail, slotsize, total_avail/3);
9561 num = min_t(int, num, avail / slotsize);
9562 nfsd_drc_mem_used += num * slotsize;
9563 spin_unlock(&nfsd_drc_lock);
9564 diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
9565 index 39b835d7c445..cb69660d0779 100644
9566 --- a/fs/nfsd/nfsctl.c
9567 +++ b/fs/nfsd/nfsctl.c
9568 @@ -1126,7 +1126,7 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
9569 case 'Y':
9570 case 'y':
9571 case '1':
9572 - if (nn->nfsd_serv)
9573 + if (!nn->nfsd_serv)
9574 return -EBUSY;
9575 nfsd4_end_grace(nn);
9576 break;
9577 diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
9578 index 1cc797a08a5b..75eeee08d848 100644
9579 --- a/fs/overlayfs/copy_up.c
9580 +++ b/fs/overlayfs/copy_up.c
9581 @@ -501,6 +501,24 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
9582 {
9583 int err;
9584
9585 + /*
9586 + * Copy up data first and then xattrs. Writing data after
9587 + * xattrs will remove security.capability xattr automatically.
9588 + */
9589 + if (S_ISREG(c->stat.mode) && !c->metacopy) {
9590 + struct path upperpath, datapath;
9591 +
9592 + ovl_path_upper(c->dentry, &upperpath);
9593 + if (WARN_ON(upperpath.dentry != NULL))
9594 + return -EIO;
9595 + upperpath.dentry = temp;
9596 +
9597 + ovl_path_lowerdata(c->dentry, &datapath);
9598 + err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
9599 + if (err)
9600 + return err;
9601 + }
9602 +
9603 err = ovl_copy_xattr(c->lowerpath.dentry, temp);
9604 if (err)
9605 return err;
9606 @@ -518,19 +536,6 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
9607 return err;
9608 }
9609
9610 - if (S_ISREG(c->stat.mode) && !c->metacopy) {
9611 - struct path upperpath, datapath;
9612 -
9613 - ovl_path_upper(c->dentry, &upperpath);
9614 - BUG_ON(upperpath.dentry != NULL);
9615 - upperpath.dentry = temp;
9616 -
9617 - ovl_path_lowerdata(c->dentry, &datapath);
9618 - err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
9619 - if (err)
9620 - return err;
9621 - }
9622 -
9623 if (c->metacopy) {
9624 err = ovl_check_setxattr(c->dentry, temp, OVL_XATTR_METACOPY,
9625 NULL, 0, -EOPNOTSUPP);
9626 @@ -706,6 +711,8 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
9627 {
9628 struct path upperpath, datapath;
9629 int err;
9630 + char *capability = NULL;
9631 + ssize_t uninitialized_var(cap_size);
9632
9633 ovl_path_upper(c->dentry, &upperpath);
9634 if (WARN_ON(upperpath.dentry == NULL))
9635 @@ -715,15 +722,37 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
9636 if (WARN_ON(datapath.dentry == NULL))
9637 return -EIO;
9638
9639 + if (c->stat.size) {
9640 + err = cap_size = ovl_getxattr(upperpath.dentry, XATTR_NAME_CAPS,
9641 + &capability, 0);
9642 + if (err < 0 && err != -ENODATA)
9643 + goto out;
9644 + }
9645 +
9646 err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
9647 if (err)
9648 - return err;
9649 + goto out_free;
9650 +
9651 + /*
9652 + * Writing to upper file will clear security.capability xattr. We
9653 + * don't want that to happen for normal copy-up operation.
9654 + */
9655 + if (capability) {
9656 + err = ovl_do_setxattr(upperpath.dentry, XATTR_NAME_CAPS,
9657 + capability, cap_size, 0);
9658 + if (err)
9659 + goto out_free;
9660 + }
9661 +
9662
9663 err = vfs_removexattr(upperpath.dentry, OVL_XATTR_METACOPY);
9664 if (err)
9665 - return err;
9666 + goto out_free;
9667
9668 ovl_set_upperdata(d_inode(c->dentry));
9669 +out_free:
9670 + kfree(capability);
9671 +out:
9672 return err;
9673 }
9674
9675 diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
9676 index a3c0d9584312..d9c16ceebfe7 100644
9677 --- a/fs/overlayfs/overlayfs.h
9678 +++ b/fs/overlayfs/overlayfs.h
9679 @@ -277,6 +277,8 @@ int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
9680 int ovl_check_metacopy_xattr(struct dentry *dentry);
9681 bool ovl_is_metacopy_dentry(struct dentry *dentry);
9682 char *ovl_get_redirect_xattr(struct dentry *dentry, int padding);
9683 +ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
9684 + size_t padding);
9685
9686 static inline bool ovl_is_impuredir(struct dentry *dentry)
9687 {
9688 diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
9689 index ace4fe4c39a9..c9a2e3c6d537 100644
9690 --- a/fs/overlayfs/util.c
9691 +++ b/fs/overlayfs/util.c
9692 @@ -867,28 +867,49 @@ bool ovl_is_metacopy_dentry(struct dentry *dentry)
9693 return (oe->numlower > 1);
9694 }
9695
9696 -char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
9697 +ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
9698 + size_t padding)
9699 {
9700 - int res;
9701 - char *s, *next, *buf = NULL;
9702 + ssize_t res;
9703 + char *buf = NULL;
9704
9705 - res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, NULL, 0);
9706 + res = vfs_getxattr(dentry, name, NULL, 0);
9707 if (res < 0) {
9708 if (res == -ENODATA || res == -EOPNOTSUPP)
9709 - return NULL;
9710 + return -ENODATA;
9711 goto fail;
9712 }
9713
9714 - buf = kzalloc(res + padding + 1, GFP_KERNEL);
9715 - if (!buf)
9716 - return ERR_PTR(-ENOMEM);
9717 + if (res != 0) {
9718 + buf = kzalloc(res + padding, GFP_KERNEL);
9719 + if (!buf)
9720 + return -ENOMEM;
9721
9722 - if (res == 0)
9723 - goto invalid;
9724 + res = vfs_getxattr(dentry, name, buf, res);
9725 + if (res < 0)
9726 + goto fail;
9727 + }
9728 + *value = buf;
9729 +
9730 + return res;
9731 +
9732 +fail:
9733 + pr_warn_ratelimited("overlayfs: failed to get xattr %s: err=%zi)\n",
9734 + name, res);
9735 + kfree(buf);
9736 + return res;
9737 +}
9738
9739 - res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, buf, res);
9740 +char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
9741 +{
9742 + int res;
9743 + char *s, *next, *buf = NULL;
9744 +
9745 + res = ovl_getxattr(dentry, OVL_XATTR_REDIRECT, &buf, padding + 1);
9746 + if (res == -ENODATA)
9747 + return NULL;
9748 if (res < 0)
9749 - goto fail;
9750 + return ERR_PTR(res);
9751 if (res == 0)
9752 goto invalid;
9753
9754 @@ -904,15 +925,9 @@ char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
9755 }
9756
9757 return buf;
9758 -
9759 -err_free:
9760 - kfree(buf);
9761 - return ERR_PTR(res);
9762 -fail:
9763 - pr_warn_ratelimited("overlayfs: failed to get redirect (%i)\n", res);
9764 - goto err_free;
9765 invalid:
9766 pr_warn_ratelimited("overlayfs: invalid redirect (%s)\n", buf);
9767 res = -EINVAL;
9768 - goto err_free;
9769 + kfree(buf);
9770 + return ERR_PTR(res);
9771 }
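
The new ovl_getxattr() helper above follows the usual size-query-then-fetch xattr pattern: ask for the value size with a NULL buffer, allocate, then fetch for real. A hedged userspace analogue using the getxattr(2) syscall (the kernel helper itself uses vfs_getxattr() and kzalloc(); the names below are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/xattr.h>

static ssize_t get_xattr_alloc(const char *path, const char *name,
			       char **value, size_t padding)
{
	ssize_t res = getxattr(path, name, NULL, 0);	/* size query */
	char *buf = NULL;

	if (res < 0)
		return (errno == ENODATA || errno == ENOTSUP) ? -ENODATA : -errno;
	if (res != 0) {
		buf = calloc(1, res + padding);		/* zeroed, like kzalloc */
		if (!buf)
			return -ENOMEM;
		res = getxattr(path, name, buf, res);	/* real fetch */
		if (res < 0) {
			free(buf);
			return -errno;
		}
	}
	*value = buf;
	return res;
}

int main(int argc, char **argv)
{
	char *val = NULL;
	ssize_t n = get_xattr_alloc(argc > 1 ? argv[1] : ".", "user.test",
				    &val, 1);

	if (n >= 0)
		printf("%zd bytes: %s\n", n, val ? val : "");
	else
		fprintf(stderr, "error %zd\n", n);
	free(val);
	return 0;
}
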
9772 diff --git a/fs/pipe.c b/fs/pipe.c
9773 index bdc5d3c0977d..c51750ed4011 100644
9774 --- a/fs/pipe.c
9775 +++ b/fs/pipe.c
9776 @@ -234,6 +234,14 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
9777 .get = generic_pipe_buf_get,
9778 };
9779
9780 +static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
9781 + .can_merge = 0,
9782 + .confirm = generic_pipe_buf_confirm,
9783 + .release = anon_pipe_buf_release,
9784 + .steal = anon_pipe_buf_steal,
9785 + .get = generic_pipe_buf_get,
9786 +};
9787 +
9788 static const struct pipe_buf_operations packet_pipe_buf_ops = {
9789 .can_merge = 0,
9790 .confirm = generic_pipe_buf_confirm,
9791 @@ -242,6 +250,12 @@ static const struct pipe_buf_operations packet_pipe_buf_ops = {
9792 .get = generic_pipe_buf_get,
9793 };
9794
9795 +void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
9796 +{
9797 + if (buf->ops == &anon_pipe_buf_ops)
9798 + buf->ops = &anon_pipe_buf_nomerge_ops;
9799 +}
9800 +
9801 static ssize_t
9802 pipe_read(struct kiocb *iocb, struct iov_iter *to)
9803 {
9804 diff --git a/fs/splice.c b/fs/splice.c
9805 index b3daa971f597..29e92b506394 100644
9806 --- a/fs/splice.c
9807 +++ b/fs/splice.c
9808 @@ -1593,6 +1593,8 @@ retry:
9809 */
9810 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
9811
9812 + pipe_buf_mark_unmergeable(obuf);
9813 +
9814 obuf->len = len;
9815 opipe->nrbufs++;
9816 ibuf->offset += obuf->len;
9817 @@ -1667,6 +1669,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
9818 */
9819 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
9820
9821 + pipe_buf_mark_unmergeable(obuf);
9822 +
9823 if (obuf->len > len)
9824 obuf->len = len;
9825
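
The two splice.c hunks above mark copied buffers unmergeable so a later write to the source pipe cannot be appended into a buffer the destination pipe already references. A small userspace sketch of the sequence being protected, using tee(2); on a kernel with this fix the reader of the second pipe should see only the teed bytes:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int a[2], b[2];
	char buf[64];
	ssize_t n;

	if (pipe(a) || pipe(b))
		return 1;
	(void)write(a[1], "first", 5);
	if (tee(a[0], b[1], 5, SPLICE_F_NONBLOCK) != 5)	/* share A's buffer into B */
		return 1;
	(void)write(a[1], "second", 6);	/* must not merge into the shared buffer */

	n = read(b[0], buf, sizeof(buf));
	printf("teed %zd bytes: %.*s\n", n, (int)n, buf);  /* expect 5, "first" */
	return 0;
}
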
9826 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
9827 index d7701d466b60..dd38c97933f1 100644
9828 --- a/include/asm-generic/vmlinux.lds.h
9829 +++ b/include/asm-generic/vmlinux.lds.h
9830 @@ -727,7 +727,7 @@
9831 KEEP(*(.orc_unwind_ip)) \
9832 __stop_orc_unwind_ip = .; \
9833 } \
9834 - . = ALIGN(6); \
9835 + . = ALIGN(2); \
9836 .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
9837 __start_orc_unwind = .; \
9838 KEEP(*(.orc_unwind)) \
9839 diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h
9840 new file mode 100644
9841 index 000000000000..a726dd3f1dc6
9842 --- /dev/null
9843 +++ b/include/keys/request_key_auth-type.h
9844 @@ -0,0 +1,36 @@
9845 +/* request_key authorisation token key type
9846 + *
9847 + * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
9848 + * Written by David Howells (dhowells@redhat.com)
9849 + *
9850 + * This program is free software; you can redistribute it and/or
9851 + * modify it under the terms of the GNU General Public Licence
9852 + * as published by the Free Software Foundation; either version
9853 + * 2 of the Licence, or (at your option) any later version.
9854 + */
9855 +
9856 +#ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H
9857 +#define _KEYS_REQUEST_KEY_AUTH_TYPE_H
9858 +
9859 +#include <linux/key.h>
9860 +
9861 +/*
9862 + * Authorisation record for request_key().
9863 + */
9864 +struct request_key_auth {
9865 + struct key *target_key;
9866 + struct key *dest_keyring;
9867 + const struct cred *cred;
9868 + void *callout_info;
9869 + size_t callout_len;
9870 + pid_t pid;
9871 + char op[8];
9872 +} __randomize_layout;
9873 +
9874 +static inline struct request_key_auth *get_request_key_auth(const struct key *key)
9875 +{
9876 + return key->payload.data[0];
9877 +}
9878 +
9879 +
9880 +#endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */
9881 diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
9882 index 4f31f96bbfab..90ac450745f1 100644
9883 --- a/include/kvm/arm_vgic.h
9884 +++ b/include/kvm/arm_vgic.h
9885 @@ -256,7 +256,7 @@ struct vgic_dist {
9886 u64 propbaser;
9887
9888 /* Protects the lpi_list and the count value below. */
9889 - spinlock_t lpi_list_lock;
9890 + raw_spinlock_t lpi_list_lock;
9891 struct list_head lpi_list_head;
9892 int lpi_list_count;
9893
9894 diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
9895 index 6fb0808e87c8..bef2e36c01b4 100644
9896 --- a/include/linux/device-mapper.h
9897 +++ b/include/linux/device-mapper.h
9898 @@ -601,7 +601,7 @@ do { \
9899 */
9900 #define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
9901
9902 -static inline sector_t to_sector(unsigned long n)
9903 +static inline sector_t to_sector(unsigned long long n)
9904 {
9905 return (n >> SECTOR_SHIFT);
9906 }
9907 diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
9908 index 0fbbcdf0c178..da0af631ded5 100644
9909 --- a/include/linux/hardirq.h
9910 +++ b/include/linux/hardirq.h
9911 @@ -60,8 +60,14 @@ extern void irq_enter(void);
9912 */
9913 extern void irq_exit(void);
9914
9915 +#ifndef arch_nmi_enter
9916 +#define arch_nmi_enter() do { } while (0)
9917 +#define arch_nmi_exit() do { } while (0)
9918 +#endif
9919 +
9920 #define nmi_enter() \
9921 do { \
9922 + arch_nmi_enter(); \
9923 printk_nmi_enter(); \
9924 lockdep_off(); \
9925 ftrace_nmi_enter(); \
9926 @@ -80,6 +86,7 @@ extern void irq_exit(void);
9927 ftrace_nmi_exit(); \
9928 lockdep_on(); \
9929 printk_nmi_exit(); \
9930 + arch_nmi_exit(); \
9931 } while (0)
9932
9933 #endif /* LINUX_HARDIRQ_H */
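
The arch_nmi_enter()/arch_nmi_exit() hooks above use the common pattern of #ifndef-guarded no-op defaults that an architecture header can pre-empt by defining the macro first. A plain-C sketch of the pattern (the "arch" and "generic" file roles are illustrative):

#include <stdio.h>

/* "arch" header: an architecture that wants its own hook defines it first */
#define arch_nmi_enter()  puts("arch nmi enter")

/* generic header: supplies no-op defaults only if still undefined */
#ifndef arch_nmi_enter
#define arch_nmi_enter()  do { } while (0)
#endif
#ifndef arch_nmi_exit
#define arch_nmi_exit()   do { } while (0)
#endif

int main(void)
{
	arch_nmi_enter();	/* expands to the arch version */
	arch_nmi_exit();	/* expands to the generic no-op */
	return 0;
}
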
9934 diff --git a/include/linux/key-type.h b/include/linux/key-type.h
9935 index 05d8fb5a06c4..d3c5ae8ad498 100644
9936 --- a/include/linux/key-type.h
9937 +++ b/include/linux/key-type.h
9938 @@ -17,15 +17,6 @@
9939
9940 #ifdef CONFIG_KEYS
9941
9942 -/*
9943 - * key under-construction record
9944 - * - passed to the request_key actor if supplied
9945 - */
9946 -struct key_construction {
9947 - struct key *key; /* key being constructed */
9948 - struct key *authkey;/* authorisation for key being constructed */
9949 -};
9950 -
9951 /*
9952 * Pre-parsed payload, used by key add, update and instantiate.
9953 *
9954 @@ -47,8 +38,7 @@ struct key_preparsed_payload {
9955 time64_t expiry; /* Expiry time of key */
9956 } __randomize_layout;
9957
9958 -typedef int (*request_key_actor_t)(struct key_construction *key,
9959 - const char *op, void *aux);
9960 +typedef int (*request_key_actor_t)(struct key *auth_key, void *aux);
9961
9962 /*
9963 * Preparsed matching criterion.
9964 @@ -170,20 +160,20 @@ extern int key_instantiate_and_link(struct key *key,
9965 const void *data,
9966 size_t datalen,
9967 struct key *keyring,
9968 - struct key *instkey);
9969 + struct key *authkey);
9970 extern int key_reject_and_link(struct key *key,
9971 unsigned timeout,
9972 unsigned error,
9973 struct key *keyring,
9974 - struct key *instkey);
9975 -extern void complete_request_key(struct key_construction *cons, int error);
9976 + struct key *authkey);
9977 +extern void complete_request_key(struct key *authkey, int error);
9978
9979 static inline int key_negate_and_link(struct key *key,
9980 unsigned timeout,
9981 struct key *keyring,
9982 - struct key *instkey)
9983 + struct key *authkey)
9984 {
9985 - return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey);
9986 + return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey);
9987 }
9988
9989 extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
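
With struct key_construction gone, actors now reach the key under construction through the authorisation key's payload, matching the new request_key_actor_t signature above. An illustrative actor sketch under that signature; instantiate_somehow() is a hypothetical placeholder, not a kernel function:

#include <keys/request_key_auth-type.h>
#include <linux/key-type.h>

static int example_request_key_actor(struct key *authkey, void *aux)
{
	struct request_key_auth *rka = get_request_key_auth(authkey);
	struct key *key = rka->target_key;	/* key being constructed */
	int err;

	/* hypothetical: produce and instantiate the payload somehow */
	err = instantiate_somehow(key, authkey, rka->callout_info,
				  rka->callout_len);
	complete_request_key(authkey, err);	/* new one-argument handle */
	return err;
}
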
9990 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
9991 index a03d5e264e5e..23c242a7ac52 100644
9992 --- a/include/linux/kvm_host.h
9993 +++ b/include/linux/kvm_host.h
9994 @@ -633,7 +633,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
9995 struct kvm_memory_slot *dont);
9996 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
9997 unsigned long npages);
9998 -void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
9999 +void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
10000 int kvm_arch_prepare_memory_region(struct kvm *kvm,
10001 struct kvm_memory_slot *memslot,
10002 const struct kvm_userspace_memory_region *mem,
10003 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
10004 index 5a3bb3b7c9ad..3ecd7ea212ae 100644
10005 --- a/include/linux/pipe_fs_i.h
10006 +++ b/include/linux/pipe_fs_i.h
10007 @@ -182,6 +182,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
10008 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
10009 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
10010 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
10011 +void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
10012
10013 extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
10014
10015 diff --git a/include/linux/property.h b/include/linux/property.h
10016 index ac8a1ebc4c1b..1a12364050d8 100644
10017 --- a/include/linux/property.h
10018 +++ b/include/linux/property.h
10019 @@ -258,7 +258,7 @@ struct property_entry {
10020 #define PROPERTY_ENTRY_STRING(_name_, _val_) \
10021 (struct property_entry) { \
10022 .name = _name_, \
10023 - .length = sizeof(_val_), \
10024 + .length = sizeof(const char *), \
10025 .type = DEV_PROP_STRING, \
10026 { .value = { .str = _val_ } }, \
10027 }
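
The PROPERTY_ENTRY_STRING fix above matters because sizeof applied to a string literal gives the array size (bytes including the NUL), not the size of the pointer that the entry actually stores. Quick demo, assuming an LP64 target where pointers are 8 bytes:

#include <stdio.h>

int main(void)
{
	const char *str = "hello";

	printf("sizeof(\"hello\")      = %zu\n", sizeof("hello"));	/* 6 */
	printf("sizeof(const char *) = %zu\n", sizeof(const char *));	/* 8 on LP64 */
	printf("sizeof(str)          = %zu\n", sizeof(str));		/* same 8 */
	return 0;
}
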
10028 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
10029 index a404d475acee..820903ceac4f 100644
10030 --- a/include/linux/skbuff.h
10031 +++ b/include/linux/skbuff.h
10032 @@ -4086,6 +4086,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
10033 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
10034 }
10035
10036 +/* Note: Should be called only if skb_is_gso(skb) is true */
10037 +static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
10038 +{
10039 + return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
10040 +}
10041 +
10042 static inline void skb_gso_reset(struct sk_buff *skb)
10043 {
10044 skb_shinfo(skb)->gso_size = 0;
10045 diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
10046 index b669fe6dbc3b..98f31c7ea23d 100644
10047 --- a/include/net/phonet/pep.h
10048 +++ b/include/net/phonet/pep.h
10049 @@ -63,10 +63,11 @@ struct pnpipehdr {
10050 u8 state_after_reset; /* reset request */
10051 u8 error_code; /* any response */
10052 u8 pep_type; /* status indication */
10053 - u8 data[1];
10054 + u8 data0; /* anything else */
10055 };
10056 + u8 data[];
10057 };
10058 -#define other_pep_type data[1]
10059 +#define other_pep_type data[0]
10060
10061 static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
10062 {
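
The pnpipehdr change above converts the old data[1] struct hack into a named byte plus a C99 flexible array member, which is why every payload index in net/phonet/pep.c later in this patch shifts down by one (data[1] becomes data[0], data[4] becomes data[3]). Simplified sketch with illustrative types:

#include <stdint.h>
#include <stdio.h>

struct old_hdr {
	uint8_t id;
	uint8_t data[1];	/* struct hack: data[1].. abused past the end */
};

struct new_hdr {
	uint8_t id;
	uint8_t data0;		/* the byte old code called data[0] */
	uint8_t data[];		/* real payload, properly indexed from 0 */
};

int main(void)
{
	uint8_t raw[] = { 0x10, 0xAA, 0xBB, 0xCC };
	const struct old_hdr *o = (const void *)raw;
	const struct new_hdr *n = (const void *)raw;

	/* The same wire byte, addressed both ways: */
	printf("old data[1] = %#x, new data[0] = %#x\n",
	       o->data[1], n->data[0]);	/* both 0xBB */
	return 0;
}
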
10063 diff --git a/init/main.c b/init/main.c
10064 index 18f8f0140fa0..e083fac08aed 100644
10065 --- a/init/main.c
10066 +++ b/init/main.c
10067 @@ -689,7 +689,6 @@ asmlinkage __visible void __init start_kernel(void)
10068 initrd_start = 0;
10069 }
10070 #endif
10071 - page_ext_init();
10072 kmemleak_init();
10073 debug_objects_mem_init();
10074 setup_per_cpu_pageset();
10075 @@ -1140,6 +1139,8 @@ static noinline void __init kernel_init_freeable(void)
10076 sched_init_smp();
10077
10078 page_alloc_init_late();
10079 + /* Initialize page ext after all struct pages are initialized. */
10080 + page_ext_init();
10081
10082 do_basic_setup();
10083
10084 diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
10085 index 9058317ba9de..4f3138e6ecb2 100644
10086 --- a/kernel/bpf/lpm_trie.c
10087 +++ b/kernel/bpf/lpm_trie.c
10088 @@ -432,6 +432,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
10089 }
10090
10091 if (!node || node->prefixlen != key->prefixlen ||
10092 + node->prefixlen != matchlen ||
10093 (node->flags & LPM_TREE_NODE_FLAG_IM)) {
10094 ret = -ENOENT;
10095 goto out;
10096 diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
10097 index 6a32933cae4f..7cb7a7f98a37 100644
10098 --- a/kernel/bpf/stackmap.c
10099 +++ b/kernel/bpf/stackmap.c
10100 @@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
10101 struct stack_map_irq_work *work;
10102
10103 work = container_of(entry, struct stack_map_irq_work, irq_work);
10104 - up_read(work->sem);
10105 + up_read_non_owner(work->sem);
10106 work->sem = NULL;
10107 }
10108
10109 @@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
10110 } else {
10111 work->sem = &current->mm->mmap_sem;
10112 irq_work_queue(&work->irq_work);
10113 + /*
10114 + * The irq_work will release the mmap_sem with
10115 + * up_read_non_owner(). The rwsem_release() is called
10116 + * here to release the lock from lockdep's perspective.
10117 + */
10118 + rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
10119 }
10120 }
10121
10122 diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
10123 index e578c3999970..e710ac7fbbbf 100644
10124 --- a/kernel/cgroup/cgroup.c
10125 +++ b/kernel/cgroup/cgroup.c
10126 @@ -1998,7 +1998,7 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
10127 struct cgroup_namespace *ns)
10128 {
10129 struct dentry *dentry;
10130 - bool new_sb;
10131 + bool new_sb = false;
10132
10133 dentry = kernfs_mount(fs_type, flags, root->kf_root, magic, &new_sb);
10134
10135 @@ -2008,6 +2008,7 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
10136 */
10137 if (!IS_ERR(dentry) && ns != &init_cgroup_ns) {
10138 struct dentry *nsdentry;
10139 + struct super_block *sb = dentry->d_sb;
10140 struct cgroup *cgrp;
10141
10142 mutex_lock(&cgroup_mutex);
10143 @@ -2018,12 +2019,14 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
10144 spin_unlock_irq(&css_set_lock);
10145 mutex_unlock(&cgroup_mutex);
10146
10147 - nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
10148 + nsdentry = kernfs_node_dentry(cgrp->kn, sb);
10149 dput(dentry);
10150 + if (IS_ERR(nsdentry))
10151 + deactivate_locked_super(sb);
10152 dentry = nsdentry;
10153 }
10154
10155 - if (IS_ERR(dentry) || !new_sb)
10156 + if (!new_sb)
10157 cgroup_put(&root->cgrp);
10158
10159 return dentry;
10160 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
10161 index 15301ed19da6..f7e89c989df7 100644
10162 --- a/kernel/rcu/tree.c
10163 +++ b/kernel/rcu/tree.c
10164 @@ -1689,15 +1689,23 @@ static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
10165 }
10166
10167 /*
10168 - * Awaken the grace-period kthread for the specified flavor of RCU.
10169 - * Don't do a self-awaken, and don't bother awakening when there is
10170 - * nothing for the grace-period kthread to do (as in several CPUs
10171 - * raced to awaken, and we lost), and finally don't try to awaken
10172 - * a kthread that has not yet been created.
10173 + * Awaken the grace-period kthread. Don't do a self-awaken (unless in
10174 + * an interrupt or softirq handler), and don't bother awakening when there
10175 + * is nothing for the grace-period kthread to do (as in several CPUs raced
10176 + * to awaken, and we lost), and finally don't try to awaken a kthread that
10177 + * has not yet been created. If all those checks are passed, track some
10178 + * debug information and awaken.
10179 + *
10180 + * So why do the self-wakeup when in an interrupt or softirq handler
10181 + * in the grace-period kthread's context? Because the kthread might have
10182 + * been interrupted just as it was going to sleep, and just after the final
10183 + * pre-sleep check of the awaken condition. In this case, a wakeup really
10184 + * is required, and is therefore supplied.
10185 */
10186 static void rcu_gp_kthread_wake(struct rcu_state *rsp)
10187 {
10188 - if (current == rsp->gp_kthread ||
10189 + if ((current == rsp->gp_kthread &&
10190 + !in_interrupt() && !in_serving_softirq()) ||
10191 !READ_ONCE(rsp->gp_flags) ||
10192 !rsp->gp_kthread)
10193 return;
10194 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
10195 index 32dea29d05a0..3b86acd5de4e 100644
10196 --- a/kernel/sysctl.c
10197 +++ b/kernel/sysctl.c
10198 @@ -2552,7 +2552,16 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
10199 {
10200 struct do_proc_dointvec_minmax_conv_param *param = data;
10201 if (write) {
10202 - int val = *negp ? -*lvalp : *lvalp;
10203 + int val;
10204 + if (*negp) {
10205 + if (*lvalp > (unsigned long) INT_MAX + 1)
10206 + return -EINVAL;
10207 + val = -*lvalp;
10208 + } else {
10209 + if (*lvalp > (unsigned long) INT_MAX)
10210 + return -EINVAL;
10211 + val = *lvalp;
10212 + }
10213 if ((param->min && *param->min > val) ||
10214 (param->max && *param->max < val))
10215 return -EINVAL;
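
The widened check above relies on two's-complement asymmetry: a negative sysctl value may have magnitude INT_MAX + 1 (that is, INT_MIN), while a positive one is capped at INT_MAX. Userspace demo; the final conversion of the negated unsigned value assumes the usual wrap-around behaviour of mainstream targets:

#include <limits.h>
#include <stdio.h>

static int parse_val(unsigned long lval, int neg, int *out)
{
	if (neg) {
		if (lval > (unsigned long)INT_MAX + 1)
			return -1;		/* magnitude too big */
		*out = (int)-lval;		/* INT_MIN is reachable */
	} else {
		if (lval > (unsigned long)INT_MAX)
			return -1;
		*out = (int)lval;
	}
	return 0;
}

int main(void)
{
	int v;

	printf("neg 2147483648: %s\n",
	       parse_val(2147483648UL, 1, &v) ? "EINVAL" : "ok");	/* ok, INT_MIN */
	printf("pos 2147483648: %s\n",
	       parse_val(2147483648UL, 0, &v) ? "EINVAL" : "ok");	/* EINVAL */
	printf("neg 2147483649: %s\n",
	       parse_val(2147483649UL, 1, &v) ? "EINVAL" : "ok");	/* EINVAL */
	return 0;
}
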
10216 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
10217 index 17bd0c0dfa98..1f96b292df31 100644
10218 --- a/kernel/trace/trace.c
10219 +++ b/kernel/trace/trace.c
10220 @@ -5606,7 +5606,6 @@ out:
10221 return ret;
10222
10223 fail:
10224 - kfree(iter->trace);
10225 kfree(iter);
10226 __trace_array_put(tr);
10227 mutex_unlock(&trace_types_lock);
10228 diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
10229 index 69a3fe926e8c..e6945b55c688 100644
10230 --- a/kernel/trace/trace_event_perf.c
10231 +++ b/kernel/trace/trace_event_perf.c
10232 @@ -298,15 +298,13 @@ int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
10233
10234 if (!p_event->attr.uprobe_path)
10235 return -EINVAL;
10236 - path = kzalloc(PATH_MAX, GFP_KERNEL);
10237 - if (!path)
10238 - return -ENOMEM;
10239 - ret = strncpy_from_user(
10240 - path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
10241 - if (ret == PATH_MAX)
10242 - return -E2BIG;
10243 - if (ret < 0)
10244 - goto out;
10245 +
10246 + path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
10247 + PATH_MAX);
10248 + if (IS_ERR(path)) {
10249 + ret = PTR_ERR(path);
10250 + return (ret == -EINVAL) ? -E2BIG : ret;
10251 + }
10252 if (path[0] == '\0') {
10253 ret = -EINVAL;
10254 goto out;
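
strndup_user() reports a user string that does not fit within the given bound as ERR_PTR(-EINVAL), which the hunk above remaps to the -E2BIG this perf interface promises. A userspace analogue of the bounded-dup-plus-error-mapping pattern (the helper name is made up):

#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *strndup_bounded(const char *s, size_t max, int *err)
{
	size_t len = strnlen(s, max);

	if (len == max) {		/* no NUL within the limit: too long */
		*err = EINVAL;		/* what strndup_user() would report */
		return NULL;
	}
	*err = 0;
	return strdup(s);
}

int main(void)
{
	int err;
	char *path = strndup_bounded("/way/too/long", 8, &err);

	if (!path) {
		/* remap: "doesn't fit" is E2BIG for this interface */
		err = (err == EINVAL) ? E2BIG : err;
		printf("failed: %s\n", strerror(err));
		return 1;
	}
	printf("ok: %s\n", path);
	free(path);
	return 0;
}
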
10255 diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
10256 index eb908ef2ecec..11853e90b649 100644
10257 --- a/kernel/trace/trace_events_hist.c
10258 +++ b/kernel/trace/trace_events_hist.c
10259 @@ -4621,9 +4621,10 @@ static inline void add_to_key(char *compound_key, void *key,
10260 /* ensure NULL-termination */
10261 if (size > key_field->size - 1)
10262 size = key_field->size - 1;
10263 - }
10264
10265 - memcpy(compound_key + key_field->offset, key, size);
10266 + strncpy(compound_key + key_field->offset, (char *)key, size);
10267 + } else
10268 + memcpy(compound_key + key_field->offset, key, size);
10269 }
10270
10271 static void
10272 diff --git a/lib/assoc_array.c b/lib/assoc_array.c
10273 index c6659cb37033..59875eb278ea 100644
10274 --- a/lib/assoc_array.c
10275 +++ b/lib/assoc_array.c
10276 @@ -768,9 +768,11 @@ all_leaves_cluster_together:
10277 new_s0->index_key[i] =
10278 ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
10279
10280 - blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
10281 - pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
10282 - new_s0->index_key[keylen - 1] &= ~blank;
10283 + if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
10284 + blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
10285 + pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
10286 + new_s0->index_key[keylen - 1] &= ~blank;
10287 + }
10288
10289 /* This now reduces to a node splitting exercise for which we'll need
10290 * to regenerate the disparity table.
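
The guard added above avoids a degenerate mask: when the level is an exact multiple of the chunk size, the shift count is 0, ULONG_MAX << 0 is still all ones, and the subsequent &= ~blank would wipe the entire final index word instead of leaving it intact. Demo with an assumed 8-bit chunk (mask 0x7):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long key = 0xDEADBEEFUL;
	int level = 8;				/* exact multiple of chunk size */
	int shift = level & 0x7;		/* == 0 here */

	unsigned long blank = ULONG_MAX << shift;	/* == ULONG_MAX! */
	printf("unguarded: %#lx\n", key & ~blank);	/* 0 -- key destroyed */

	if (shift)				/* the fix: skip when 0 */
		key &= ~(ULONG_MAX << shift);
	printf("guarded:   %#lx\n", key);		/* 0xdeadbeef kept */
	return 0;
}
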
10291 diff --git a/mm/gup.c b/mm/gup.c
10292 index 1abc8b4afff6..0a5374e6e82d 100644
10293 --- a/mm/gup.c
10294 +++ b/mm/gup.c
10295 @@ -1649,7 +1649,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
10296 if (!pmd_present(pmd))
10297 return 0;
10298
10299 - if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
10300 + if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
10301 + pmd_devmap(pmd))) {
10302 /*
10303 * NUMA hinting faults need to be handled in the GUP
10304 * slowpath for accounting purposes and so that they
10305 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
10306 index d9b8a2490633..6edc6db5ec1b 100644
10307 --- a/mm/memory-failure.c
10308 +++ b/mm/memory-failure.c
10309 @@ -1823,19 +1823,17 @@ static int soft_offline_in_use_page(struct page *page, int flags)
10310 struct page *hpage = compound_head(page);
10311
10312 if (!PageHuge(page) && PageTransHuge(hpage)) {
10313 - lock_page(hpage);
10314 - if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
10315 - unlock_page(hpage);
10316 - if (!PageAnon(hpage))
10317 + lock_page(page);
10318 + if (!PageAnon(page) || unlikely(split_huge_page(page))) {
10319 + unlock_page(page);
10320 + if (!PageAnon(page))
10321 pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
10322 else
10323 pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
10324 - put_hwpoison_page(hpage);
10325 + put_hwpoison_page(page);
10326 return -EBUSY;
10327 }
10328 - unlock_page(hpage);
10329 - get_hwpoison_page(page);
10330 - put_hwpoison_page(hpage);
10331 + unlock_page(page);
10332 }
10333
10334 /*
10335 diff --git a/mm/memory.c b/mm/memory.c
10336 index 281172540a9c..5b3f71bcd1ae 100644
10337 --- a/mm/memory.c
10338 +++ b/mm/memory.c
10339 @@ -3762,10 +3762,13 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
10340 * but allow concurrent faults).
10341 * The mmap_sem may have been released depending on flags and our
10342 * return value. See filemap_fault() and __lock_page_or_retry().
10343 + * If mmap_sem is released, vma may become invalid (for example
10344 + * by other thread calling munmap()).
10345 */
10346 static vm_fault_t do_fault(struct vm_fault *vmf)
10347 {
10348 struct vm_area_struct *vma = vmf->vma;
10349 + struct mm_struct *vm_mm = vma->vm_mm;
10350 vm_fault_t ret;
10351
10352 /*
10353 @@ -3806,7 +3809,7 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
10354
10355 /* preallocated pagetable is unused: free it */
10356 if (vmf->prealloc_pte) {
10357 - pte_free(vma->vm_mm, vmf->prealloc_pte);
10358 + pte_free(vm_mm, vmf->prealloc_pte);
10359 vmf->prealloc_pte = NULL;
10360 }
10361 return ret;
10362 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
10363 index a9de1dbb9a6c..ef99971c13dd 100644
10364 --- a/mm/page_alloc.c
10365 +++ b/mm/page_alloc.c
10366 @@ -4532,11 +4532,11 @@ refill:
10367 /* Even if we own the page, we do not use atomic_set().
10368 * This would break get_page_unless_zero() users.
10369 */
10370 - page_ref_add(page, size - 1);
10371 + page_ref_add(page, size);
10372
10373 /* reset page count bias and offset to start of new frag */
10374 nc->pfmemalloc = page_is_pfmemalloc(page);
10375 - nc->pagecnt_bias = size;
10376 + nc->pagecnt_bias = size + 1;
10377 nc->offset = size;
10378 }
10379
10380 @@ -4552,10 +4552,10 @@ refill:
10381 size = nc->size;
10382 #endif
10383 /* OK, page count is 0, we can safely set it */
10384 - set_page_count(page, size);
10385 + set_page_count(page, size + 1);
10386
10387 /* reset page count bias and offset to start of new frag */
10388 - nc->pagecnt_bias = size;
10389 + nc->pagecnt_bias = size + 1;
10390 offset = size - fragsz;
10391 }
10392
10393 diff --git a/mm/page_ext.c b/mm/page_ext.c
10394 index a9826da84ccb..4961f13b6ec1 100644
10395 --- a/mm/page_ext.c
10396 +++ b/mm/page_ext.c
10397 @@ -398,10 +398,8 @@ void __init page_ext_init(void)
10398 * We know some arch can have a nodes layout such as
10399 * -------------pfn-------------->
10400 * N0 | N1 | N2 | N0 | N1 | N2|....
10401 - *
10402 - * Take into account DEFERRED_STRUCT_PAGE_INIT.
10403 */
10404 - if (early_pfn_to_nid(pfn) != nid)
10405 + if (pfn_to_nid(pfn) != nid)
10406 continue;
10407 if (init_section_page_ext(pfn, nid))
10408 goto oom;
10409 diff --git a/mm/shmem.c b/mm/shmem.c
10410 index b6cf0e8e685b..3c8742655756 100644
10411 --- a/mm/shmem.c
10412 +++ b/mm/shmem.c
10413 @@ -2895,16 +2895,20 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
10414 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
10415 {
10416 struct inode *inode = d_inode(old_dentry);
10417 - int ret;
10418 + int ret = 0;
10419
10420 /*
10421 * No ordinary (disk based) filesystem counts links as inodes;
10422 * but each new link needs a new dentry, pinning lowmem, and
10423 * tmpfs dentries cannot be pruned until they are unlinked.
10424 + * But if an O_TMPFILE file is linked into the tmpfs, the
10425 + * first link must skip that, to get the accounting right.
10426 */
10427 - ret = shmem_reserve_inode(inode->i_sb);
10428 - if (ret)
10429 - goto out;
10430 + if (inode->i_nlink) {
10431 + ret = shmem_reserve_inode(inode->i_sb);
10432 + if (ret)
10433 + goto out;
10434 + }
10435
10436 dir->i_size += BOGO_DIRENT_SIZE;
10437 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
10438 diff --git a/mm/swap.c b/mm/swap.c
10439 index 26fc9b5f1b6c..a3fc028e338e 100644
10440 --- a/mm/swap.c
10441 +++ b/mm/swap.c
10442 @@ -321,11 +321,6 @@ static inline void activate_page_drain(int cpu)
10443 {
10444 }
10445
10446 -static bool need_activate_page_drain(int cpu)
10447 -{
10448 - return false;
10449 -}
10450 -
10451 void activate_page(struct page *page)
10452 {
10453 struct zone *zone = page_zone(page);
10454 @@ -654,13 +649,15 @@ void lru_add_drain(void)
10455 put_cpu();
10456 }
10457
10458 +#ifdef CONFIG_SMP
10459 +
10460 +static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
10461 +
10462 static void lru_add_drain_per_cpu(struct work_struct *dummy)
10463 {
10464 lru_add_drain();
10465 }
10466
10467 -static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
10468 -
10469 /*
10470 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
10471 * kworkers being shut down before our page_alloc_cpu_dead callback is
10472 @@ -703,6 +700,12 @@ void lru_add_drain_all(void)
10473
10474 mutex_unlock(&lock);
10475 }
10476 +#else
10477 +void lru_add_drain_all(void)
10478 +{
10479 + lru_add_drain();
10480 +}
10481 +#endif
10482
10483 /**
10484 * release_pages - batched put_page()
10485 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
10486 index a728fc492557..91a789a46b12 100644
10487 --- a/mm/vmalloc.c
10488 +++ b/mm/vmalloc.c
10489 @@ -2244,7 +2244,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
10490 if (!(area->flags & VM_USERMAP))
10491 return -EINVAL;
10492
10493 - if (kaddr + size > area->addr + area->size)
10494 + if (kaddr + size > area->addr + get_vm_area_size(area))
10495 return -EINVAL;
10496
10497 do {
10498 diff --git a/net/9p/client.c b/net/9p/client.c
10499 index 75b7bf7c7f07..23ec6187dc07 100644
10500 --- a/net/9p/client.c
10501 +++ b/net/9p/client.c
10502 @@ -1073,7 +1073,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
10503 p9_debug(P9_DEBUG_ERROR,
10504 "Please specify a msize of at least 4k\n");
10505 err = -EINVAL;
10506 - goto free_client;
10507 + goto close_trans;
10508 }
10509
10510 err = p9_client_version(clnt);
10511 diff --git a/net/core/filter.c b/net/core/filter.c
10512 index bed9061102f4..eb81e9db4093 100644
10513 --- a/net/core/filter.c
10514 +++ b/net/core/filter.c
10515 @@ -2614,8 +2614,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
10516 u32 off = skb_mac_header_len(skb);
10517 int ret;
10518
10519 - /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
10520 - if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
10521 + if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
10522 return -ENOTSUPP;
10523
10524 ret = skb_cow(skb, len_diff);
10525 @@ -2656,8 +2655,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
10526 u32 off = skb_mac_header_len(skb);
10527 int ret;
10528
10529 - /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
10530 - if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
10531 + if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
10532 return -ENOTSUPP;
10533
10534 ret = skb_unclone(skb, GFP_ATOMIC);
10535 @@ -2782,8 +2780,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
10536 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
10537 int ret;
10538
10539 - /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
10540 - if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
10541 + if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
10542 return -ENOTSUPP;
10543
10544 ret = skb_cow(skb, len_diff);
10545 @@ -2812,8 +2809,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
10546 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
10547 int ret;
10548
10549 - /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
10550 - if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
10551 + if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
10552 return -ENOTSUPP;
10553
10554 ret = skb_unclone(skb, GFP_ATOMIC);
10555 diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
10556 index 97689012b357..12a43a5369a5 100644
10557 --- a/net/ipv4/esp4.c
10558 +++ b/net/ipv4/esp4.c
10559 @@ -325,7 +325,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
10560 skb->len += tailen;
10561 skb->data_len += tailen;
10562 skb->truesize += tailen;
10563 - if (sk)
10564 + if (sk && sk_fullsock(sk))
10565 refcount_add(tailen, &sk->sk_wmem_alloc);
10566
10567 goto out;
10568 diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
10569 index 3cd237b42f44..2fa196325988 100644
10570 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
10571 +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
10572 @@ -846,9 +846,9 @@ static int clusterip_net_init(struct net *net)
10573
10574 static void clusterip_net_exit(struct net *net)
10575 {
10576 +#ifdef CONFIG_PROC_FS
10577 struct clusterip_net *cn = clusterip_pernet(net);
10578
10579 -#ifdef CONFIG_PROC_FS
10580 mutex_lock(&cn->mutex);
10581 proc_remove(cn->procdir);
10582 cn->procdir = NULL;
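
Moving the declaration of cn under CONFIG_PROC_FS keeps !CONFIG_PROC_FS builds from declaring a variable they never use. Generic sketch of the same scoping pattern (FEATURE stands in for the Kconfig option):

#include <stdio.h>

#define FEATURE 1	/* flip to 0 to mimic CONFIG_PROC_FS=n */

static void net_exit(int net)
{
#if FEATURE
	int cn = net * 2;	/* declared only where it is used */

	printf("tearing down %d\n", cn);
#endif
	(void)net;		/* parameter stays referenced either way */
}

int main(void)
{
	net_exit(21);
	return 0;
}
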
10583 diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
10584 index 88a7579c23bd..a7d996148eed 100644
10585 --- a/net/ipv6/esp6.c
10586 +++ b/net/ipv6/esp6.c
10587 @@ -293,7 +293,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
10588 skb->len += tailen;
10589 skb->data_len += tailen;
10590 skb->truesize += tailen;
10591 - if (sk)
10592 + if (sk && sk_fullsock(sk))
10593 refcount_add(tailen, &sk->sk_wmem_alloc);
10594
10595 goto out;
10596 diff --git a/net/key/af_key.c b/net/key/af_key.c
10597 index 9d61266526e7..7da629d59717 100644
10598 --- a/net/key/af_key.c
10599 +++ b/net/key/af_key.c
10600 @@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock)
10601 return 0;
10602 }
10603
10604 -static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
10605 - gfp_t allocation, struct sock *sk)
10606 +static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
10607 + struct sock *sk)
10608 {
10609 int err = -ENOBUFS;
10610
10611 - sock_hold(sk);
10612 - if (*skb2 == NULL) {
10613 - if (refcount_read(&skb->users) != 1) {
10614 - *skb2 = skb_clone(skb, allocation);
10615 - } else {
10616 - *skb2 = skb;
10617 - refcount_inc(&skb->users);
10618 - }
10619 - }
10620 - if (*skb2 != NULL) {
10621 - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
10622 - skb_set_owner_r(*skb2, sk);
10623 - skb_queue_tail(&sk->sk_receive_queue, *skb2);
10624 - sk->sk_data_ready(sk);
10625 - *skb2 = NULL;
10626 - err = 0;
10627 - }
10628 + if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
10629 + return err;
10630 +
10631 + skb = skb_clone(skb, allocation);
10632 +
10633 + if (skb) {
10634 + skb_set_owner_r(skb, sk);
10635 + skb_queue_tail(&sk->sk_receive_queue, skb);
10636 + sk->sk_data_ready(sk);
10637 + err = 0;
10638 }
10639 - sock_put(sk);
10640 return err;
10641 }
10642
10643 @@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
10644 {
10645 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
10646 struct sock *sk;
10647 - struct sk_buff *skb2 = NULL;
10648 int err = -ESRCH;
10649
10650 /* XXX Do we need something like netlink_overrun? I think
10651 @@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
10652 * socket.
10653 */
10654 if (pfk->promisc)
10655 - pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
10656 + pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
10657
10658 /* the exact target will be processed later */
10659 if (sk == one_sk)
10660 @@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
10661 continue;
10662 }
10663
10664 - err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
10665 + err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
10666
10667 /* Error is cleared after successful sending to at least one
10668 * registered KM */
10669 @@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
10670 rcu_read_unlock();
10671
10672 if (one_sk != NULL)
10673 - err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
10674 + err = pfkey_broadcast_one(skb, allocation, one_sk);
10675
10676 - kfree_skb(skb2);
10677 kfree_skb(skb);
10678 return err;
10679 }
10680 diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
10681 index 69e831bc317b..54821fb1a960 100644
10682 --- a/net/mac80211/agg-tx.c
10683 +++ b/net/mac80211/agg-tx.c
10684 @@ -8,7 +8,7 @@
10685 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
10686 * Copyright 2007-2010, Intel Corporation
10687 * Copyright(c) 2015-2017 Intel Deutschland GmbH
10688 - * Copyright (C) 2018 Intel Corporation
10689 + * Copyright (C) 2018 - 2019 Intel Corporation
10690 *
10691 * This program is free software; you can redistribute it and/or modify
10692 * it under the terms of the GNU General Public License version 2 as
10693 @@ -366,6 +366,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
10694
10695 set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
10696
10697 + ieee80211_agg_stop_txq(sta, tid);
10698 +
10699 spin_unlock_bh(&sta->lock);
10700
10701 ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
10702 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
10703 index 716cd6442d86..3deaa01ebee4 100644
10704 --- a/net/mac80211/util.c
10705 +++ b/net/mac80211/util.c
10706 @@ -5,7 +5,7 @@
10707 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
10708 * Copyright 2013-2014 Intel Mobile Communications GmbH
10709 * Copyright (C) 2015-2017 Intel Deutschland GmbH
10710 - * Copyright (C) 2018 Intel Corporation
10711 + * Copyright (C) 2018-2019 Intel Corporation
10712 *
10713 * This program is free software; you can redistribute it and/or modify
10714 * it under the terms of the GNU General Public License version 2 as
10715 @@ -2020,6 +2020,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
10716 case NL80211_IFTYPE_AP_VLAN:
10717 case NL80211_IFTYPE_MONITOR:
10718 break;
10719 + case NL80211_IFTYPE_ADHOC:
10720 + if (sdata->vif.bss_conf.ibss_joined)
10721 + WARN_ON(drv_join_ibss(local, sdata));
10722 + /* fall through */
10723 default:
10724 ieee80211_reconfig_stations(sdata);
10725 /* fall through */
10726 diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
10727 index cad48d07c818..8401cefd9f65 100644
10728 --- a/net/netfilter/ipvs/Kconfig
10729 +++ b/net/netfilter/ipvs/Kconfig
10730 @@ -29,6 +29,7 @@ config IP_VS_IPV6
10731 bool "IPv6 support for IPVS"
10732 depends on IPV6 = y || IP_VS = IPV6
10733 select IP6_NF_IPTABLES
10734 + select NF_DEFRAG_IPV6
10735 ---help---
10736 Add IPv6 support to IPVS.
10737
10738 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
10739 index 7ca926a03b81..3f963ea22277 100644
10740 --- a/net/netfilter/ipvs/ip_vs_core.c
10741 +++ b/net/netfilter/ipvs/ip_vs_core.c
10742 @@ -1536,14 +1536,12 @@ ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
10743 /* sorry, all this trouble for a no-hit :) */
10744 IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
10745 "ip_vs_in: packet continues traversal as normal");
10746 - if (iph->fragoffs) {
10747 - /* Fragment that couldn't be mapped to a conn entry
10748 - * is missing module nf_defrag_ipv6
10749 - */
10750 - IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
10751 +
10752 + /* Fragment couldn't be mapped to a conn entry */
10753 + if (iph->fragoffs)
10754 IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
10755 "unhandled fragment");
10756 - }
10757 +
10758 *verdict = NF_ACCEPT;
10759 return 0;
10760 }
10761 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
10762 index 55a77314340a..8fd8d06454d6 100644
10763 --- a/net/netfilter/ipvs/ip_vs_ctl.c
10764 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
10765 @@ -43,6 +43,7 @@
10766 #ifdef CONFIG_IP_VS_IPV6
10767 #include <net/ipv6.h>
10768 #include <net/ip6_route.h>
10769 +#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
10770 #endif
10771 #include <net/route.h>
10772 #include <net/sock.h>
10773 @@ -895,6 +896,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
10774 {
10775 struct ip_vs_dest *dest;
10776 unsigned int atype, i;
10777 + int ret = 0;
10778
10779 EnterFunction(2);
10780
10781 @@ -905,6 +907,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
10782 atype & IPV6_ADDR_LINKLOCAL) &&
10783 !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6))
10784 return -EINVAL;
10785 +
10786 + ret = nf_defrag_ipv6_enable(svc->ipvs->net);
10787 + if (ret)
10788 + return ret;
10789 } else
10790 #endif
10791 {
10792 @@ -1228,6 +1234,10 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
10793 ret = -EINVAL;
10794 goto out_err;
10795 }
10796 +
10797 + ret = nf_defrag_ipv6_enable(ipvs->net);
10798 + if (ret)
10799 + goto out_err;
10800 }
10801 #endif
10802
10803 diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
10804 index aecadd471e1d..13e1ac333fa4 100644
10805 --- a/net/netfilter/x_tables.c
10806 +++ b/net/netfilter/x_tables.c
10807 @@ -1899,7 +1899,7 @@ static int __init xt_init(void)
10808 seqcount_init(&per_cpu(xt_recseq, i));
10809 }
10810
10811 - xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
10812 + xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
10813 if (!xt)
10814 return -ENOMEM;
10815
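
kmalloc_array() returns uninitialized memory; kcalloc() is the same overflow-checked array allocation but zeroed, which the xt[] table setup above relies on. Userspace analogue with malloc()/calloc():

#include <stdio.h>
#include <stdlib.h>

struct af_entry { void *target; void *match; };

int main(void)
{
	size_t n = 13;	/* stand-in for NFPROTO_NUMPROTO */

	/* malloc(n * size) analogue of kmalloc_array(): contents undefined. */
	struct af_entry *dirty = malloc(n * sizeof(*dirty));

	/* calloc() analogue of kcalloc(): zeroed, with the n * size
	 * multiplication overflow-checked by the allocator itself. */
	struct af_entry *clean = calloc(n, sizeof(*clean));

	printf("clean[0].target == NULL: %s\n",
	       clean && !clean[0].target ? "yes" : "alloc failed");
	free(dirty);
	free(clean);
	return 0;
}
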
10816 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
10817 index 9fc76b19cd3c..db3473540303 100644
10818 --- a/net/phonet/pep.c
10819 +++ b/net/phonet/pep.c
10820 @@ -132,7 +132,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code,
10821 ph->utid = 0;
10822 ph->message_id = id;
10823 ph->pipe_handle = pn->pipe_handle;
10824 - ph->data[0] = code;
10825 + ph->error_code = code;
10826 return pn_skb_send(sk, skb, NULL);
10827 }
10828
10829 @@ -153,7 +153,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
10830 ph->utid = id; /* whatever */
10831 ph->message_id = id;
10832 ph->pipe_handle = pn->pipe_handle;
10833 - ph->data[0] = code;
10834 + ph->error_code = code;
10835 return pn_skb_send(sk, skb, NULL);
10836 }
10837
10838 @@ -208,7 +208,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
10839 struct pnpipehdr *ph;
10840 struct sockaddr_pn dst;
10841 u8 data[4] = {
10842 - oph->data[0], /* PEP type */
10843 + oph->pep_type, /* PEP type */
10844 code, /* error code, at an unusual offset */
10845 PAD, PAD,
10846 };
10847 @@ -221,7 +221,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
10848 ph->utid = oph->utid;
10849 ph->message_id = PNS_PEP_CTRL_RESP;
10850 ph->pipe_handle = oph->pipe_handle;
10851 - ph->data[0] = oph->data[1]; /* CTRL id */
10852 + ph->data0 = oph->data[0]; /* CTRL id */
10853
10854 pn_skb_get_src_sockaddr(oskb, &dst);
10855 return pn_skb_send(sk, skb, &dst);
10856 @@ -272,17 +272,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
10857 return -EINVAL;
10858
10859 hdr = pnp_hdr(skb);
10860 - if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
10861 + if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
10862 net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
10863 - (unsigned int)hdr->data[0]);
10864 + (unsigned int)hdr->pep_type);
10865 return -EOPNOTSUPP;
10866 }
10867
10868 - switch (hdr->data[1]) {
10869 + switch (hdr->data[0]) {
10870 case PN_PEP_IND_FLOW_CONTROL:
10871 switch (pn->tx_fc) {
10872 case PN_LEGACY_FLOW_CONTROL:
10873 - switch (hdr->data[4]) {
10874 + switch (hdr->data[3]) {
10875 case PEP_IND_BUSY:
10876 atomic_set(&pn->tx_credits, 0);
10877 break;
10878 @@ -292,7 +292,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
10879 }
10880 break;
10881 case PN_ONE_CREDIT_FLOW_CONTROL:
10882 - if (hdr->data[4] == PEP_IND_READY)
10883 + if (hdr->data[3] == PEP_IND_READY)
10884 atomic_set(&pn->tx_credits, wake = 1);
10885 break;
10886 }
10887 @@ -301,12 +301,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
10888 case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
10889 if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
10890 break;
10891 - atomic_add(wake = hdr->data[4], &pn->tx_credits);
10892 + atomic_add(wake = hdr->data[3], &pn->tx_credits);
10893 break;
10894
10895 default:
10896 net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
10897 - (unsigned int)hdr->data[1]);
10898 + (unsigned int)hdr->data[0]);
10899 return -EOPNOTSUPP;
10900 }
10901 if (wake)
10902 @@ -318,7 +318,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
10903 {
10904 struct pep_sock *pn = pep_sk(sk);
10905 struct pnpipehdr *hdr = pnp_hdr(skb);
10906 - u8 n_sb = hdr->data[0];
10907 + u8 n_sb = hdr->data0;
10908
10909 pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
10910 __skb_pull(skb, sizeof(*hdr));
10911 @@ -506,7 +506,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
10912 return -ECONNREFUSED;
10913
10914 /* Parse sub-blocks */
10915 - n_sb = hdr->data[4];
10916 + n_sb = hdr->data[3];
10917 while (n_sb > 0) {
10918 u8 type, buf[6], len = sizeof(buf);
10919 const u8 *data = pep_get_sb(skb, &type, &len, buf);
10920 @@ -739,7 +739,7 @@ static int pipe_do_remove(struct sock *sk)
10921 ph->utid = 0;
10922 ph->message_id = PNS_PIPE_REMOVE_REQ;
10923 ph->pipe_handle = pn->pipe_handle;
10924 - ph->data[0] = PAD;
10925 + ph->data0 = PAD;
10926 return pn_skb_send(sk, skb, NULL);
10927 }
10928
10929 @@ -817,7 +817,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
10930 peer_type = hdr->other_pep_type << 8;
10931
10932 /* Parse sub-blocks (options) */
10933 - n_sb = hdr->data[4];
10934 + n_sb = hdr->data[3];
10935 while (n_sb > 0) {
10936 u8 type, buf[1], len = sizeof(buf);
10937 const u8 *data = pep_get_sb(skb, &type, &len, buf);
10938 @@ -1109,7 +1109,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
10939 ph->utid = 0;
10940 if (pn->aligned) {
10941 ph->message_id = PNS_PIPE_ALIGNED_DATA;
10942 - ph->data[0] = 0; /* padding */
10943 + ph->data0 = 0; /* padding */
10944 } else
10945 ph->message_id = PNS_PIPE_DATA;
10946 ph->pipe_handle = pn->pipe_handle;
10947 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
10948 index 97a8282955a8..8566531c2f10 100644
10949 --- a/net/sunrpc/svcsock.c
10950 +++ b/net/sunrpc/svcsock.c
10951 @@ -381,12 +381,16 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
10952 /*
10953 * Set socket snd and rcv buffer lengths
10954 */
10955 -static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
10956 - unsigned int rcv)
10957 +static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
10958 {
10959 + unsigned int max_mesg = svsk->sk_xprt.xpt_server->sv_max_mesg;
10960 + struct socket *sock = svsk->sk_sock;
10961 +
10962 + nreqs = min(nreqs, INT_MAX / 2 / max_mesg);
10963 +
10964 lock_sock(sock->sk);
10965 - sock->sk->sk_sndbuf = snd * 2;
10966 - sock->sk->sk_rcvbuf = rcv * 2;
10967 + sock->sk->sk_sndbuf = nreqs * max_mesg * 2;
10968 + sock->sk->sk_rcvbuf = nreqs * max_mesg * 2;
10969 sock->sk->sk_write_space(sock->sk);
10970 release_sock(sock->sk);
10971 }
10972 @@ -548,9 +552,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
10973 * provides an upper bound on the number of threads
10974 * which will access the socket.
10975 */
10976 - svc_sock_setbufsize(svsk->sk_sock,
10977 - (serv->sv_nrthreads+3) * serv->sv_max_mesg,
10978 - (serv->sv_nrthreads+3) * serv->sv_max_mesg);
10979 + svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);
10980
10981 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
10982 skb = NULL;
10983 @@ -718,9 +720,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
10984 * receive and respond to one request.
10985 * svc_udp_recvfrom will re-adjust if necessary
10986 */
10987 - svc_sock_setbufsize(svsk->sk_sock,
10988 - 3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
10989 - 3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
10990 + svc_sock_setbufsize(svsk, 3);
10991
10992 /* data might have come in before data_ready set up */
10993 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
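
The min() cap in the new svc_sock_setbufsize() above guarantees that nreqs * max_mesg * 2 stays within the int range of sk_sndbuf/sk_rcvbuf. Arithmetic demo with an assumed 1 MB maximum message:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int max_mesg = 1 << 20;		/* assumed sv_max_mesg */
	unsigned int nreqs = 5000;			/* huge thread count */
	unsigned int cap = INT_MAX / 2 / max_mesg;	/* 1023 here */

	if (nreqs > cap)
		nreqs = cap;
	long bytes = (long)nreqs * max_mesg * 2;	/* <= INT_MAX by construction */
	printf("cap=%u -> sndbuf=%ld (fits int: %s)\n",
	       cap, bytes, bytes <= INT_MAX ? "yes" : "no");
	return 0;
}
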
10994 diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
10995 index 3d6bf790cf1f..5ddbf227e7c6 100644
10996 --- a/net/sunrpc/xprtrdma/verbs.c
10997 +++ b/net/sunrpc/xprtrdma/verbs.c
10998 @@ -546,7 +546,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
10999
11000 sendcq = ib_alloc_cq(ia->ri_device, NULL,
11001 ep->rep_attr.cap.max_send_wr + 1,
11002 - 1, IB_POLL_WORKQUEUE);
11003 + ia->ri_device->num_comp_vectors > 1 ? 1 : 0,
11004 + IB_POLL_WORKQUEUE);
11005 if (IS_ERR(sendcq)) {
11006 rc = PTR_ERR(sendcq);
11007 dprintk("RPC: %s: failed to create send CQ: %i\n",
11008 diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
11009 index 6f05e831a73e..82723ef44db3 100644
11010 --- a/net/xfrm/xfrm_interface.c
11011 +++ b/net/xfrm/xfrm_interface.c
11012 @@ -76,10 +76,10 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
11013 int ifindex;
11014 struct xfrm_if *xi;
11015
11016 - if (!skb->dev)
11017 + if (!secpath_exists(skb) || !skb->dev)
11018 return NULL;
11019
11020 - xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id);
11021 + xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
11022 ifindex = skb->dev->ifindex;
11023
11024 for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
11025 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
11026 index 6ea8036fcdbe..bf5d59270f79 100644
11027 --- a/net/xfrm/xfrm_policy.c
11028 +++ b/net/xfrm/xfrm_policy.c
11029 @@ -2340,8 +2340,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
11030
11031 if (ifcb) {
11032 xi = ifcb->decode_session(skb);
11033 - if (xi)
11034 + if (xi) {
11035 if_id = xi->p.if_id;
11036 + net = xi->net;
11037 + }
11038 }
11039 rcu_read_unlock();
11040
11041 diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
11042 index a9186a98a37d..0c9c54b57515 100644
11043 --- a/scripts/kallsyms.c
11044 +++ b/scripts/kallsyms.c
11045 @@ -120,8 +120,8 @@ static int read_symbol(FILE *in, struct sym_entry *s)
11046 fprintf(stderr, "Read error or end of file.\n");
11047 return -1;
11048 }
11049 - if (strlen(sym) > KSYM_NAME_LEN) {
11050 - fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n"
11051 + if (strlen(sym) >= KSYM_NAME_LEN) {
11052 + fprintf(stderr, "Symbol %s too long for kallsyms (%zu >= %d).\n"
11053 "Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n",
11054 sym, strlen(sym), KSYM_NAME_LEN);
11055 return -1;
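The comparison flips from > to >= because a symbol whose name is exactly KSYM_NAME_LEN characters long already cannot fit: kernel-side buffers such as char namebuf[KSYM_NAME_LEN] must also hold the terminating NUL. A small sketch of the off-by-one (KSYM_NAME_LEN is 128 in this kernel series):

#include <stdio.h>
#include <string.h>

#define KSYM_NAME_LEN 128

/* a buffer of KSYM_NAME_LEN bytes holds at most KSYM_NAME_LEN - 1
 * characters plus the NUL, so names of length == KSYM_NAME_LEN must
 * be rejected too */
static int symbol_fits(const char *name)
{
	return strlen(name) < KSYM_NAME_LEN;
}

int main(void)
{
	char name[KSYM_NAME_LEN + 1];

	memset(name, 'a', KSYM_NAME_LEN);
	name[KSYM_NAME_LEN] = '\0';			/* strlen == KSYM_NAME_LEN */
	printf("fits: %d\n", symbol_fits(name));	/* prints 0 */
	return 0;
}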
11056 diff --git a/security/keys/internal.h b/security/keys/internal.h
11057 index 9f8208dc0e55..a02742621c8d 100644
11058 --- a/security/keys/internal.h
11059 +++ b/security/keys/internal.h
11060 @@ -188,20 +188,9 @@ static inline int key_permission(const key_ref_t key_ref, unsigned perm)
11061 return key_task_permission(key_ref, current_cred(), perm);
11062 }
11063
11064 -/*
11065 - * Authorisation record for request_key().
11066 - */
11067 -struct request_key_auth {
11068 - struct key *target_key;
11069 - struct key *dest_keyring;
11070 - const struct cred *cred;
11071 - void *callout_info;
11072 - size_t callout_len;
11073 - pid_t pid;
11074 -} __randomize_layout;
11075 -
11076 extern struct key_type key_type_request_key_auth;
11077 extern struct key *request_key_auth_new(struct key *target,
11078 + const char *op,
11079 const void *callout_info,
11080 size_t callout_len,
11081 struct key *dest_keyring);
11082 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
11083 index 1ffe60bb2845..ca31af186abd 100644
11084 --- a/security/keys/keyctl.c
11085 +++ b/security/keys/keyctl.c
11086 @@ -26,6 +26,7 @@
11087 #include <linux/security.h>
11088 #include <linux/uio.h>
11089 #include <linux/uaccess.h>
11090 +#include <keys/request_key_auth-type.h>
11091 #include "internal.h"
11092
11093 #define KEY_MAX_DESC_SIZE 4096
11094 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
11095 index d5b25e535d3a..d776d2114651 100644
11096 --- a/security/keys/process_keys.c
11097 +++ b/security/keys/process_keys.c
11098 @@ -20,6 +20,7 @@
11099 #include <linux/security.h>
11100 #include <linux/user_namespace.h>
11101 #include <linux/uaccess.h>
11102 +#include <keys/request_key_auth-type.h>
11103 #include "internal.h"
11104
11105 /* Session keyring create vs join semaphore */
11106 diff --git a/security/keys/request_key.c b/security/keys/request_key.c
11107 index 738553698649..1a547cec8385 100644
11108 --- a/security/keys/request_key.c
11109 +++ b/security/keys/request_key.c
11110 @@ -18,31 +18,30 @@
11111 #include <linux/keyctl.h>
11112 #include <linux/slab.h>
11113 #include "internal.h"
11114 +#include <keys/request_key_auth-type.h>
11115
11116 #define key_negative_timeout 60 /* default timeout on a negative key's existence */
11117
11118 /**
11119 * complete_request_key - Complete the construction of a key.
11120 - * @cons: The key construction record.
11121 + * @auth_key: The authorisation key.
11122 * @error: The success or failure of the construction.
11123 *
11124 * Complete the attempt to construct a key. The key will be negated
11125 * if an error is indicated. The authorisation key will be revoked
11126 * unconditionally.
11127 */
11128 -void complete_request_key(struct key_construction *cons, int error)
11129 +void complete_request_key(struct key *authkey, int error)
11130 {
11131 - kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error);
11132 + struct request_key_auth *rka = get_request_key_auth(authkey);
11133 + struct key *key = rka->target_key;
11134 +
11135 + kenter("%d{%d},%d", authkey->serial, key->serial, error);
11136
11137 if (error < 0)
11138 - key_negate_and_link(cons->key, key_negative_timeout, NULL,
11139 - cons->authkey);
11140 + key_negate_and_link(key, key_negative_timeout, NULL, authkey);
11141 else
11142 - key_revoke(cons->authkey);
11143 -
11144 - key_put(cons->key);
11145 - key_put(cons->authkey);
11146 - kfree(cons);
11147 + key_revoke(authkey);
11148 }
11149 EXPORT_SYMBOL(complete_request_key);
11150
11151 @@ -91,21 +90,19 @@ static int call_usermodehelper_keys(const char *path, char **argv, char **envp,
11152 * Request userspace finish the construction of a key
11153 * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
11154 */
11155 -static int call_sbin_request_key(struct key_construction *cons,
11156 - const char *op,
11157 - void *aux)
11158 +static int call_sbin_request_key(struct key *authkey, void *aux)
11159 {
11160 static char const request_key[] = "/sbin/request-key";
11161 + struct request_key_auth *rka = get_request_key_auth(authkey);
11162 const struct cred *cred = current_cred();
11163 key_serial_t prkey, sskey;
11164 - struct key *key = cons->key, *authkey = cons->authkey, *keyring,
11165 - *session;
11166 + struct key *key = rka->target_key, *keyring, *session;
11167 char *argv[9], *envp[3], uid_str[12], gid_str[12];
11168 char key_str[12], keyring_str[3][12];
11169 char desc[20];
11170 int ret, i;
11171
11172 - kenter("{%d},{%d},%s", key->serial, authkey->serial, op);
11173 + kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op);
11174
11175 ret = install_user_keyrings();
11176 if (ret < 0)
11177 @@ -163,7 +160,7 @@ static int call_sbin_request_key(struct key_construction *cons,
11178 /* set up the argument list */
11179 i = 0;
11180 argv[i++] = (char *)request_key;
11181 - argv[i++] = (char *) op;
11182 + argv[i++] = (char *)rka->op;
11183 argv[i++] = key_str;
11184 argv[i++] = uid_str;
11185 argv[i++] = gid_str;
11186 @@ -191,7 +188,7 @@ error_link:
11187 key_put(keyring);
11188
11189 error_alloc:
11190 - complete_request_key(cons, ret);
11191 + complete_request_key(authkey, ret);
11192 kleave(" = %d", ret);
11193 return ret;
11194 }
11195 @@ -205,42 +202,31 @@ static int construct_key(struct key *key, const void *callout_info,
11196 size_t callout_len, void *aux,
11197 struct key *dest_keyring)
11198 {
11199 - struct key_construction *cons;
11200 request_key_actor_t actor;
11201 struct key *authkey;
11202 int ret;
11203
11204 kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
11205
11206 - cons = kmalloc(sizeof(*cons), GFP_KERNEL);
11207 - if (!cons)
11208 - return -ENOMEM;
11209 -
11210 /* allocate an authorisation key */
11211 - authkey = request_key_auth_new(key, callout_info, callout_len,
11212 + authkey = request_key_auth_new(key, "create", callout_info, callout_len,
11213 dest_keyring);
11214 - if (IS_ERR(authkey)) {
11215 - kfree(cons);
11216 - ret = PTR_ERR(authkey);
11217 - authkey = NULL;
11218 - } else {
11219 - cons->authkey = key_get(authkey);
11220 - cons->key = key_get(key);
11221 + if (IS_ERR(authkey))
11222 + return PTR_ERR(authkey);
11223
11224 - /* make the call */
11225 - actor = call_sbin_request_key;
11226 - if (key->type->request_key)
11227 - actor = key->type->request_key;
11228 + /* Make the call */
11229 + actor = call_sbin_request_key;
11230 + if (key->type->request_key)
11231 + actor = key->type->request_key;
11232
11233 - ret = actor(cons, "create", aux);
11234 + ret = actor(authkey, aux);
11235
11236 - /* check that the actor called complete_request_key() prior to
11237 - * returning an error */
11238 - WARN_ON(ret < 0 &&
11239 - !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
11240 - key_put(authkey);
11241 - }
11242 + /* check that the actor called complete_request_key() prior to
11243 + * returning an error */
11244 + WARN_ON(ret < 0 &&
11245 + !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
11246
11247 + key_put(authkey);
11248 kleave(" = %d", ret);
11249 return ret;
11250 }
11251 @@ -275,7 +261,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
11252 if (cred->request_key_auth) {
11253 authkey = cred->request_key_auth;
11254 down_read(&authkey->sem);
11255 - rka = authkey->payload.data[0];
11256 + rka = get_request_key_auth(authkey);
11257 if (!test_bit(KEY_FLAG_REVOKED,
11258 &authkey->flags))
11259 dest_keyring =
11260 diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
11261 index 6797843154f0..5e515791ccd1 100644
11262 --- a/security/keys/request_key_auth.c
11263 +++ b/security/keys/request_key_auth.c
11264 @@ -18,7 +18,7 @@
11265 #include <linux/slab.h>
11266 #include <linux/uaccess.h>
11267 #include "internal.h"
11268 -#include <keys/user-type.h>
11269 +#include <keys/request_key_auth-type.h>
11270
11271 static int request_key_auth_preparse(struct key_preparsed_payload *);
11272 static void request_key_auth_free_preparse(struct key_preparsed_payload *);
11273 @@ -69,7 +69,7 @@ static int request_key_auth_instantiate(struct key *key,
11274 static void request_key_auth_describe(const struct key *key,
11275 struct seq_file *m)
11276 {
11277 - struct request_key_auth *rka = key->payload.data[0];
11278 + struct request_key_auth *rka = get_request_key_auth(key);
11279
11280 seq_puts(m, "key:");
11281 seq_puts(m, key->description);
11282 @@ -84,7 +84,7 @@ static void request_key_auth_describe(const struct key *key,
11283 static long request_key_auth_read(const struct key *key,
11284 char __user *buffer, size_t buflen)
11285 {
11286 - struct request_key_auth *rka = key->payload.data[0];
11287 + struct request_key_auth *rka = get_request_key_auth(key);
11288 size_t datalen;
11289 long ret;
11290
11291 @@ -110,7 +110,7 @@ static long request_key_auth_read(const struct key *key,
11292 */
11293 static void request_key_auth_revoke(struct key *key)
11294 {
11295 - struct request_key_auth *rka = key->payload.data[0];
11296 + struct request_key_auth *rka = get_request_key_auth(key);
11297
11298 kenter("{%d}", key->serial);
11299
11300 @@ -137,7 +137,7 @@ static void free_request_key_auth(struct request_key_auth *rka)
11301 */
11302 static void request_key_auth_destroy(struct key *key)
11303 {
11304 - struct request_key_auth *rka = key->payload.data[0];
11305 + struct request_key_auth *rka = get_request_key_auth(key);
11306
11307 kenter("{%d}", key->serial);
11308
11309 @@ -148,8 +148,9 @@ static void request_key_auth_destroy(struct key *key)
11310 * Create an authorisation token for /sbin/request-key or whoever to gain
11311 * access to the caller's security data.
11312 */
11313 -struct key *request_key_auth_new(struct key *target, const void *callout_info,
11314 - size_t callout_len, struct key *dest_keyring)
11315 +struct key *request_key_auth_new(struct key *target, const char *op,
11316 + const void *callout_info, size_t callout_len,
11317 + struct key *dest_keyring)
11318 {
11319 struct request_key_auth *rka, *irka;
11320 const struct cred *cred = current->cred;
11321 @@ -167,6 +168,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
11322 if (!rka->callout_info)
11323 goto error_free_rka;
11324 rka->callout_len = callout_len;
11325 + strlcpy(rka->op, op, sizeof(rka->op));
11326
11327 /* see if the calling process is already servicing the key request of
11328 * another process */
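The request_key refactor above removes the separately allocated key_construction record: the actor is now handed the authorisation key itself, and the target key, callout data and operation name all live in that key's payload. The new <keys/request_key_auth-type.h> header is not part of this excerpt; based on the accesses above (get_request_key_auth(), rka->op, and the strlcpy() into rka->op), it presumably carries a non-runnable sketch along these lines:

struct request_key_auth {
	struct key		*target_key;
	struct key		*dest_keyring;
	const struct cred	*cred;
	void			*callout_info;
	size_t			callout_len;
	pid_t			pid;
	char			op[8];		/* filled by the strlcpy() above */
} __randomize_layout;

static inline struct request_key_auth *get_request_key_auth(const struct key *key)
{
	return key->payload.data[0];
}

This also explains the construct_key() simplification: with no intermediate record to allocate, the -ENOMEM path and the paired key_get()/key_put() bookkeeping disappear.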
11329 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
11330 index 3c3878f0d2fa..6ea3d3aa1a1e 100644
11331 --- a/security/selinux/hooks.c
11332 +++ b/security/selinux/hooks.c
11333 @@ -1034,8 +1034,11 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
11334 BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
11335
11336 /* if fs is reusing a sb, make sure that the contexts match */
11337 - if (newsbsec->flags & SE_SBINITIALIZED)
11338 + if (newsbsec->flags & SE_SBINITIALIZED) {
11339 + if ((kern_flags & SECURITY_LSM_NATIVE_LABELS) && !set_context)
11340 + *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
11341 return selinux_cmp_sb_context(oldsb, newsb);
11342 + }
11343
11344 mutex_lock(&newsbsec->lock);
11345
11346 @@ -5334,6 +5337,9 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
11347 return -EINVAL;
11348 }
11349
11350 + if (walk_size + len > addrlen)
11351 + return -EINVAL;
11352 +
11353 err = -EINVAL;
11354 switch (optname) {
11355 /* Bind checks */
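The added walk_size + len > addrlen check in selinux_sctp_bind_connect() rejects a trailing sockaddr entry that the declared buffer length truncates, before anything reads past the end of the buffer. A user-space sketch of the same validation pattern (illustrative, not the SELinux code):

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int walk_addrs(const char *buf, size_t addrlen)
{
	size_t walk_size = 0;

	while (walk_size < addrlen) {
		const struct sockaddr *sa =
			(const struct sockaddr *)(buf + walk_size);
		size_t len;

		switch (sa->sa_family) {
		case AF_INET:
			len = sizeof(struct sockaddr_in);
			break;
		case AF_INET6:
			len = sizeof(struct sockaddr_in6);
			break;
		default:
			return -1;
		}
		if (walk_size + len > addrlen)
			return -1;	/* truncated entry: stop before reading it */
		walk_size += len;
	}
	return 0;
}

int main(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET };
	char buf[sizeof(sin) + 4];

	memcpy(buf, &sin, sizeof(sin));
	memcpy(buf + sizeof(sin), &sin, 4);	/* deliberately truncated entry */
	printf("%d\n", walk_addrs(buf, sizeof(buf)));	/* prints -1 */
	return 0;
}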
11356 diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
11357 index 690c26e7389e..3be0e1478d7d 100644
11358 --- a/sound/soc/codecs/pcm186x.c
11359 +++ b/sound/soc/codecs/pcm186x.c
11360 @@ -42,7 +42,7 @@ struct pcm186x_priv {
11361 bool is_master_mode;
11362 };
11363
11364 -static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 4000, 50);
11365 +static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 50, 0);
11366
11367 static const struct snd_kcontrol_new pcm1863_snd_controls[] = {
11368 SOC_DOUBLE_R_S_TLV("ADC Capture Volume", PCM186X_PGA_VAL_CH1_L,
11369 @@ -158,7 +158,7 @@ static const struct snd_soc_dapm_widget pcm1863_dapm_widgets[] = {
11370 * Put the codec into SLEEP mode when not in use, allowing the
11371 * Energysense mechanism to operate.
11372 */
11373 - SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 0),
11374 + SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 1),
11375 };
11376
11377 static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
11378 @@ -184,8 +184,8 @@ static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
11379 * Put the codec into SLEEP mode when not in use, allowing the
11380 * Energysense mechanism to operate.
11381 */
11382 - SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 0),
11383 - SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 0),
11384 + SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 1),
11385 + SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 1),
11386 };
11387
11388 static const struct snd_soc_dapm_route pcm1863_dapm_routes[] = {
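For reference, DECLARE_TLV_DB_SCALE(name, min, step, mute) takes min and step in 0.01 dB units with mute as a flag, so the corrected (-1200, 50, 0) declares a PGA range starting at -12.00 dB in 0.50 dB steps: a register value n maps to -12.00 dB + n * 0.50 dB, so n = 24 gives 0 dB. The old (-1200, 4000, 50) had effectively slid the arguments, declaring an impossible 40 dB step with a mute flag of 50. The ADC widget changes flip the DAPM invert flag because, as the surrounding comments suggest, the POWER_CTRL bit is a sleep bit (1 = sleep), so the widget must clear it, not set it, to power the ADC up.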
11389 diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
11390 index afe7d5b19313..79ebcc239786 100644
11391 --- a/sound/soc/codecs/rt5682.c
11392 +++ b/sound/soc/codecs/rt5682.c
11393 @@ -1757,7 +1757,9 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
11394 {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc},
11395 {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
11396 {"ADC STO1 ASRC", NULL, "AD ASRC"},
11397 + {"ADC STO1 ASRC", NULL, "DA ASRC"},
11398 {"ADC STO1 ASRC", NULL, "CLKDET"},
11399 + {"DAC STO1 ASRC", NULL, "AD ASRC"},
11400 {"DAC STO1 ASRC", NULL, "DA ASRC"},
11401 {"DAC STO1 ASRC", NULL, "CLKDET"},
11402
11403 diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
11404 index c1d1d06783e5..4daefa5b150a 100644
11405 --- a/sound/soc/fsl/fsl_esai.c
11406 +++ b/sound/soc/fsl/fsl_esai.c
11407 @@ -398,7 +398,8 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
11408 break;
11409 case SND_SOC_DAIFMT_RIGHT_J:
11410 /* Data on rising edge of bclk, frame high, right aligned */
11411 - xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCR_xWA;
11412 + xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP;
11413 + xcr |= ESAI_xCR_xWA;
11414 break;
11415 case SND_SOC_DAIFMT_DSP_A:
11416 /* Data on rising edge of bclk, frame high, 1clk before data */
11417 @@ -455,12 +456,12 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
11418 return -EINVAL;
11419 }
11420
11421 - mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR;
11422 + mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR | ESAI_xCR_xWA;
11423 regmap_update_bits(esai_priv->regmap, REG_ESAI_TCR, mask, xcr);
11424 regmap_update_bits(esai_priv->regmap, REG_ESAI_RCR, mask, xcr);
11425
11426 mask = ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCCR_xFSP |
11427 - ESAI_xCCR_xFSD | ESAI_xCCR_xCKD | ESAI_xCR_xWA;
11428 + ESAI_xCCR_xFSD | ESAI_xCCR_xCKD;
11429 regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, mask, xccr);
11430 regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, mask, xccr);
11431
11432 diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
11433 index d6c62aa13041..ce00fe2f6aae 100644
11434 --- a/sound/soc/samsung/i2s.c
11435 +++ b/sound/soc/samsung/i2s.c
11436 @@ -700,6 +700,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
11437 {
11438 struct i2s_dai *i2s = to_info(dai);
11439 u32 mod, mask = 0, val = 0;
11440 + struct clk *rclksrc;
11441 unsigned long flags;
11442
11443 WARN_ON(!pm_runtime_active(dai->dev));
11444 @@ -782,6 +783,10 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
11445
11446 i2s->frmclk = params_rate(params);
11447
11448 + rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
11449 + if (rclksrc && !IS_ERR(rclksrc))
11450 + i2s->rclk_srcrate = clk_get_rate(rclksrc);
11451 +
11452 return 0;
11453 }
11454
11455 @@ -886,11 +891,6 @@ static int config_setup(struct i2s_dai *i2s)
11456 return 0;
11457
11458 if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
11459 - struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
11460 -
11461 - if (rclksrc && !IS_ERR(rclksrc))
11462 - i2s->rclk_srcrate = clk_get_rate(rclksrc);
11463 -
11464 psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
11465 writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
11466 dev_dbg(&i2s->pdev->dev,
11467 diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
11468 index a566dae3ec8a..9410e0a9b14b 100644
11469 --- a/sound/soc/sh/rcar/ssi.c
11470 +++ b/sound/soc/sh/rcar/ssi.c
11471 @@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
11472 if (rsnd_ssi_is_multi_slave(mod, io))
11473 return 0;
11474
11475 - if (ssi->usrcnt > 1) {
11476 + if (ssi->usrcnt > 0) {
11477 if (ssi->rate != rate) {
11478 dev_err(dev, "SSI parent/child should use same rate\n");
11479 return -EINVAL;
11480 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
11481 index 653706945222..9b78fb3daa7b 100644
11482 --- a/sound/soc/soc-dapm.c
11483 +++ b/sound/soc/soc-dapm.c
11484 @@ -71,12 +71,16 @@ static int dapm_up_seq[] = {
11485 [snd_soc_dapm_clock_supply] = 1,
11486 [snd_soc_dapm_supply] = 2,
11487 [snd_soc_dapm_micbias] = 3,
11488 + [snd_soc_dapm_vmid] = 3,
11489 [snd_soc_dapm_dai_link] = 2,
11490 [snd_soc_dapm_dai_in] = 4,
11491 [snd_soc_dapm_dai_out] = 4,
11492 [snd_soc_dapm_aif_in] = 4,
11493 [snd_soc_dapm_aif_out] = 4,
11494 [snd_soc_dapm_mic] = 5,
11495 + [snd_soc_dapm_siggen] = 5,
11496 + [snd_soc_dapm_input] = 5,
11497 + [snd_soc_dapm_output] = 5,
11498 [snd_soc_dapm_mux] = 6,
11499 [snd_soc_dapm_demux] = 6,
11500 [snd_soc_dapm_dac] = 7,
11501 @@ -84,11 +88,19 @@ static int dapm_up_seq[] = {
11502 [snd_soc_dapm_mixer] = 8,
11503 [snd_soc_dapm_mixer_named_ctl] = 8,
11504 [snd_soc_dapm_pga] = 9,
11505 + [snd_soc_dapm_buffer] = 9,
11506 + [snd_soc_dapm_scheduler] = 9,
11507 + [snd_soc_dapm_effect] = 9,
11508 + [snd_soc_dapm_src] = 9,
11509 + [snd_soc_dapm_asrc] = 9,
11510 + [snd_soc_dapm_encoder] = 9,
11511 + [snd_soc_dapm_decoder] = 9,
11512 [snd_soc_dapm_adc] = 10,
11513 [snd_soc_dapm_out_drv] = 11,
11514 [snd_soc_dapm_hp] = 11,
11515 [snd_soc_dapm_spk] = 11,
11516 [snd_soc_dapm_line] = 11,
11517 + [snd_soc_dapm_sink] = 11,
11518 [snd_soc_dapm_kcontrol] = 12,
11519 [snd_soc_dapm_post] = 13,
11520 };
11521 @@ -101,13 +113,25 @@ static int dapm_down_seq[] = {
11522 [snd_soc_dapm_spk] = 3,
11523 [snd_soc_dapm_line] = 3,
11524 [snd_soc_dapm_out_drv] = 3,
11525 + [snd_soc_dapm_sink] = 3,
11526 [snd_soc_dapm_pga] = 4,
11527 + [snd_soc_dapm_buffer] = 4,
11528 + [snd_soc_dapm_scheduler] = 4,
11529 + [snd_soc_dapm_effect] = 4,
11530 + [snd_soc_dapm_src] = 4,
11531 + [snd_soc_dapm_asrc] = 4,
11532 + [snd_soc_dapm_encoder] = 4,
11533 + [snd_soc_dapm_decoder] = 4,
11534 [snd_soc_dapm_switch] = 5,
11535 [snd_soc_dapm_mixer_named_ctl] = 5,
11536 [snd_soc_dapm_mixer] = 5,
11537 [snd_soc_dapm_dac] = 6,
11538 [snd_soc_dapm_mic] = 7,
11539 + [snd_soc_dapm_siggen] = 7,
11540 + [snd_soc_dapm_input] = 7,
11541 + [snd_soc_dapm_output] = 7,
11542 [snd_soc_dapm_micbias] = 8,
11543 + [snd_soc_dapm_vmid] = 8,
11544 [snd_soc_dapm_mux] = 9,
11545 [snd_soc_dapm_demux] = 9,
11546 [snd_soc_dapm_aif_in] = 10,
11547 diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
11548 index 66e77e020745..88a7e860b175 100644
11549 --- a/sound/soc/soc-topology.c
11550 +++ b/sound/soc/soc-topology.c
11551 @@ -2493,6 +2493,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
11552 struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id)
11553 {
11554 struct soc_tplg tplg;
11555 + int ret;
11556
11557 /* setup parsing context */
11558 memset(&tplg, 0, sizeof(tplg));
11559 @@ -2506,7 +2507,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
11560 tplg.bytes_ext_ops = ops->bytes_ext_ops;
11561 tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;
11562
11563 - return soc_tplg_load(&tplg);
11564 + ret = soc_tplg_load(&tplg);
11565 +	/* free the created components if topology loading fails */
11566 + if (ret)
11567 + snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL);
11568 +
11569 + return ret;
11570 }
11571 EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);
11572
11573 diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
11574 index db1511359c5e..40c93d8158b5 100644
11575 --- a/tools/perf/util/auxtrace.c
11576 +++ b/tools/perf/util/auxtrace.c
11577 @@ -1273,9 +1273,9 @@ static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
11578 }
11579
11580 /* padding must be written by fn() e.g. record__process_auxtrace() */
11581 - padding = size & 7;
11582 + padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
11583 if (padding)
11584 - padding = 8 - padding;
11585 + padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
11586
11587 memset(&ev, 0, sizeof(ev));
11588 ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
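The literal 8 becomes PERF_AUXTRACE_RECORD_ALIGNMENT (defined in the next hunk) but the arithmetic is unchanged: for a power-of-two alignment A, size & (A - 1) is size % A, and A minus that remainder rounds size up to the next multiple of A. A quick check of the computation:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PERF_AUXTRACE_RECORD_ALIGNMENT 8	/* must be a power of two */

static size_t padding_for(size_t size)
{
	size_t padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);

	return padding ? PERF_AUXTRACE_RECORD_ALIGNMENT - padding : 0;
}

int main(void)
{
	assert(padding_for(16) == 0);	/* already aligned */
	assert(padding_for(17) == 7);	/* 17 + 7 = 24 */
	assert(padding_for(23) == 1);	/* 23 + 1 = 24 */
	printf("ok\n");
	return 0;
}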
11589 diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
11590 index 71fc3bd74299..136c5f95f9bf 100644
11591 --- a/tools/perf/util/auxtrace.h
11592 +++ b/tools/perf/util/auxtrace.h
11593 @@ -38,6 +38,9 @@ struct record_opts;
11594 struct auxtrace_info_event;
11595 struct events_stats;
11596
11597 +/* Auxtrace records must have the same alignment as perf event records */
11598 +#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
11599 +
11600 enum auxtrace_type {
11601 PERF_AUXTRACE_UNKNOWN,
11602 PERF_AUXTRACE_INTEL_PT,
11603 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
11604 index d404bed7003a..f3db68abbd9a 100644
11605 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
11606 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
11607 @@ -26,6 +26,7 @@
11608
11609 #include "../cache.h"
11610 #include "../util.h"
11611 +#include "../auxtrace.h"
11612
11613 #include "intel-pt-insn-decoder.h"
11614 #include "intel-pt-pkt-decoder.h"
11615 @@ -1389,7 +1390,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
11616 {
11617 intel_pt_log("ERROR: Buffer overflow\n");
11618 intel_pt_clear_tx_flags(decoder);
11619 - decoder->cbr = 0;
11620 decoder->timestamp_insn_cnt = 0;
11621 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
11622 decoder->overflow = true;
11623 @@ -2559,6 +2559,34 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
11624 }
11625 }
11626
11627 +#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
11628 +
11629 +/**
11630 + * adj_for_padding - adjust overlap to account for padding.
11631 + * @buf_b: second buffer
11632 + * @buf_a: first buffer
11633 + * @len_a: size of first buffer
11634 + *
11635 + * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
11636 + * accordingly.
11637 + *
11638 + * Return: A pointer into @buf_b from where non-overlapped data starts
11639 + */
11640 +static unsigned char *adj_for_padding(unsigned char *buf_b,
11641 + unsigned char *buf_a, size_t len_a)
11642 +{
11643 + unsigned char *p = buf_b - MAX_PADDING;
11644 + unsigned char *q = buf_a + len_a - MAX_PADDING;
11645 + int i;
11646 +
11647 + for (i = MAX_PADDING; i; i--, p++, q++) {
11648 + if (*p != *q)
11649 + break;
11650 + }
11651 +
11652 + return p;
11653 +}
11654 +
11655 /**
11656 * intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
11657 * using TSC.
11658 @@ -2609,8 +2637,11 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
11659
11660 /* Same TSC, so buffers are consecutive */
11661 if (!cmp && rem_b >= rem_a) {
11662 + unsigned char *start;
11663 +
11664 *consecutive = true;
11665 - return buf_b + len_b - (rem_b - rem_a);
11666 + start = buf_b + len_b - (rem_b - rem_a);
11667 + return adj_for_padding(start, buf_a, len_a);
11668 }
11669 if (cmp < 0)
11670 return buf_b; /* tsc_a < tsc_b => no overlap */
11671 @@ -2673,7 +2704,7 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
11672 found = memmem(buf_a, len_a, buf_b, len_a);
11673 if (found) {
11674 *consecutive = true;
11675 - return buf_b + len_a;
11676 + return adj_for_padding(buf_b + len_a, buf_a, len_a);
11677 }
11678
11679 /* Try again at next PSB in buffer 'a' */
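adj_for_padding() compares the (up to) MAX_PADDING bytes of buf_b just before the computed overlap start against the tail of buf_a: while the bytes match they are genuine duplicates, and the first mismatch is where buf_a's padding (which buf_b does not contain) began, so buf_b's new data starts there. A contrived user-space run-through (the buffers and the naive start offset are invented for illustration):

#include <stddef.h>
#include <stdio.h>

#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)

/* user-space copy of adj_for_padding() from the hunk above */
static unsigned char *adj_for_padding(unsigned char *buf_b,
				      unsigned char *buf_a, size_t len_a)
{
	unsigned char *p = buf_b - MAX_PADDING;
	unsigned char *q = buf_a + len_a - MAX_PADDING;
	int i;

	for (i = MAX_PADDING; i; i--, p++, q++) {
		if (*p != *q)
			break;
	}
	return p;
}

int main(void)
{
	/* buf_a: eight bytes of real trace data, then three bytes of padding */
	unsigned char buf_a[] = { 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0 };
	/* buf_b: repeats buf_a's real tail (5..8) but not its padding */
	unsigned char buf_b[] = { 5, 6, 7, 8, 9, 10, 11, 12 };
	/* a padding-oblivious overlap computation would start here */
	unsigned char *start = adj_for_padding(buf_b + 7, buf_a, sizeof(buf_a));

	printf("new data starts at buf_b[%td]\n", start - buf_b);	/* prints 4 */
	return 0;
}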
11680 diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
11681 index 4f48bc11a29c..ff2c41ea94c8 100644
11682 --- a/tools/perf/util/intel-pt.c
11683 +++ b/tools/perf/util/intel-pt.c
11684 @@ -2507,6 +2507,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
11685 }
11686
11687 pt->timeless_decoding = intel_pt_timeless_decoding(pt);
11688 + if (pt->timeless_decoding && !pt->tc.time_mult)
11689 + pt->tc.time_mult = 1;
11690 pt->have_tsc = intel_pt_have_tsc(pt);
11691 pt->sampling_mode = false;
11692 pt->est_tsc = !pt->timeless_decoding;
11693 diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
11694 index d188b7588152..0715f972a275 100644
11695 --- a/tools/perf/util/symbol.c
11696 +++ b/tools/perf/util/symbol.c
11697 @@ -709,6 +709,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
11698 }
11699
11700 pos->start -= curr_map->start - curr_map->pgoff;
11701 + if (pos->end > curr_map->end)
11702 + pos->end = curr_map->end;
11703 if (pos->end)
11704 pos->end -= curr_map->start - curr_map->pgoff;
11705 symbols__insert(&curr_map->dso->symbols, pos);
11706 diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
11707 index 147e34cfceb7..02d7c871862a 100644
11708 --- a/tools/testing/selftests/bpf/test_lpm_map.c
11709 +++ b/tools/testing/selftests/bpf/test_lpm_map.c
11710 @@ -474,6 +474,16 @@ static void test_lpm_delete(void)
11711 assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
11712 errno == ENOENT);
11713
11714 + key->prefixlen = 30; // unused prefix so far
11715 + inet_pton(AF_INET, "192.255.0.0", key->data);
11716 + assert(bpf_map_delete_elem(map_fd, key) == -1 &&
11717 + errno == ENOENT);
11718 +
11719 + key->prefixlen = 16; // same prefix as the root node
11720 + inet_pton(AF_INET, "192.255.0.0", key->data);
11721 + assert(bpf_map_delete_elem(map_fd, key) == -1 &&
11722 + errno == ENOENT);
11723 +
11724 /* assert initial lookup */
11725 key->prefixlen = 32;
11726 inet_pton(AF_INET, "192.168.0.1", key->data);
11727 diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
11728 index 0f45633bd634..a4ccde0e473b 100755
11729 --- a/tools/testing/selftests/net/fib_tests.sh
11730 +++ b/tools/testing/selftests/net/fib_tests.sh
11731 @@ -385,6 +385,7 @@ fib_carrier_unicast_test()
11732
11733 set -e
11734 $IP link set dev dummy0 carrier off
11735 + sleep 1
11736 set +e
11737
11738 echo " Carrier down"
11739 diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
11740 index 91495045ad5a..1415e36fed3d 100644
11741 --- a/virt/kvm/arm/arm.c
11742 +++ b/virt/kvm/arm/arm.c
11743 @@ -624,6 +624,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
11744 /* Awaken to handle a signal, request we sleep again later. */
11745 kvm_make_request(KVM_REQ_SLEEP, vcpu);
11746 }
11747 +
11748 + /*
11749 + * Make sure we will observe a potential reset request if we've
11750 + * observed a change to the power state. Pairs with the smp_wmb() in
11751 + * kvm_psci_vcpu_on().
11752 + */
11753 + smp_rmb();
11754 }
11755
11756 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
11757 @@ -637,6 +644,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
11758 if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
11759 vcpu_req_sleep(vcpu);
11760
11761 + if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
11762 + kvm_reset_vcpu(vcpu);
11763 +
11764 /*
11765 * Clear IRQ_PENDING requests that were made to guarantee
11766 * that a VCPU sees new virtual interrupts.
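The smp_rmb() added to vcpu_req_sleep() pairs with the smp_wmb() in kvm_psci_vcpu_on() (seen later in this patch): the PSCI side publishes the reset request, issues the write barrier, then clears power_off, so any thread that observes the power-state change and then executes the read barrier is guaranteed to also observe the reset request. A user-space sketch of this message-passing idiom, with C11 release/acquire standing in for the kernel barriers (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static int reset_request;	/* stands in for vcpu->arch.reset_state */
static atomic_int powered_on;	/* stands in for !vcpu->arch.power_off */

static void vcpu_on(int request)	/* cf. kvm_psci_vcpu_on() */
{
	reset_request = request;
	/* release ordering plays the role of smp_wmb() */
	atomic_store_explicit(&powered_on, 1, memory_order_release);
}

static int check_requests(void)	/* cf. vcpu_req_sleep() */
{
	/* acquire ordering plays the role of smp_rmb() */
	if (!atomic_load_explicit(&powered_on, memory_order_acquire))
		return -1;
	return reset_request;	/* guaranteed to see the published value */
}

int main(void)
{
	vcpu_on(42);
	printf("%d\n", check_requests());	/* prints 42 */
	return 0;
}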
11767 diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
11768 index 2f405b0be25c..1344557a7085 100644
11769 --- a/virt/kvm/arm/mmu.c
11770 +++ b/virt/kvm/arm/mmu.c
11771 @@ -2154,7 +2154,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
11772 return 0;
11773 }
11774
11775 -void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
11776 +void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
11777 {
11778 }
11779
11780 diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
11781 index 9b73d3ad918a..34d08ee63747 100644
11782 --- a/virt/kvm/arm/psci.c
11783 +++ b/virt/kvm/arm/psci.c
11784 @@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
11785
11786 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
11787 {
11788 + struct vcpu_reset_state *reset_state;
11789 struct kvm *kvm = source_vcpu->kvm;
11790 struct kvm_vcpu *vcpu = NULL;
11791 - struct swait_queue_head *wq;
11792 unsigned long cpu_id;
11793 - unsigned long context_id;
11794 - phys_addr_t target_pc;
11795
11796 cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
11797 if (vcpu_mode_is_32bit(source_vcpu))
11798 @@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
11799 return PSCI_RET_INVALID_PARAMS;
11800 }
11801
11802 - target_pc = smccc_get_arg2(source_vcpu);
11803 - context_id = smccc_get_arg3(source_vcpu);
11804 + reset_state = &vcpu->arch.reset_state;
11805
11806 - kvm_reset_vcpu(vcpu);
11807 -
11808 - /* Gracefully handle Thumb2 entry point */
11809 - if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
11810 - target_pc &= ~((phys_addr_t) 1);
11811 - vcpu_set_thumb(vcpu);
11812 - }
11813 + reset_state->pc = smccc_get_arg2(source_vcpu);
11814
11815 /* Propagate caller endianness */
11816 - if (kvm_vcpu_is_be(source_vcpu))
11817 - kvm_vcpu_set_be(vcpu);
11818 + reset_state->be = kvm_vcpu_is_be(source_vcpu);
11819
11820 - *vcpu_pc(vcpu) = target_pc;
11821 /*
11822 * NOTE: We always update r0 (or x0) because for PSCI v0.1
11823 * the general purpose registers are undefined upon CPU_ON.
11824 */
11825 - smccc_set_retval(vcpu, context_id, 0, 0, 0);
11826 - vcpu->arch.power_off = false;
11827 - smp_mb(); /* Make sure the above is visible */
11828 + reset_state->r0 = smccc_get_arg3(source_vcpu);
11829 +
11830 + WRITE_ONCE(reset_state->reset, true);
11831 + kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
11832
11833 - wq = kvm_arch_vcpu_wq(vcpu);
11834 - swake_up_one(wq);
11835 + /*
11836 + * Make sure the reset request is observed if the change to
11837 + * power_state is observed.
11838 + */
11839 + smp_wmb();
11840 +
11841 + vcpu->arch.power_off = false;
11842 + kvm_vcpu_wake_up(vcpu);
11843
11844 return PSCI_RET_SUCCESS;
11845 }
11846 diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
11847 index c0c0b88af1d5..8196e4f8731f 100644
11848 --- a/virt/kvm/arm/vgic/vgic-init.c
11849 +++ b/virt/kvm/arm/vgic/vgic-init.c
11850 @@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
11851 struct vgic_dist *dist = &kvm->arch.vgic;
11852
11853 INIT_LIST_HEAD(&dist->lpi_list_head);
11854 - spin_lock_init(&dist->lpi_list_lock);
11855 + raw_spin_lock_init(&dist->lpi_list_lock);
11856 }
11857
11858 /* CREATION */
11859 @@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
11860 irq->config = VGIC_CONFIG_LEVEL;
11861 }
11862
11863 - /*
11864 - * GICv3 can only be created via the KVM_DEVICE_CREATE API and
11865 - * so we always know the emulation type at this point as it's
11866 - * either explicitly configured as GICv3, or explicitly
11867 - * configured as GICv2, or not configured yet which also
11868 - * implies GICv2.
11869 - */
11870 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
11871 irq->group = 1;
11872 else
11873 @@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm)
11874 {
11875 struct vgic_dist *dist = &kvm->arch.vgic;
11876 struct kvm_vcpu *vcpu;
11877 - int ret = 0, i;
11878 + int ret = 0, i, idx;
11879
11880 if (vgic_initialized(kvm))
11881 return 0;
11882 @@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm)
11883 if (ret)
11884 goto out;
11885
11886 + /* Initialize groups on CPUs created before the VGIC type was known */
11887 + kvm_for_each_vcpu(idx, vcpu, kvm) {
11888 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
11889 +
11890 + for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
11891 + struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
11892 + if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
11893 + irq->group = 1;
11894 + else
11895 + irq->group = 0;
11896 + }
11897 + }
11898 +
11899 if (vgic_has_its(kvm)) {
11900 ret = vgic_v4_init(kvm);
11901 if (ret)
11902 diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
11903 index 12502251727e..f376c82afb61 100644
11904 --- a/virt/kvm/arm/vgic/vgic-its.c
11905 +++ b/virt/kvm/arm/vgic/vgic-its.c
11906 @@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
11907 irq->target_vcpu = vcpu;
11908 irq->group = 1;
11909
11910 - spin_lock_irqsave(&dist->lpi_list_lock, flags);
11911 + raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
11912
11913 /*
11914 * There could be a race with another vgic_add_lpi(), so we need to
11915 @@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
11916 dist->lpi_list_count++;
11917
11918 out_unlock:
11919 - spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
11920 + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
11921
11922 /*
11923 * We "cache" the configuration table entries in our struct vgic_irq's.
11924 @@ -339,7 +339,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
11925 if (!intids)
11926 return -ENOMEM;
11927
11928 - spin_lock_irqsave(&dist->lpi_list_lock, flags);
11929 + raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
11930 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
11931 if (i == irq_count)
11932 break;
11933 @@ -348,7 +348,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
11934 continue;
11935 intids[i++] = irq->intid;
11936 }
11937 - spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
11938 + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
11939
11940 *intid_ptr = intids;
11941 return i;
11942 diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
11943 index f884a54b2601..c5165e3b80cb 100644
11944 --- a/virt/kvm/arm/vgic/vgic.c
11945 +++ b/virt/kvm/arm/vgic/vgic.c
11946 @@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
11947 struct vgic_irq *irq = NULL;
11948 unsigned long flags;
11949
11950 - spin_lock_irqsave(&dist->lpi_list_lock, flags);
11951 + raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
11952
11953 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
11954 if (irq->intid != intid)
11955 @@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
11956 irq = NULL;
11957
11958 out_unlock:
11959 - spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
11960 + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
11961
11962 return irq;
11963 }
11964 @@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
11965 if (irq->intid < VGIC_MIN_LPI)
11966 return;
11967
11968 - spin_lock_irqsave(&dist->lpi_list_lock, flags);
11969 + raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
11970 if (!kref_put(&irq->refcount, vgic_irq_release)) {
11971 - spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
11972 + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
11973 return;
11974 };
11975
11976 list_del(&irq->lpi_list);
11977 dist->lpi_list_count--;
11978 - spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
11979 + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
11980
11981 kfree(irq);
11982 }
11983 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
11984 index 0ffb02ff5234..c436d95fd7aa 100644
11985 --- a/virt/kvm/kvm_main.c
11986 +++ b/virt/kvm/kvm_main.c
11987 @@ -873,6 +873,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
11988 int as_id, struct kvm_memslots *slots)
11989 {
11990 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
11991 + u64 gen;
11992
11993 /*
11994 * Set the low bit in the generation, which disables SPTE caching
11995 @@ -895,9 +896,11 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
11996 * space 0 will use generations 0, 4, 8, ... while * address space 1 will
11997 * use generations 2, 6, 10, 14, ...
11998 */
11999 - slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
12000 + gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;
12001
12002 - kvm_arch_memslots_updated(kvm, slots);
12003 + kvm_arch_memslots_updated(kvm, gen);
12004 +
12005 + slots->generation = gen;
12006
12007 return old_memslots;
12008 }
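Computing gen before the kvm_arch_memslots_updated() call means the architecture hook now sees the generation that is about to be published, rather than reading slots->generation after the fact. The arithmetic interleaves address spaces exactly as the comment above describes: the low bit marks an update in progress, and each completed update advances a given space's generation by KVM_ADDRESS_SPACE_NUM * 2. A small sketch of the sequence for address space 0 (taking KVM_ADDRESS_SPACE_NUM = 2, as the 0, 4, 8 / 2, 6, 10 example in the comment implies):

#include <stdio.h>

#define KVM_ADDRESS_SPACE_NUM 2

int main(void)
{
	unsigned long long gen = 0;	/* address space 0 starts at 0 */
	int i;

	for (i = 0; i < 4; i++) {
		/* low bit set while the update is in flight */
		unsigned long long updating = gen | 1;

		gen = updating + KVM_ADDRESS_SPACE_NUM * 2 - 1;
		printf("update %d: in flight %llu -> published %llu\n",
		       i, updating, gen);	/* published: 4, 8, 12, 16 */
	}
	return 0;
}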