Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.17/0103-4.17.4-all-fixes.patch



Revision 3150
Mon Jul 9 08:30:47 2018 UTC by niro
File size: 236391 bytes
-linux-4.17.4
1 diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
2 index 8e69345c37cc..bbbabffc682a 100644
3 --- a/Documentation/ABI/testing/sysfs-class-cxl
4 +++ b/Documentation/ABI/testing/sysfs-class-cxl
5 @@ -69,7 +69,9 @@ Date: September 2014
6 Contact: linuxppc-dev@lists.ozlabs.org
7 Description: read/write
8 Set the mode for prefaulting in segments into the segment table
9 - when performing the START_WORK ioctl. Possible values:
10 + when performing the START_WORK ioctl. Only applicable when
11 + running under hashed page table mmu.
12 + Possible values:
13 none: No prefaulting (default)
14 work_element_descriptor: Treat the work element
15 descriptor as an effective address and
16 diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
17 index eb30efdd2e78..25dc591cb110 100644
18 --- a/Documentation/core-api/printk-formats.rst
19 +++ b/Documentation/core-api/printk-formats.rst
20 @@ -419,11 +419,10 @@ struct clk
21
22 %pC pll1
23 %pCn pll1
24 - %pCr 1560000000
25
26 For printing struct clk structures. %pC and %pCn print the name
27 (Common Clock Framework) or address (legacy clock framework) of the
28 -structure; %pCr prints the current clock rate.
29 +structure.
30
31 Passed by reference.
32
33 diff --git a/Makefile b/Makefile
34 index 31dc3a08295a..1d740dbe676d 100644
35 --- a/Makefile
36 +++ b/Makefile
37 @@ -1,7 +1,7 @@
38 # SPDX-License-Identifier: GPL-2.0
39 VERSION = 4
40 PATCHLEVEL = 17
41 -SUBLEVEL = 3
42 +SUBLEVEL = 4
43 EXTRAVERSION =
44 NAME = Merciless Moray
45
46 diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
47 index e10c03496524..a115575b38bf 100644
48 --- a/arch/arm/boot/dts/mt7623.dtsi
49 +++ b/arch/arm/boot/dts/mt7623.dtsi
50 @@ -22,11 +22,12 @@
51 #include <dt-bindings/phy/phy.h>
52 #include <dt-bindings/reset/mt2701-resets.h>
53 #include <dt-bindings/thermal/thermal.h>
54 -#include "skeleton64.dtsi"
55
56 / {
57 compatible = "mediatek,mt7623";
58 interrupt-parent = <&sysirq>;
59 + #address-cells = <2>;
60 + #size-cells = <2>;
61
62 cpu_opp_table: opp-table {
63 compatible = "operating-points-v2";
64 diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
65 index bbf56f855e46..5938e4c79deb 100644
66 --- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
67 +++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
68 @@ -109,6 +109,7 @@
69 };
70
71 memory@80000000 {
72 + device_type = "memory";
73 reg = <0 0x80000000 0 0x40000000>;
74 };
75 };
76 diff --git a/arch/arm/boot/dts/mt7623n-rfb.dtsi b/arch/arm/boot/dts/mt7623n-rfb.dtsi
77 index 256c5fd947bf..43c9d7ca23a0 100644
78 --- a/arch/arm/boot/dts/mt7623n-rfb.dtsi
79 +++ b/arch/arm/boot/dts/mt7623n-rfb.dtsi
80 @@ -47,6 +47,7 @@
81 };
82
83 memory@80000000 {
84 + device_type = "memory";
85 reg = <0 0x80000000 0 0x40000000>;
86 };
87
88 diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
89 index 486d4e7433ed..b38f8c240558 100644
90 --- a/arch/arm/boot/dts/socfpga.dtsi
91 +++ b/arch/arm/boot/dts/socfpga.dtsi
92 @@ -748,13 +748,13 @@
93 nand0: nand@ff900000 {
94 #address-cells = <0x1>;
95 #size-cells = <0x1>;
96 - compatible = "denali,denali-nand-dt";
97 + compatible = "altr,socfpga-denali-nand";
98 reg = <0xff900000 0x100000>,
99 <0xffb80000 0x10000>;
100 reg-names = "nand_data", "denali_reg";
101 interrupts = <0x0 0x90 0x4>;
102 dma-mask = <0xffffffff>;
103 - clocks = <&nand_clk>;
104 + clocks = <&nand_x_clk>;
105 status = "disabled";
106 };
107
108 diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
109 index bead79e4b2aa..791ca15c799e 100644
110 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi
111 +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
112 @@ -593,8 +593,7 @@
113 #size-cells = <0>;
114 reg = <0xffda5000 0x100>;
115 interrupts = <0 102 4>;
116 - num-chipselect = <4>;
117 - bus-num = <0>;
118 + num-cs = <4>;
119 /*32bit_access;*/
120 tx-dma-channel = <&pdma 16>;
121 rx-dma-channel = <&pdma 17>;
122 @@ -633,7 +632,7 @@
123 nand: nand@ffb90000 {
124 #address-cells = <1>;
125 #size-cells = <1>;
126 - compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand";
127 + compatible = "altr,socfpga-denali-nand";
128 reg = <0xffb90000 0x72000>,
129 <0xffb80000 0x10000>;
130 reg-names = "nand_data", "denali_reg";
131 diff --git a/arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dts b/arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dts
132 index b20a710da7bc..7a4fca36c673 100644
133 --- a/arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dts
134 +++ b/arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dts
135 @@ -62,8 +62,8 @@
136 reg_vcc1v2: vcc1v2 {
137 compatible = "regulator-fixed";
138 regulator-name = "vcc1v2";
139 - regulator-min-microvolt = <3300000>;
140 - regulator-max-microvolt = <3300000>;
141 + regulator-min-microvolt = <1200000>;
142 + regulator-max-microvolt = <1200000>;
143 regulator-always-on;
144 regulator-boot-on;
145 vin-supply = <&reg_vcc5v0>;
146 @@ -113,8 +113,8 @@
147 reg_vdd_cpux: vdd-cpux {
148 compatible = "regulator-fixed";
149 regulator-name = "vdd-cpux";
150 - regulator-min-microvolt = <3300000>;
151 - regulator-max-microvolt = <3300000>;
152 + regulator-min-microvolt = <1200000>;
153 + regulator-max-microvolt = <1200000>;
154 regulator-always-on;
155 regulator-boot-on;
156 vin-supply = <&reg_vcc5v0>;
157 diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
158 index 3b73fdcf3627..8de1100d1067 100644
159 --- a/arch/arm/include/asm/kgdb.h
160 +++ b/arch/arm/include/asm/kgdb.h
161 @@ -77,7 +77,7 @@ extern int kgdb_fault_expected;
162
163 #define KGDB_MAX_NO_CPUS 1
164 #define BUFMAX 400
165 -#define NUMREGBYTES (DBG_MAX_REG_NUM << 2)
166 +#define NUMREGBYTES (GDB_MAX_REGS << 2)
167 #define NUMCRITREGBYTES (32 << 2)
168
169 #define _R0 0
170 diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
171 index c89d0c307f8d..2c63e60754c5 100644
172 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
173 +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
174 @@ -252,8 +252,7 @@
175 interrupts = <0 99 4>;
176 resets = <&rst SPIM0_RESET>;
177 reg-io-width = <4>;
178 - num-chipselect = <4>;
179 - bus-num = <0>;
180 + num-cs = <4>;
181 status = "disabled";
182 };
183
184 @@ -265,8 +264,7 @@
185 interrupts = <0 100 4>;
186 resets = <&rst SPIM1_RESET>;
187 reg-io-width = <4>;
188 - num-chipselect = <4>;
189 - bus-num = <0>;
190 + num-cs = <4>;
191 status = "disabled";
192 };
193
194 diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
195 index 3c31e21cbed7..69693977fe07 100644
196 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
197 +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
198 @@ -35,6 +35,12 @@
199 no-map;
200 };
201
202 + /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
203 + secmon_reserved_alt: secmon@5000000 {
204 + reg = <0x0 0x05000000 0x0 0x300000>;
205 + no-map;
206 + };
207 +
208 linux,cma {
209 compatible = "shared-dma-pool";
210 reusable;
211 diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
212 index 3e3eb31748a3..f63bceb88caa 100644
213 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
214 +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
215 @@ -234,9 +234,6 @@
216
217 bus-width = <4>;
218 cap-sd-highspeed;
219 - sd-uhs-sdr12;
220 - sd-uhs-sdr25;
221 - sd-uhs-sdr50;
222 max-frequency = <100000000>;
223 disable-wp;
224
225 diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
226 index dba365ed4bd5..33c15f2a949e 100644
227 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
228 +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
229 @@ -13,14 +13,6 @@
230 / {
231 compatible = "amlogic,meson-gxl";
232
233 - reserved-memory {
234 - /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
235 - secmon_reserved_alt: secmon@5000000 {
236 - reg = <0x0 0x05000000 0x0 0x300000>;
237 - no-map;
238 - };
239 - };
240 -
241 soc {
242 usb0: usb@c9000000 {
243 status = "disabled";
244 diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
245 index ed2f1237ea1e..8259b32f0ced 100644
246 --- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
247 +++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
248 @@ -149,7 +149,7 @@
249
250 CP110_LABEL(icu): interrupt-controller@1e0000 {
251 compatible = "marvell,cp110-icu";
252 - reg = <0x1e0000 0x10>;
253 + reg = <0x1e0000 0x440>;
254 #interrupt-cells = <3>;
255 interrupt-controller;
256 msi-parent = <&gicp>;
257 diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
258 index 253188fb8cb0..e3e50950a863 100644
259 --- a/arch/arm64/crypto/aes-glue.c
260 +++ b/arch/arm64/crypto/aes-glue.c
261 @@ -223,8 +223,8 @@ static int ctr_encrypt(struct skcipher_request *req)
262 kernel_neon_begin();
263 aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
264 (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
265 - err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
266 kernel_neon_end();
267 + err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
268 }
269 if (walk.nbytes) {
270 u8 __aligned(8) tail[AES_BLOCK_SIZE];
271 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
272 index 9d1b06d67c53..df0bd090f0e4 100644
273 --- a/arch/arm64/kernel/cpufeature.c
274 +++ b/arch/arm64/kernel/cpufeature.c
275 @@ -937,7 +937,7 @@ static int __init parse_kpti(char *str)
276 __kpti_forced = enabled ? 1 : -1;
277 return 0;
278 }
279 -__setup("kpti=", parse_kpti);
280 +early_param("kpti", parse_kpti);
281 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
282
283 #ifdef CONFIG_ARM64_HW_AFDBM
284 diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
285 index 154b7d30145d..f21209064041 100644
286 --- a/arch/arm64/kernel/signal.c
287 +++ b/arch/arm64/kernel/signal.c
288 @@ -830,11 +830,12 @@ static void do_signal(struct pt_regs *regs)
289 unsigned long continue_addr = 0, restart_addr = 0;
290 int retval = 0;
291 struct ksignal ksig;
292 + bool syscall = in_syscall(regs);
293
294 /*
295 * If we were from a system call, check for system call restarting...
296 */
297 - if (in_syscall(regs)) {
298 + if (syscall) {
299 continue_addr = regs->pc;
300 restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
301 retval = regs->regs[0];
302 @@ -886,7 +887,7 @@ static void do_signal(struct pt_regs *regs)
303 * Handle restarting a different system call. As above, if a debugger
304 * has chosen to restart at a different PC, ignore the restart.
305 */
306 - if (in_syscall(regs) && regs->pc == restart_addr) {
307 + if (syscall && regs->pc == restart_addr) {
308 if (retval == -ERESTART_RESTARTBLOCK)
309 setup_restart_syscall(regs);
310 user_rewind_single_step(current);
311 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
312 index 5f9a73a4452c..03646e6a2ef4 100644
313 --- a/arch/arm64/mm/proc.S
314 +++ b/arch/arm64/mm/proc.S
315 @@ -217,8 +217,9 @@ ENDPROC(idmap_cpu_replace_ttbr1)
316
317 .macro __idmap_kpti_put_pgtable_ent_ng, type
318 orr \type, \type, #PTE_NG // Same bit for blocks and pages
319 - str \type, [cur_\()\type\()p] // Update the entry and ensure it
320 - dc civac, cur_\()\type\()p // is visible to all CPUs.
321 + str \type, [cur_\()\type\()p] // Update the entry and ensure
322 + dmb sy // that it is visible to all
323 + dc civac, cur_\()\type\()p // CPUs.
324 .endm
325
326 /*
327 diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
328 index 0c3275aa0197..e522307db47c 100644
329 --- a/arch/m68k/mac/config.c
330 +++ b/arch/m68k/mac/config.c
331 @@ -1005,7 +1005,7 @@ int __init mac_platform_init(void)
332 struct resource swim_rsrc = {
333 .flags = IORESOURCE_MEM,
334 .start = (resource_size_t)swim_base,
335 - .end = (resource_size_t)swim_base + 0x2000,
336 + .end = (resource_size_t)swim_base + 0x1FFF,
337 };
338
339 platform_device_register_simple("swim", -1, &swim_rsrc, 1);
340 diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
341 index c2a38321c96d..3b420f6d8822 100644
342 --- a/arch/m68k/mm/kmap.c
343 +++ b/arch/m68k/mm/kmap.c
344 @@ -89,7 +89,8 @@ static inline void free_io_area(void *addr)
345 for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
346 if (tmp->addr == addr) {
347 *p = tmp->next;
348 - __iounmap(tmp->addr, tmp->size);
349 + /* remove gap added in get_io_area() */
350 + __iounmap(tmp->addr, tmp->size - IO_SIZE);
351 kfree(tmp);
352 return;
353 }
354 diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
355 index 6b2c6f3baefa..75fb96ca61db 100644
356 --- a/arch/mips/ath79/mach-pb44.c
357 +++ b/arch/mips/ath79/mach-pb44.c
358 @@ -34,7 +34,7 @@
359 #define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL)
360
361 static struct gpiod_lookup_table pb44_i2c_gpiod_table = {
362 - .dev_id = "i2c-gpio",
363 + .dev_id = "i2c-gpio.0",
364 .table = {
365 GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA,
366 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
367 diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
368 index 6054d49e608e..8c9cbf13d32a 100644
369 --- a/arch/mips/bcm47xx/setup.c
370 +++ b/arch/mips/bcm47xx/setup.c
371 @@ -212,6 +212,12 @@ static int __init bcm47xx_cpu_fixes(void)
372 */
373 if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
374 cpu_wait = NULL;
375 +
376 + /*
377 + * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
378 + * Enable ExternalSync for sync instruction to take effect
379 + */
380 + set_c0_config7(MIPS_CONF7_ES);
381 break;
382 #endif
383 }
384 diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
385 index a7d0b836f2f7..cea8ad864b3f 100644
386 --- a/arch/mips/include/asm/io.h
387 +++ b/arch/mips/include/asm/io.h
388 @@ -414,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
389 __val = *__addr; \
390 slow; \
391 \
392 + /* prevent prefetching of coherent DMA data prematurely */ \
393 + rmb(); \
394 return pfx##ioswab##bwlq(__addr, __val); \
395 }
396
397 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
398 index f65859784a4c..eeb131e2048e 100644
399 --- a/arch/mips/include/asm/mipsregs.h
400 +++ b/arch/mips/include/asm/mipsregs.h
401 @@ -681,6 +681,8 @@
402 #define MIPS_CONF7_WII (_ULCAST_(1) << 31)
403
404 #define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
405 +/* ExternalSync */
406 +#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
407
408 #define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
409 #define MIPS_CONF7_AR (_ULCAST_(1) << 16)
410 @@ -2760,6 +2762,7 @@ __BUILD_SET_C0(status)
411 __BUILD_SET_C0(cause)
412 __BUILD_SET_C0(config)
413 __BUILD_SET_C0(config5)
414 +__BUILD_SET_C0(config7)
415 __BUILD_SET_C0(intcontrol)
416 __BUILD_SET_C0(intctl)
417 __BUILD_SET_C0(srsmap)
418 diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
419 index f2ee7e1e3342..cff52b283e03 100644
420 --- a/arch/mips/kernel/mcount.S
421 +++ b/arch/mips/kernel/mcount.S
422 @@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra)
423 EXPORT_SYMBOL(_mcount)
424 PTR_LA t1, ftrace_stub
425 PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
426 - bne t1, t2, static_trace
427 + beq t1, t2, fgraph_trace
428 nop
429
430 + MCOUNT_SAVE_REGS
431 +
432 + move a0, ra /* arg1: self return address */
433 + jalr t2 /* (1) call *ftrace_trace_function */
434 + move a1, AT /* arg2: parent's return address */
435 +
436 + MCOUNT_RESTORE_REGS
437 +
438 +fgraph_trace:
439 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
440 + PTR_LA t1, ftrace_stub
441 PTR_L t3, ftrace_graph_return
442 bne t1, t3, ftrace_graph_caller
443 nop
444 @@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount)
445 bne t1, t3, ftrace_graph_caller
446 nop
447 #endif
448 - b ftrace_stub
449 -#ifdef CONFIG_32BIT
450 - addiu sp, sp, 8
451 -#else
452 - nop
453 -#endif
454
455 -static_trace:
456 - MCOUNT_SAVE_REGS
457 -
458 - move a0, ra /* arg1: self return address */
459 - jalr t2 /* (1) call *ftrace_trace_function */
460 - move a1, AT /* arg2: parent's return address */
461 -
462 - MCOUNT_RESTORE_REGS
463 #ifdef CONFIG_32BIT
464 addiu sp, sp, 8
465 #endif
466 +
467 .globl ftrace_stub
468 ftrace_stub:
469 RETURN_BACK
470 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
471 index 95813df90801..bb2523b4bd8f 100644
472 --- a/arch/powerpc/Makefile
473 +++ b/arch/powerpc/Makefile
474 @@ -251,6 +251,7 @@ cpu-as-$(CONFIG_4xx) += -Wa,-m405
475 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
476 cpu-as-$(CONFIG_E200) += -Wa,-me200
477 cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4
478 +cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc)
479
480 KBUILD_AFLAGS += $(cpu-as-y)
481 KBUILD_CFLAGS += $(cpu-as-y)
482 diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
483 index c904477abaf3..d926100da914 100644
484 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c
485 +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
486 @@ -711,7 +711,8 @@ static __init void cpufeatures_cpu_quirks(void)
487 cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
488 cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
489 cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
490 - } else /* DD2.1 and up have DD2_1 */
491 + } else if ((version & 0xffff0000) == 0x004e0000)
492 + /* DD2.1 and up have DD2_1 */
493 cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
494
495 if ((version & 0xffff0000) == 0x004e0000) {
496 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
497 index 51695608c68b..3d1af55e09dc 100644
498 --- a/arch/powerpc/kernel/entry_64.S
499 +++ b/arch/powerpc/kernel/entry_64.S
500 @@ -596,6 +596,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
501 * actually hit this code path.
502 */
503
504 + isync
505 slbie r6
506 slbie r6 /* Workaround POWER5 < DD2.1 issue */
507 slbmte r7,r0
508 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
509 index 3c2c2688918f..fe631022ea89 100644
510 --- a/arch/powerpc/kernel/fadump.c
511 +++ b/arch/powerpc/kernel/fadump.c
512 @@ -1155,6 +1155,9 @@ void fadump_cleanup(void)
513 init_fadump_mem_struct(&fdm,
514 be64_to_cpu(fdm_active->cpu_state_data.destination_address));
515 fadump_invalidate_dump(&fdm);
516 + } else if (fw_dump.dump_registered) {
517 + /* Un-register Firmware-assisted dump if it was registered. */
518 + fadump_unregister_dump(&fdm);
519 }
520 }
521
522 diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
523 index 4c1012b80d3b..80547dad37da 100644
524 --- a/arch/powerpc/kernel/hw_breakpoint.c
525 +++ b/arch/powerpc/kernel/hw_breakpoint.c
526 @@ -178,8 +178,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
527 if (cpu_has_feature(CPU_FTR_DAWR)) {
528 length_max = 512 ; /* 64 doublewords */
529 /* DAWR region can't cross 512 boundary */
530 - if ((bp->attr.bp_addr >> 10) !=
531 - ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
532 + if ((bp->attr.bp_addr >> 9) !=
533 + ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
534 return -EINVAL;
535 }
536 if (info->len >
537 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
538 index d23cf632edf0..0f63dd5972e9 100644
539 --- a/arch/powerpc/kernel/ptrace.c
540 +++ b/arch/powerpc/kernel/ptrace.c
541 @@ -2443,6 +2443,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
542 /* Create a new breakpoint request if one doesn't exist already */
543 hw_breakpoint_init(&attr);
544 attr.bp_addr = hw_brk.address;
545 + attr.bp_len = 8;
546 arch_bp_generic_fields(hw_brk.type,
547 &attr.bp_type);
548
549 diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
550 index 0eafdf01edc7..e6f500fabf5e 100644
551 --- a/arch/powerpc/mm/pkeys.c
552 +++ b/arch/powerpc/mm/pkeys.c
553 @@ -383,9 +383,9 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot,
554 {
555 /*
556 * If the currently associated pkey is execute-only, but the requested
557 - * protection requires read or write, move it back to the default pkey.
558 + * protection is not execute-only, move it back to the default pkey.
559 */
560 - if (vma_is_pkey_exec_only(vma) && (prot & (PROT_READ | PROT_WRITE)))
561 + if (vma_is_pkey_exec_only(vma) && (prot != PROT_EXEC))
562 return 0;
563
564 /*
565 diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
566 index a5d7309c2d05..465cb604b33a 100644
567 --- a/arch/powerpc/mm/tlb-radix.c
568 +++ b/arch/powerpc/mm/tlb-radix.c
569 @@ -733,6 +733,8 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
570 for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
571 if (sib == cpu)
572 continue;
573 + if (!cpu_possible(sib))
574 + continue;
575 if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
576 flush = true;
577 }
578 diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
579 index d7532e7b9ab5..75fb23c24ee8 100644
580 --- a/arch/powerpc/perf/imc-pmu.c
581 +++ b/arch/powerpc/perf/imc-pmu.c
582 @@ -1146,7 +1146,7 @@ static int init_nest_pmu_ref(void)
583
584 static void cleanup_all_core_imc_memory(void)
585 {
586 - int i, nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
587 + int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
588 struct imc_mem_info *ptr = core_imc_pmu->mem_info;
589 int size = core_imc_pmu->counter_mem_size;
590
591 @@ -1264,7 +1264,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
592 if (!pmu_ptr->pmu.name)
593 return -ENOMEM;
594
595 - nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
596 + nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
597 pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
598 GFP_KERNEL);
599
600 diff --git a/arch/powerpc/platforms/powernv/copy-paste.h b/arch/powerpc/platforms/powernv/copy-paste.h
601 index c9a503623431..e9a6c35f8a29 100644
602 --- a/arch/powerpc/platforms/powernv/copy-paste.h
603 +++ b/arch/powerpc/platforms/powernv/copy-paste.h
604 @@ -42,5 +42,6 @@ static inline int vas_paste(void *paste_address, int offset)
605 : "b" (offset), "b" (paste_address)
606 : "memory", "cr0");
607
608 - return (cr >> CR0_SHIFT) & CR0_MASK;
609 + /* We mask with 0xE to ignore SO */
610 + return (cr >> CR0_SHIFT) & 0xE;
611 }
612 diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
613 index 1f12ab1e6030..1c5d0675b43c 100644
614 --- a/arch/powerpc/platforms/powernv/idle.c
615 +++ b/arch/powerpc/platforms/powernv/idle.c
616 @@ -79,7 +79,7 @@ static int pnv_save_sprs_for_deep_states(void)
617 uint64_t msr_val = MSR_IDLE;
618 uint64_t psscr_val = pnv_deepest_stop_psscr_val;
619
620 - for_each_possible_cpu(cpu) {
621 + for_each_present_cpu(cpu) {
622 uint64_t pir = get_hard_smp_processor_id(cpu);
623 uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];
624
625 @@ -814,7 +814,7 @@ static int __init pnv_init_idle_states(void)
626 int cpu;
627
628 pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
629 - for_each_possible_cpu(cpu) {
630 + for_each_present_cpu(cpu) {
631 int base_cpu = cpu_first_thread_sibling(cpu);
632 int idx = cpu_thread_in_core(cpu);
633 int i;
634 diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
635 index 3f9c69d7623a..f7d9b3433a29 100644
636 --- a/arch/powerpc/platforms/powernv/pci-ioda.c
637 +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
638 @@ -3642,7 +3642,6 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
639 WARN_ON(pe->table_group.group);
640 }
641
642 - pnv_pci_ioda2_table_free_pages(tbl);
643 iommu_tce_table_put(tbl);
644 }
645
646 diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
647 index 02168fe25105..cf9bf9b43ec3 100644
648 --- a/arch/um/drivers/vector_kern.c
649 +++ b/arch/um/drivers/vector_kern.c
650 @@ -188,7 +188,7 @@ static int get_transport_options(struct arglist *def)
651 if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
652 return (vec_rx | VECTOR_BPF);
653 if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
654 - return (vec_rx | vec_tx);
655 + return (vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
656 return (vec_rx | vec_tx);
657 }
658
659 @@ -504,15 +504,19 @@ static struct vector_queue *create_queue(
660
661 result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL);
662 if (result == NULL)
663 - goto out_fail;
664 + return NULL;
665 result->max_depth = max_size;
666 result->dev = vp->dev;
667 result->mmsg_vector = kmalloc(
668 (sizeof(struct mmsghdr) * max_size), GFP_KERNEL);
669 + if (result->mmsg_vector == NULL)
670 + goto out_mmsg_fail;
671 result->skbuff_vector = kmalloc(
672 (sizeof(void *) * max_size), GFP_KERNEL);
673 - if (result->mmsg_vector == NULL || result->skbuff_vector == NULL)
674 - goto out_fail;
675 + if (result->skbuff_vector == NULL)
676 + goto out_skb_fail;
677 +
678 + /* further failures can be handled safely by destroy_queue*/
679
680 mmsg_vector = result->mmsg_vector;
681 for (i = 0; i < max_size; i++) {
682 @@ -563,6 +567,11 @@ static struct vector_queue *create_queue(
683 result->head = 0;
684 result->tail = 0;
685 return result;
686 +out_skb_fail:
687 + kfree(result->mmsg_vector);
688 +out_mmsg_fail:
689 + kfree(result);
690 + return NULL;
691 out_fail:
692 destroy_queue(result);
693 return NULL;
694 @@ -1232,9 +1241,8 @@ static int vector_net_open(struct net_device *dev)
695
696 if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
697 if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
698 - vp->options = vp->options | VECTOR_BPF;
699 + vp->options |= VECTOR_BPF;
700 }
701 -
702 if ((vp->options & VECTOR_BPF) != 0)
703 vp->bpf = uml_vector_default_bpf(vp->fds->rx_fd, dev->dev_addr);
704
705 diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
706 index 9de7f1e1dede..7d0df78db727 100644
707 --- a/arch/x86/entry/entry_64_compat.S
708 +++ b/arch/x86/entry/entry_64_compat.S
709 @@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
710 pushq %rdx /* pt_regs->dx */
711 pushq %rcx /* pt_regs->cx */
712 pushq $-ENOSYS /* pt_regs->ax */
713 - pushq %r8 /* pt_regs->r8 */
714 + pushq $0 /* pt_regs->r8 = 0 */
715 xorl %r8d, %r8d /* nospec r8 */
716 - pushq %r9 /* pt_regs->r9 */
717 + pushq $0 /* pt_regs->r9 = 0 */
718 xorl %r9d, %r9d /* nospec r9 */
719 - pushq %r10 /* pt_regs->r10 */
720 + pushq $0 /* pt_regs->r10 = 0 */
721 xorl %r10d, %r10d /* nospec r10 */
722 - pushq %r11 /* pt_regs->r11 */
723 + pushq $0 /* pt_regs->r11 = 0 */
724 xorl %r11d, %r11d /* nospec r11 */
725 pushq %rbx /* pt_regs->rbx */
726 xorl %ebx, %ebx /* nospec rbx */
727 @@ -374,13 +374,13 @@ ENTRY(entry_INT80_compat)
728 pushq %rcx /* pt_regs->cx */
729 xorl %ecx, %ecx /* nospec cx */
730 pushq $-ENOSYS /* pt_regs->ax */
731 - pushq $0 /* pt_regs->r8 = 0 */
732 + pushq %r8 /* pt_regs->r8 */
733 xorl %r8d, %r8d /* nospec r8 */
734 - pushq $0 /* pt_regs->r9 = 0 */
735 + pushq %r9 /* pt_regs->r9 */
736 xorl %r9d, %r9d /* nospec r9 */
737 - pushq $0 /* pt_regs->r10 = 0 */
738 + pushq %r10 /* pt_regs->r10*/
739 xorl %r10d, %r10d /* nospec r10 */
740 - pushq $0 /* pt_regs->r11 = 0 */
741 + pushq %r11 /* pt_regs->r11 */
742 xorl %r11d, %r11d /* nospec r11 */
743 pushq %rbx /* pt_regs->rbx */
744 xorl %ebx, %ebx /* nospec rbx */
745 diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
746 index 042b5e892ed1..14de0432d288 100644
747 --- a/arch/x86/include/asm/barrier.h
748 +++ b/arch/x86/include/asm/barrier.h
749 @@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
750 {
751 unsigned long mask;
752
753 - asm ("cmp %1,%2; sbb %0,%0;"
754 + asm volatile ("cmp %1,%2; sbb %0,%0;"
755 :"=r" (mask)
756 :"g"(size),"r" (index)
757 :"cc");
758 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
759 index efaf2d4f9c3c..d492752f79e1 100644
760 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
761 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
762 @@ -26,6 +26,7 @@
763 #include <linux/delay.h>
764 #include <linux/crash_dump.h>
765 #include <linux/reboot.h>
766 +#include <linux/memory.h>
767
768 #include <asm/uv/uv_mmrs.h>
769 #include <asm/uv/uv_hub.h>
770 @@ -392,6 +393,51 @@ extern int uv_hub_info_version(void)
771 }
772 EXPORT_SYMBOL(uv_hub_info_version);
773
774 +/* Default UV memory block size is 2GB */
775 +static unsigned long mem_block_size = (2UL << 30);
776 +
777 +/* Kernel parameter to specify UV mem block size */
778 +static int parse_mem_block_size(char *ptr)
779 +{
780 + unsigned long size = memparse(ptr, NULL);
781 +
782 + /* Size will be rounded down by set_block_size() below */
783 + mem_block_size = size;
784 + return 0;
785 +}
786 +early_param("uv_memblksize", parse_mem_block_size);
787 +
788 +static __init int adj_blksize(u32 lgre)
789 +{
790 + unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT;
791 + unsigned long size;
792 +
793 + for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
794 + if (IS_ALIGNED(base, size))
795 + break;
796 +
797 + if (size >= mem_block_size)
798 + return 0;
799 +
800 + mem_block_size = size;
801 + return 1;
802 +}
803 +
804 +static __init void set_block_size(void)
805 +{
806 + unsigned int order = ffs(mem_block_size);
807 +
808 + if (order) {
809 + /* adjust for ffs return of 1..64 */
810 + set_memory_block_size_order(order - 1);
811 + pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size);
812 + } else {
813 + /* bad or zero value, default to 1UL << 31 (2GB) */
814 + pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size);
815 + set_memory_block_size_order(31);
816 + }
817 +}
818 +
819 /* Build GAM range lookup table: */
820 static __init void build_uv_gr_table(void)
821 {
822 @@ -1180,23 +1226,30 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
823 << UV_GAM_RANGE_SHFT);
824 int order = 0;
825 char suffix[] = " KMGTPE";
826 + int flag = ' ';
827
828 while (size > 9999 && order < sizeof(suffix)) {
829 size /= 1024;
830 order++;
831 }
832
833 + /* adjust max block size to current range start */
834 + if (gre->type == 1 || gre->type == 2)
835 + if (adj_blksize(lgre))
836 + flag = '*';
837 +
838 if (!index) {
839 pr_info("UV: GAM Range Table...\n");
840 - pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
841 + pr_info("UV: # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
842 }
843 - pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n",
844 + pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d %04x %02x %02x\n",
845 index++,
846 (unsigned long)lgre << UV_GAM_RANGE_SHFT,
847 (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
848 - size, suffix[order],
849 + flag, size, suffix[order],
850 gre->type, gre->nasid, gre->sockid, gre->pnode);
851
852 + /* update to next range start */
853 lgre = gre->limit;
854 if (sock_min > gre->sockid)
855 sock_min = gre->sockid;
856 @@ -1427,6 +1480,7 @@ static void __init uv_system_init_hub(void)
857
858 build_socket_tables();
859 build_uv_gr_table();
860 + set_block_size();
861 uv_init_hub_info(&hub_info);
862 uv_possible_blades = num_possible_nodes();
863 if (!_node_to_pnode)
864 diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
865 index 5bbd06f38ff6..f34d89c01edc 100644
866 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
867 +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
868 @@ -160,6 +160,11 @@ static struct severity {
869 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
870 USER
871 ),
872 + MCESEV(
873 + PANIC, "Data load in unrecoverable area of kernel",
874 + SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
875 + KERNEL
876 + ),
877 #endif
878 MCESEV(
879 PANIC, "Action required: unknown MCACOD",
880 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
881 index 42cf2880d0ed..6f7eda9d5297 100644
882 --- a/arch/x86/kernel/cpu/mcheck/mce.c
883 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
884 @@ -772,23 +772,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
885 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
886 struct pt_regs *regs)
887 {
888 - int i, ret = 0;
889 char *tmp;
890 + int i;
891
892 for (i = 0; i < mca_cfg.banks; i++) {
893 m->status = mce_rdmsrl(msr_ops.status(i));
894 - if (m->status & MCI_STATUS_VAL) {
895 - __set_bit(i, validp);
896 - if (quirk_no_way_out)
897 - quirk_no_way_out(i, m, regs);
898 - }
899 + if (!(m->status & MCI_STATUS_VAL))
900 + continue;
901 +
902 + __set_bit(i, validp);
903 + if (quirk_no_way_out)
904 + quirk_no_way_out(i, m, regs);
905
906 if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
907 + mce_read_aux(m, i);
908 *msg = tmp;
909 - ret = 1;
910 + return 1;
911 }
912 }
913 - return ret;
914 + return 0;
915 }
916
917 /*
918 @@ -1205,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code)
919 lmce = m.mcgstatus & MCG_STATUS_LMCES;
920
921 /*
922 + * Local machine check may already know that we have to panic.
923 + * Broadcast machine check begins rendezvous in mce_start()
924 * Go through all banks in exclusion of the other CPUs. This way we
925 * don't report duplicated events on shared banks because the first one
926 - * to see it will clear it. If this is a Local MCE, then no need to
927 - * perform rendezvous.
928 + * to see it will clear it.
929 */
930 - if (!lmce)
931 + if (lmce) {
932 + if (no_way_out)
933 + mce_panic("Fatal local machine check", &m, msg);
934 + } else {
935 order = mce_start(&no_way_out);
936 + }
937
938 for (i = 0; i < cfg->banks; i++) {
939 __clear_bit(i, toclear);
940 @@ -1287,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
941 no_way_out = worst >= MCE_PANIC_SEVERITY;
942 } else {
943 /*
944 - * Local MCE skipped calling mce_reign()
945 - * If we found a fatal error, we need to panic here.
946 + * If there was a fatal machine check we should have
947 + * already called mce_panic earlier in this function.
948 + * Since we re-read the banks, we might have found
949 + * something new. Check again to see if we found a
950 + * fatal error. We call "mce_severity()" again to
951 + * make sure we have the right "msg".
952 */
953 - if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
954 - mce_panic("Machine check from unknown source",
955 - NULL, NULL);
956 + if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
957 + mce_severity(&m, cfg->tolerant, &msg, true);
958 + mce_panic("Local fatal machine check!", &m, msg);
959 + }
960 }
961
962 /*
963 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
964 index 6a2cb1442e05..aec38a170dbc 100644
965 --- a/arch/x86/kernel/e820.c
966 +++ b/arch/x86/kernel/e820.c
967 @@ -1246,6 +1246,7 @@ void __init e820__memblock_setup(void)
968 {
969 int i;
970 u64 end;
971 + u64 addr = 0;
972
973 /*
974 * The bootstrap memblock region count maximum is 128 entries
975 @@ -1262,13 +1263,21 @@ void __init e820__memblock_setup(void)
976 struct e820_entry *entry = &e820_table->entries[i];
977
978 end = entry->addr + entry->size;
979 + if (addr < entry->addr)
980 + memblock_reserve(addr, entry->addr - addr);
981 + addr = end;
982 if (end != (resource_size_t)end)
983 continue;
984
985 + /*
986 + * all !E820_TYPE_RAM ranges (including gap ranges) are put
987 + * into memblock.reserved to make sure that struct pages in
988 + * such regions are not left uninitialized after bootup.
989 + */
990 if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
991 - continue;
992 -
993 - memblock_add(entry->addr, entry->size);
994 + memblock_reserve(entry->addr, entry->size);
995 + else
996 + memblock_add(entry->addr, entry->size);
997 }
998
999 /* Throw away partial pages: */
1000 diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
1001 index 697a4ce04308..736348ead421 100644
1002 --- a/arch/x86/kernel/quirks.c
1003 +++ b/arch/x86/kernel/quirks.c
1004 @@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
1005 /* Skylake */
1006 static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
1007 {
1008 - u32 capid0;
1009 + u32 capid0, capid5;
1010
1011 pci_read_config_dword(pdev, 0x84, &capid0);
1012 + pci_read_config_dword(pdev, 0x98, &capid5);
1013
1014 - if ((capid0 & 0xc0) == 0xc0)
1015 + /*
1016 + * CAPID0{7:6} indicate whether this is an advanced RAS SKU
1017 + * CAPID5{8:5} indicate that various NVDIMM usage modes are
1018 + * enabled, so memory machine check recovery is also enabled.
1019 + */
1020 + if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
1021 static_branch_inc(&mcsafe_key);
1022 +
1023 }
1024 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
1025 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
1026 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
1027 index 03f3d7695dac..162a31d80ad5 100644
1028 --- a/arch/x86/kernel/traps.c
1029 +++ b/arch/x86/kernel/traps.c
1030 @@ -834,16 +834,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
1031 char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
1032 "simd exception";
1033
1034 - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
1035 - return;
1036 cond_local_irq_enable(regs);
1037
1038 if (!user_mode(regs)) {
1039 - if (!fixup_exception(regs, trapnr)) {
1040 - task->thread.error_code = error_code;
1041 - task->thread.trap_nr = trapnr;
1042 + if (fixup_exception(regs, trapnr))
1043 + return;
1044 +
1045 + task->thread.error_code = error_code;
1046 + task->thread.trap_nr = trapnr;
1047 +
1048 + if (notify_die(DIE_TRAP, str, regs, error_code,
1049 + trapnr, SIGFPE) != NOTIFY_STOP)
1050 die(str, regs, error_code);
1051 - }
1052 return;
1053 }
1054
1055 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
1056 index fec82b577c18..cee58a972cb2 100644
1057 --- a/arch/x86/mm/init.c
1058 +++ b/arch/x86/mm/init.c
1059 @@ -706,7 +706,9 @@ void __init init_mem_mapping(void)
1060 */
1061 int devmem_is_allowed(unsigned long pagenr)
1062 {
1063 - if (page_is_ram(pagenr)) {
1064 + if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
1065 + IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
1066 + != REGION_DISJOINT) {
1067 /*
1068 * For disallowed memory regions in the low 1MB range,
1069 * request that the page be shown as all zeros.
1070 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
1071 index 0a400606dea0..20d8bf5fbceb 100644
1072 --- a/arch/x86/mm/init_64.c
1073 +++ b/arch/x86/mm/init_64.c
1074 @@ -1350,16 +1350,28 @@ int kern_addr_valid(unsigned long addr)
1075 /* Amount of ram needed to start using large blocks */
1076 #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
1077
1078 +/* Adjustable memory block size */
1079 +static unsigned long set_memory_block_size;
1080 +int __init set_memory_block_size_order(unsigned int order)
1081 +{
1082 + unsigned long size = 1UL << order;
1083 +
1084 + if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
1085 + return -EINVAL;
1086 +
1087 + set_memory_block_size = size;
1088 + return 0;
1089 +}
1090 +
1091 static unsigned long probe_memory_block_size(void)
1092 {
1093 unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
1094 unsigned long bz;
1095
1096 - /* If this is UV system, always set 2G block size */
1097 - if (is_uv_system()) {
1098 - bz = MAX_BLOCK_SIZE;
1099 + /* If memory block size has been set, then use it */
1100 + bz = set_memory_block_size;
1101 + if (bz)
1102 goto done;
1103 - }
1104
1105 /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
1106 if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
1107 diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
1108 index bed7e7f4e44c..84fbfaba8404 100644
1109 --- a/arch/x86/platform/efi/efi_64.c
1110 +++ b/arch/x86/platform/efi/efi_64.c
1111 @@ -166,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
1112 pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
1113 set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
1114
1115 - if (!(pgd_val(*pgd) & _PAGE_PRESENT))
1116 + if (!pgd_present(*pgd))
1117 continue;
1118
1119 for (i = 0; i < PTRS_PER_P4D; i++) {
1120 p4d = p4d_offset(pgd,
1121 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
1122
1123 - if (!(p4d_val(*p4d) & _PAGE_PRESENT))
1124 + if (!p4d_present(*p4d))
1125 continue;
1126
1127 pud = (pud_t *)p4d_page_vaddr(*p4d);
1128 diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
1129 index 2e20ae2fa2d6..e3b18ad49889 100644
1130 --- a/arch/x86/xen/smp_pv.c
1131 +++ b/arch/x86/xen/smp_pv.c
1132 @@ -32,6 +32,7 @@
1133 #include <xen/interface/vcpu.h>
1134 #include <xen/interface/xenpmu.h>
1135
1136 +#include <asm/spec-ctrl.h>
1137 #include <asm/xen/interface.h>
1138 #include <asm/xen/hypercall.h>
1139
1140 @@ -70,6 +71,8 @@ static void cpu_bringup(void)
1141 cpu_data(cpu).x86_max_cores = 1;
1142 set_cpu_sibling_map(cpu);
1143
1144 + speculative_store_bypass_ht_init();
1145 +
1146 xen_setup_cpu_clockevents();
1147
1148 notify_cpu_starting(cpu);
1149 @@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
1150 }
1151 set_cpu_sibling_map(0);
1152
1153 + speculative_store_bypass_ht_init();
1154 +
1155 xen_pmu_init(0);
1156
1157 if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
1158 diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
1159 index 32c5207f1226..84a70b8cbe33 100644
1160 --- a/arch/xtensa/kernel/traps.c
1161 +++ b/arch/xtensa/kernel/traps.c
1162 @@ -338,7 +338,7 @@ do_unaligned_user (struct pt_regs *regs)
1163 info.si_errno = 0;
1164 info.si_code = BUS_ADRALN;
1165 info.si_addr = (void *) regs->excvaddr;
1166 - force_sig_info(SIGSEGV, &info, current);
1167 + force_sig_info(SIGBUS, &info, current);
1168
1169 }
1170 #endif
1171 diff --git a/block/blk-core.c b/block/blk-core.c
1172 index 85909b431eb0..b559b9d4f1a2 100644
1173 --- a/block/blk-core.c
1174 +++ b/block/blk-core.c
1175 @@ -3487,6 +3487,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
1176 dst->cpu = src->cpu;
1177 dst->__sector = blk_rq_pos(src);
1178 dst->__data_len = blk_rq_bytes(src);
1179 + if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
1180 + dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
1181 + dst->special_vec = src->special_vec;
1182 + }
1183 dst->nr_phys_segments = src->nr_phys_segments;
1184 dst->ioprio = src->ioprio;
1185 dst->extra_len = src->extra_len;
1186 diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
1187 index 7d81e6bb461a..b6cabac4b62b 100644
1188 --- a/crypto/asymmetric_keys/x509_cert_parser.c
1189 +++ b/crypto/asymmetric_keys/x509_cert_parser.c
1190 @@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen,
1191 return -EINVAL;
1192 }
1193
1194 + if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
1195 + /* Discard the BIT STRING metadata */
1196 + if (vlen < 1 || *(const u8 *)value != 0)
1197 + return -EBADMSG;
1198 +
1199 + value++;
1200 + vlen--;
1201 + }
1202 +
1203 ctx->cert->raw_sig = value;
1204 ctx->cert->raw_sig_size = vlen;
1205 return 0;
1206 diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
1207 index 2bcffec8dbf0..eb091375c873 100644
1208 --- a/drivers/acpi/acpi_lpss.c
1209 +++ b/drivers/acpi/acpi_lpss.c
1210 @@ -22,6 +22,7 @@
1211 #include <linux/pm_domain.h>
1212 #include <linux/pm_runtime.h>
1213 #include <linux/pwm.h>
1214 +#include <linux/suspend.h>
1215 #include <linux/delay.h>
1216
1217 #include "internal.h"
1218 @@ -229,11 +230,13 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = {
1219
1220 static const struct lpss_device_desc byt_pwm_dev_desc = {
1221 .flags = LPSS_SAVE_CTX,
1222 + .prv_offset = 0x800,
1223 .setup = byt_pwm_setup,
1224 };
1225
1226 static const struct lpss_device_desc bsw_pwm_dev_desc = {
1227 .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
1228 + .prv_offset = 0x800,
1229 .setup = bsw_pwm_setup,
1230 };
1231
1232 @@ -940,9 +943,10 @@ static void lpss_iosf_exit_d3_state(void)
1233 mutex_unlock(&lpss_iosf_mutex);
1234 }
1235
1236 -static int acpi_lpss_suspend(struct device *dev, bool wakeup)
1237 +static int acpi_lpss_suspend(struct device *dev, bool runtime)
1238 {
1239 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1240 + bool wakeup = runtime || device_may_wakeup(dev);
1241 int ret;
1242
1243 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
1244 @@ -955,13 +959,14 @@ static int acpi_lpss_suspend(struct device *dev, bool wakeup)
1245 * wrong status for devices being about to be powered off. See
1246 * lpss_iosf_enter_d3_state() for further information.
1247 */
1248 - if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
1249 + if ((runtime || !pm_suspend_via_firmware()) &&
1250 + lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
1251 lpss_iosf_enter_d3_state();
1252
1253 return ret;
1254 }
1255
1256 -static int acpi_lpss_resume(struct device *dev)
1257 +static int acpi_lpss_resume(struct device *dev, bool runtime)
1258 {
1259 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
1260 int ret;
1261 @@ -970,7 +975,8 @@ static int acpi_lpss_resume(struct device *dev)
1262 * This call is kept first to be in symmetry with
1263 * acpi_lpss_runtime_suspend() one.
1264 */
1265 - if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
1266 + if ((runtime || !pm_resume_via_firmware()) &&
1267 + lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
1268 lpss_iosf_exit_d3_state();
1269
1270 ret = acpi_dev_resume(dev);
1271 @@ -994,12 +1000,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
1272 return 0;
1273
1274 ret = pm_generic_suspend_late(dev);
1275 - return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
1276 + return ret ? ret : acpi_lpss_suspend(dev, false);
1277 }
1278
1279 static int acpi_lpss_resume_early(struct device *dev)
1280 {
1281 - int ret = acpi_lpss_resume(dev);
1282 + int ret = acpi_lpss_resume(dev, false);
1283
1284 return ret ? ret : pm_generic_resume_early(dev);
1285 }
1286 @@ -1014,7 +1020,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
1287
1288 static int acpi_lpss_runtime_resume(struct device *dev)
1289 {
1290 - int ret = acpi_lpss_resume(dev);
1291 + int ret = acpi_lpss_resume(dev, true);
1292
1293 return ret ? ret : pm_generic_runtime_resume(dev);
1294 }
1295 diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
1296 index 2c2ed9cf8796..f9413755177b 100644
1297 --- a/drivers/auxdisplay/Kconfig
1298 +++ b/drivers/auxdisplay/Kconfig
1299 @@ -14,9 +14,6 @@ menuconfig AUXDISPLAY
1300
1301 If you say N, all options in this submenu will be skipped and disabled.
1302
1303 -config CHARLCD
1304 - tristate "Character LCD core support" if COMPILE_TEST
1305 -
1306 if AUXDISPLAY
1307
1308 config HD44780
1309 @@ -157,8 +154,6 @@ config HT16K33
1310 Say yes here to add support for Holtek HT16K33, RAM mapping 16*8
1311 LED controller driver with keyscan.
1312
1313 -endif # AUXDISPLAY
1314 -
1315 config ARM_CHARLCD
1316 bool "ARM Ltd. Character LCD Driver"
1317 depends on PLAT_VERSATILE
1318 @@ -169,6 +164,8 @@ config ARM_CHARLCD
1319 line and the Linux version on the second line, but that's
1320 still useful.
1321
1322 +endif # AUXDISPLAY
1323 +
1324 config PANEL
1325 tristate "Parallel port LCD/Keypad Panel support"
1326 depends on PARPORT
1327 @@ -448,3 +445,6 @@ config PANEL_BOOT_MESSAGE
1328 printf()-formatted message is valid with newline and escape codes.
1329
1330 endif # PANEL
1331 +
1332 +config CHARLCD
1333 + tristate "Character LCD core support" if COMPILE_TEST
1334 diff --git a/drivers/base/core.c b/drivers/base/core.c
1335 index d680fd030316..f4ba878dd2dc 100644
1336 --- a/drivers/base/core.c
1337 +++ b/drivers/base/core.c
1338 @@ -216,6 +216,13 @@ struct device_link *device_link_add(struct device *consumer,
1339 link->rpm_active = true;
1340 }
1341 pm_runtime_new_link(consumer);
1342 + /*
1343 + * If the link is being added by the consumer driver at probe
1344 + * time, balance the decrementation of the supplier's runtime PM
1345 + * usage counter after consumer probe in driver_probe_device().
1346 + */
1347 + if (consumer->links.status == DL_DEV_PROBING)
1348 + pm_runtime_get_noresume(supplier);
1349 }
1350 get_device(supplier);
1351 link->supplier = supplier;
1352 @@ -235,12 +242,12 @@ struct device_link *device_link_add(struct device *consumer,
1353 switch (consumer->links.status) {
1354 case DL_DEV_PROBING:
1355 /*
1356 - * Balance the decrementation of the supplier's
1357 - * runtime PM usage counter after consumer probe
1358 - * in driver_probe_device().
1359 + * Some callers expect the link creation during
1360 + * consumer driver probe to resume the supplier
1361 + * even without DL_FLAG_RPM_ACTIVE.
1362 */
1363 if (flags & DL_FLAG_PM_RUNTIME)
1364 - pm_runtime_get_sync(supplier);
1365 + pm_runtime_resume(supplier);
1366
1367 link->status = DL_STATE_CONSUMER_PROBE;
1368 break;
1369 diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
1370 index 1ea0e2502e8e..ef6cf3d5d2b5 100644
1371 --- a/drivers/base/power/domain.c
1372 +++ b/drivers/base/power/domain.c
1373 @@ -2246,6 +2246,9 @@ int genpd_dev_pm_attach(struct device *dev)
1374 genpd_lock(pd);
1375 ret = genpd_power_on(pd, 0);
1376 genpd_unlock(pd);
1377 +
1378 + if (ret)
1379 + genpd_remove_device(pd, dev);
1380 out:
1381 return ret ? -EPROBE_DEFER : 0;
1382 }
1383 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
1384 index 33b36fea1d73..472afeed1d2f 100644
1385 --- a/drivers/block/rbd.c
1386 +++ b/drivers/block/rbd.c
1387 @@ -3397,7 +3397,6 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev)
1388 {
1389 dout("%s rbd_dev %p\n", __func__, rbd_dev);
1390
1391 - cancel_delayed_work_sync(&rbd_dev->watch_dwork);
1392 cancel_work_sync(&rbd_dev->acquired_lock_work);
1393 cancel_work_sync(&rbd_dev->released_lock_work);
1394 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
1395 @@ -3415,6 +3414,7 @@ static void rbd_unregister_watch(struct rbd_device *rbd_dev)
1396 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
1397 mutex_unlock(&rbd_dev->watch_mutex);
1398
1399 + cancel_delayed_work_sync(&rbd_dev->watch_dwork);
1400 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
1401 }
1402
1403 diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
1404 index 05ec530b8a3a..330e9b29e145 100644
1405 --- a/drivers/bluetooth/hci_qca.c
1406 +++ b/drivers/bluetooth/hci_qca.c
1407 @@ -935,6 +935,12 @@ static int qca_setup(struct hci_uart *hu)
1408 } else if (ret == -ENOENT) {
1409 /* No patch/nvm-config found, run with original fw/config */
1410 ret = 0;
1411 + } else if (ret == -EAGAIN) {
1412 + /*
1413 + * Userspace firmware loader will return -EAGAIN in case no
1414 + * patch/nvm-config is found, so run with original fw/config.
1415 + */
1416 + ret = 0;
1417 }
1418
1419 /* Setup bdaddr */
1420 diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
1421 index 91bb98c42a1c..aaf9e5afaad4 100644
1422 --- a/drivers/char/hw_random/core.c
1423 +++ b/drivers/char/hw_random/core.c
1424 @@ -516,11 +516,18 @@ EXPORT_SYMBOL_GPL(hwrng_register);
1425
1426 void hwrng_unregister(struct hwrng *rng)
1427 {
1428 + int err;
1429 +
1430 mutex_lock(&rng_mutex);
1431
1432 list_del(&rng->list);
1433 - if (current_rng == rng)
1434 - enable_best_rng();
1435 + if (current_rng == rng) {
1436 + err = enable_best_rng();
1437 + if (err) {
1438 + drop_current_rng();
1439 + cur_rng_set_by_user = 0;
1440 + }
1441 + }
1442
1443 if (list_empty(&rng_list)) {
1444 mutex_unlock(&rng_mutex);
1445 diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
1446 index fd4ea8d87d4b..a3397664f800 100644
1447 --- a/drivers/char/ipmi/ipmi_bt_sm.c
1448 +++ b/drivers/char/ipmi/ipmi_bt_sm.c
1449 @@ -504,11 +504,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
1450 if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
1451 BT_CONTROL(BT_H_BUSY);
1452
1453 + bt->timeout = bt->BT_CAP_req2rsp;
1454 +
1455 /* Read BT capabilities if it hasn't been done yet */
1456 if (!bt->BT_CAP_outreqs)
1457 BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
1458 SI_SM_CALL_WITHOUT_DELAY);
1459 - bt->timeout = bt->BT_CAP_req2rsp;
1460 BT_SI_SM_RETURN(SI_SM_IDLE);
1461
1462 case BT_STATE_XACTION_START:
1463 diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
1464 index 230b99288024..e4a04b2d3c32 100644
1465 --- a/drivers/char/tpm/tpm-dev-common.c
1466 +++ b/drivers/char/tpm/tpm-dev-common.c
1467 @@ -37,7 +37,7 @@ static void timeout_work(struct work_struct *work)
1468 struct file_priv *priv = container_of(work, struct file_priv, work);
1469
1470 mutex_lock(&priv->buffer_mutex);
1471 - atomic_set(&priv->data_pending, 0);
1472 + priv->data_pending = 0;
1473 memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
1474 mutex_unlock(&priv->buffer_mutex);
1475 }
1476 @@ -46,7 +46,6 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
1477 struct file_priv *priv)
1478 {
1479 priv->chip = chip;
1480 - atomic_set(&priv->data_pending, 0);
1481 mutex_init(&priv->buffer_mutex);
1482 timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
1483 INIT_WORK(&priv->work, timeout_work);
1484 @@ -58,29 +57,24 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
1485 size_t size, loff_t *off)
1486 {
1487 struct file_priv *priv = file->private_data;
1488 - ssize_t ret_size;
1489 - ssize_t orig_ret_size;
1490 + ssize_t ret_size = 0;
1491 int rc;
1492
1493 del_singleshot_timer_sync(&priv->user_read_timer);
1494 flush_work(&priv->work);
1495 - ret_size = atomic_read(&priv->data_pending);
1496 - if (ret_size > 0) { /* relay data */
1497 - orig_ret_size = ret_size;
1498 - if (size < ret_size)
1499 - ret_size = size;
1500 + mutex_lock(&priv->buffer_mutex);
1501
1502 - mutex_lock(&priv->buffer_mutex);
1503 + if (priv->data_pending) {
1504 + ret_size = min_t(ssize_t, size, priv->data_pending);
1505 rc = copy_to_user(buf, priv->data_buffer, ret_size);
1506 - memset(priv->data_buffer, 0, orig_ret_size);
1507 + memset(priv->data_buffer, 0, priv->data_pending);
1508 if (rc)
1509 ret_size = -EFAULT;
1510
1511 - mutex_unlock(&priv->buffer_mutex);
1512 + priv->data_pending = 0;
1513 }
1514
1515 - atomic_set(&priv->data_pending, 0);
1516 -
1517 + mutex_unlock(&priv->buffer_mutex);
1518 return ret_size;
1519 }
1520
1521 @@ -91,17 +85,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
1522 size_t in_size = size;
1523 ssize_t out_size;
1524
1525 + if (in_size > TPM_BUFSIZE)
1526 + return -E2BIG;
1527 +
1528 + mutex_lock(&priv->buffer_mutex);
1529 +
1530 /* Cannot perform a write until the read has cleared either via
1531 * tpm_read or a user_read_timer timeout. This also prevents split
1532 * buffered writes from blocking here.
1533 */
1534 - if (atomic_read(&priv->data_pending) != 0)
1535 + if (priv->data_pending != 0) {
1536 + mutex_unlock(&priv->buffer_mutex);
1537 return -EBUSY;
1538 -
1539 - if (in_size > TPM_BUFSIZE)
1540 - return -E2BIG;
1541 -
1542 - mutex_lock(&priv->buffer_mutex);
1543 + }
1544
1545 if (copy_from_user
1546 (priv->data_buffer, (void __user *) buf, in_size)) {
1547 @@ -132,7 +128,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
1548 return out_size;
1549 }
1550
1551 - atomic_set(&priv->data_pending, out_size);
1552 + priv->data_pending = out_size;
1553 mutex_unlock(&priv->buffer_mutex);
1554
1555 /* Set a timeout by which the reader must come claim the result */
1556 @@ -149,5 +145,5 @@ void tpm_common_release(struct file *file, struct file_priv *priv)
1557 del_singleshot_timer_sync(&priv->user_read_timer);
1558 flush_work(&priv->work);
1559 file->private_data = NULL;
1560 - atomic_set(&priv->data_pending, 0);
1561 + priv->data_pending = 0;
1562 }
1563 diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
1564 index ba3b6f9dacf7..b24cfb4d3ee1 100644
1565 --- a/drivers/char/tpm/tpm-dev.h
1566 +++ b/drivers/char/tpm/tpm-dev.h
1567 @@ -8,7 +8,7 @@ struct file_priv {
1568 struct tpm_chip *chip;
1569
1570 /* Data passed to and from the tpm via the read/write calls */
1571 - atomic_t data_pending;
1572 + size_t data_pending;
1573 struct mutex buffer_mutex;
1574
1575 struct timer_list user_read_timer; /* user needs to claim result */
1576 diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
1577 index 4e4014eabdb9..6122d3276f72 100644
1578 --- a/drivers/char/tpm/tpm2-space.c
1579 +++ b/drivers/char/tpm/tpm2-space.c
1580 @@ -102,8 +102,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
1581 * TPM_RC_REFERENCE_H0 means the session has been
1582 * flushed outside the space
1583 */
1584 - rc = -ENOENT;
1585 + *handle = 0;
1586 tpm_buf_destroy(&tbuf);
1587 + return -ENOENT;
1588 } else if (rc > 0) {
1589 dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
1590 __func__, rc);
1591 diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
1592 index 7d3223fc7161..72b6091eb7b9 100644
1593 --- a/drivers/clk/at91/clk-pll.c
1594 +++ b/drivers/clk/at91/clk-pll.c
1595 @@ -132,19 +132,8 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
1596 unsigned long parent_rate)
1597 {
1598 struct clk_pll *pll = to_clk_pll(hw);
1599 - unsigned int pllr;
1600 - u16 mul;
1601 - u8 div;
1602 -
1603 - regmap_read(pll->regmap, PLL_REG(pll->id), &pllr);
1604 -
1605 - div = PLL_DIV(pllr);
1606 - mul = PLL_MUL(pllr, pll->layout);
1607 -
1608 - if (!div || !mul)
1609 - return 0;
1610
1611 - return (parent_rate / div) * (mul + 1);
1612 + return (parent_rate / pll->div) * (pll->mul + 1);
1613 }
1614
1615 static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
1616 diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
1617 index 5eb50c31e455..2c23e7d7ba28 100644
1618 --- a/drivers/clk/clk-aspeed.c
1619 +++ b/drivers/clk/clk-aspeed.c
1620 @@ -88,7 +88,7 @@ static const struct aspeed_gate_data aspeed_gates[] = {
1621 [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
1622 [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
1623 [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
1624 - [ASPEED_CLK_GATE_BCLK] = { 4, 10, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
1625 + [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
1626 [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */
1627 [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
1628 [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
1629 @@ -297,7 +297,7 @@ static const u8 aspeed_resets[] = {
1630 [ASPEED_RESET_JTAG_MASTER] = 22,
1631 [ASPEED_RESET_MIC] = 18,
1632 [ASPEED_RESET_PWM] = 9,
1633 - [ASPEED_RESET_PCIVGA] = 8,
1634 + [ASPEED_RESET_PECI] = 10,
1635 [ASPEED_RESET_I2C] = 2,
1636 [ASPEED_RESET_AHB] = 1,
1637 };
1638 diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
1639 index d0524ec71aad..d0d320180c51 100644
1640 --- a/drivers/clk/meson/meson8b.c
1641 +++ b/drivers/clk/meson/meson8b.c
1642 @@ -246,6 +246,13 @@ static struct clk_regmap meson8b_fclk_div2 = {
1643 .ops = &clk_regmap_gate_ops,
1644 .parent_names = (const char *[]){ "fclk_div2_div" },
1645 .num_parents = 1,
1646 + /*
1647 + * FIXME: Ethernet with a RGMII PHYs is not working if
1648 + * fclk_div2 is disabled. it is currently unclear why this
1649 + * is. keep it enabled until the Ethernet driver knows how
1650 + * to manage this clock.
1651 + */
1652 + .flags = CLK_IS_CRITICAL,
1653 },
1654 };
1655
1656 diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
1657 index 4e88e980fb76..69a7c756658b 100644
1658 --- a/drivers/clk/renesas/renesas-cpg-mssr.c
1659 +++ b/drivers/clk/renesas/renesas-cpg-mssr.c
1660 @@ -258,8 +258,9 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
1661 dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
1662 PTR_ERR(clk));
1663 else
1664 - dev_dbg(dev, "clock (%u, %u) is %pC at %pCr Hz\n",
1665 - clkspec->args[0], clkspec->args[1], clk, clk);
1666 + dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
1667 + clkspec->args[0], clkspec->args[1], clk,
1668 + clk_get_rate(clk));
1669 return clk;
1670 }
1671
1672 @@ -326,7 +327,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
1673 if (IS_ERR_OR_NULL(clk))
1674 goto fail;
1675
1676 - dev_dbg(dev, "Core clock %pC at %pCr Hz\n", clk, clk);
1677 + dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
1678 priv->clks[id] = clk;
1679 return;
1680
1681 @@ -392,7 +393,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
1682 if (IS_ERR(clk))
1683 goto fail;
1684
1685 - dev_dbg(dev, "Module clock %pC at %pCr Hz\n", clk, clk);
1686 + dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
1687 priv->clks[id] = clk;
1688 priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
1689 return;
1690 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
1691 index 17e566afbb41..bd3f0a9d5e60 100644
1692 --- a/drivers/cpufreq/intel_pstate.c
1693 +++ b/drivers/cpufreq/intel_pstate.c
1694 @@ -284,6 +284,7 @@ struct pstate_funcs {
1695 static struct pstate_funcs pstate_funcs __read_mostly;
1696
1697 static int hwp_active __read_mostly;
1698 +static int hwp_mode_bdw __read_mostly;
1699 static bool per_cpu_limits __read_mostly;
1700
1701 static struct cpufreq_driver *intel_pstate_driver __read_mostly;
1702 @@ -1370,7 +1371,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
1703 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
1704 cpu->pstate.scaling = pstate_funcs.get_scaling();
1705 cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
1706 - cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1707 +
1708 + if (hwp_active && !hwp_mode_bdw) {
1709 + unsigned int phy_max, current_max;
1710 +
1711 + intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
1712 + cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
1713 + } else {
1714 + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1715 + }
1716
1717 if (pstate_funcs.get_aperf_mperf_shift)
1718 cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
1719 @@ -2252,28 +2261,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
1720 static inline void intel_pstate_request_control_from_smm(void) {}
1721 #endif /* CONFIG_ACPI */
1722
1723 +#define INTEL_PSTATE_HWP_BROADWELL 0x01
1724 +
1725 +#define ICPU_HWP(model, hwp_mode) \
1726 + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
1727 +
1728 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
1729 - { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
1730 + ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
1731 + ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
1732 + ICPU_HWP(X86_MODEL_ANY, 0),
1733 {}
1734 };
1735
1736 static int __init intel_pstate_init(void)
1737 {
1738 + const struct x86_cpu_id *id;
1739 int rc;
1740
1741 if (no_load)
1742 return -ENODEV;
1743
1744 - if (x86_match_cpu(hwp_support_ids)) {
1745 + id = x86_match_cpu(hwp_support_ids);
1746 + if (id) {
1747 copy_cpu_funcs(&core_funcs);
1748 if (!no_hwp) {
1749 hwp_active++;
1750 + hwp_mode_bdw = id->driver_data;
1751 intel_pstate.attr = hwp_cpufreq_attrs;
1752 goto hwp_cpu_matched;
1753 }
1754 } else {
1755 - const struct x86_cpu_id *id;
1756 -
1757 id = x86_match_cpu(intel_pstate_cpu_ids);
1758 if (!id)
1759 return -ENODEV;
1760 diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
1761 index 1a8234e706bc..d29e4f041efe 100644
1762 --- a/drivers/cpuidle/cpuidle-powernv.c
1763 +++ b/drivers/cpuidle/cpuidle-powernv.c
1764 @@ -43,9 +43,31 @@ struct stop_psscr_table {
1765
1766 static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX] __read_mostly;
1767
1768 -static u64 snooze_timeout __read_mostly;
1769 +static u64 default_snooze_timeout __read_mostly;
1770 static bool snooze_timeout_en __read_mostly;
1771
1772 +static u64 get_snooze_timeout(struct cpuidle_device *dev,
1773 + struct cpuidle_driver *drv,
1774 + int index)
1775 +{
1776 + int i;
1777 +
1778 + if (unlikely(!snooze_timeout_en))
1779 + return default_snooze_timeout;
1780 +
1781 + for (i = index + 1; i < drv->state_count; i++) {
1782 + struct cpuidle_state *s = &drv->states[i];
1783 + struct cpuidle_state_usage *su = &dev->states_usage[i];
1784 +
1785 + if (s->disabled || su->disable)
1786 + continue;
1787 +
1788 + return s->target_residency * tb_ticks_per_usec;
1789 + }
1790 +
1791 + return default_snooze_timeout;
1792 +}
1793 +
1794 static int snooze_loop(struct cpuidle_device *dev,
1795 struct cpuidle_driver *drv,
1796 int index)
1797 @@ -56,7 +78,7 @@ static int snooze_loop(struct cpuidle_device *dev,
1798
1799 local_irq_enable();
1800
1801 - snooze_exit_time = get_tb() + snooze_timeout;
1802 + snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
1803 ppc64_runlatch_off();
1804 HMT_very_low();
1805 while (!need_resched()) {
1806 @@ -465,11 +487,9 @@ static int powernv_idle_probe(void)
1807 cpuidle_state_table = powernv_states;
1808 /* Device tree can indicate more idle states */
1809 max_idle_state = powernv_add_idle_states();
1810 - if (max_idle_state > 1) {
1811 + default_snooze_timeout = TICK_USEC * tb_ticks_per_usec;
1812 + if (max_idle_state > 1)
1813 snooze_timeout_en = true;
1814 - snooze_timeout = powernv_states[1].target_residency *
1815 - tb_ticks_per_usec;
1816 - }
1817 } else
1818 return -ENODEV;
1819
1820 diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
1821 index 9d08cea3f1b0..9f5f35362f27 100644
1822 --- a/drivers/firmware/efi/libstub/tpm.c
1823 +++ b/drivers/firmware/efi/libstub/tpm.c
1824 @@ -64,7 +64,7 @@ void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
1825 efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
1826 efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
1827 efi_status_t status;
1828 - efi_physical_addr_t log_location, log_last_entry;
1829 + efi_physical_addr_t log_location = 0, log_last_entry = 0;
1830 struct linux_efi_tpm_eventlog *log_tbl = NULL;
1831 unsigned long first_entry_addr, last_entry_addr;
1832 size_t log_size, last_entry_size;
1833 diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
1834 index 3b73dee6fdc6..e97105ae4158 100644
1835 --- a/drivers/hwmon/k10temp.c
1836 +++ b/drivers/hwmon/k10temp.c
1837 @@ -37,6 +37,10 @@ MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
1838 /* Provide lock for writing to NB_SMU_IND_ADDR */
1839 static DEFINE_MUTEX(nb_smu_ind_mutex);
1840
1841 +#ifndef PCI_DEVICE_ID_AMD_15H_M70H_NB_F3
1842 +#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3 0x15b3
1843 +#endif
1844 +
1845 #ifndef PCI_DEVICE_ID_AMD_17H_DF_F3
1846 #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
1847 #endif
1848 @@ -320,6 +324,7 @@ static const struct pci_device_id k10temp_id_table[] = {
1849 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
1850 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
1851 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
1852 + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M70H_NB_F3) },
1853 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
1854 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
1855 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
1856 diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
1857 index 3df0efd69ae3..1147bddb8b2c 100644
1858 --- a/drivers/i2c/algos/i2c-algo-bit.c
1859 +++ b/drivers/i2c/algos/i2c-algo-bit.c
1860 @@ -649,11 +649,6 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
1861 if (bit_adap->getscl == NULL)
1862 adap->quirks = &i2c_bit_quirk_no_clk_stretch;
1863
1864 - /* Bring bus to a known state. Looks like STOP if bus is not free yet */
1865 - setscl(bit_adap, 1);
1866 - udelay(bit_adap->udelay);
1867 - setsda(bit_adap, 1);
1868 -
1869 ret = add_adapter(adap);
1870 if (ret < 0)
1871 return ret;
1872 diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
1873 index 58abb3eced58..20b81bec0b0b 100644
1874 --- a/drivers/i2c/busses/i2c-gpio.c
1875 +++ b/drivers/i2c/busses/i2c-gpio.c
1876 @@ -279,9 +279,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
1877 * required for an I2C bus.
1878 */
1879 if (pdata->scl_is_open_drain)
1880 - gflags = GPIOD_OUT_LOW;
1881 + gflags = GPIOD_OUT_HIGH;
1882 else
1883 - gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
1884 + gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
1885 priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags);
1886 if (IS_ERR(priv->scl))
1887 return PTR_ERR(priv->scl);
1888 diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
1889 index f33dadf7b262..562f125235db 100644
1890 --- a/drivers/iio/accel/sca3000.c
1891 +++ b/drivers/iio/accel/sca3000.c
1892 @@ -1277,7 +1277,7 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev)
1893 {
1894 struct iio_buffer *buffer;
1895
1896 - buffer = iio_kfifo_allocate();
1897 + buffer = devm_iio_kfifo_allocate(&indio_dev->dev);
1898 if (!buffer)
1899 return -ENOMEM;
1900
1901 @@ -1287,11 +1287,6 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev)
1902 return 0;
1903 }
1904
1905 -static void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
1906 -{
1907 - iio_kfifo_free(indio_dev->buffer);
1908 -}
1909 -
1910 static inline
1911 int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
1912 {
1913 @@ -1546,8 +1541,6 @@ static int sca3000_remove(struct spi_device *spi)
1914 if (spi->irq)
1915 free_irq(spi->irq, indio_dev);
1916
1917 - sca3000_unconfigure_ring(indio_dev);
1918 -
1919 return 0;
1920 }
1921
1922 diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
1923 index 70fbf92f9827..03a5f7d6cb0c 100644
1924 --- a/drivers/iio/adc/ad7791.c
1925 +++ b/drivers/iio/adc/ad7791.c
1926 @@ -244,58 +244,9 @@ static int ad7791_read_raw(struct iio_dev *indio_dev,
1927 return -EINVAL;
1928 }
1929
1930 -static const char * const ad7791_sample_freq_avail[] = {
1931 - [AD7791_FILTER_RATE_120] = "120",
1932 - [AD7791_FILTER_RATE_100] = "100",
1933 - [AD7791_FILTER_RATE_33_3] = "33.3",
1934 - [AD7791_FILTER_RATE_20] = "20",
1935 - [AD7791_FILTER_RATE_16_6] = "16.6",
1936 - [AD7791_FILTER_RATE_16_7] = "16.7",
1937 - [AD7791_FILTER_RATE_13_3] = "13.3",
1938 - [AD7791_FILTER_RATE_9_5] = "9.5",
1939 -};
1940 -
1941 -static ssize_t ad7791_read_frequency(struct device *dev,
1942 - struct device_attribute *attr, char *buf)
1943 -{
1944 - struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1945 - struct ad7791_state *st = iio_priv(indio_dev);
1946 - unsigned int rate = st->filter & AD7791_FILTER_RATE_MASK;
1947 -
1948 - return sprintf(buf, "%s\n", ad7791_sample_freq_avail[rate]);
1949 -}
1950 -
1951 -static ssize_t ad7791_write_frequency(struct device *dev,
1952 - struct device_attribute *attr, const char *buf, size_t len)
1953 -{
1954 - struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1955 - struct ad7791_state *st = iio_priv(indio_dev);
1956 - int i, ret;
1957 -
1958 - i = sysfs_match_string(ad7791_sample_freq_avail, buf);
1959 - if (i < 0)
1960 - return i;
1961 -
1962 - ret = iio_device_claim_direct_mode(indio_dev);
1963 - if (ret)
1964 - return ret;
1965 - st->filter &= ~AD7791_FILTER_RATE_MASK;
1966 - st->filter |= i;
1967 - ad_sd_write_reg(&st->sd, AD7791_REG_FILTER, sizeof(st->filter),
1968 - st->filter);
1969 - iio_device_release_direct_mode(indio_dev);
1970 -
1971 - return len;
1972 -}
1973 -
1974 -static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
1975 - ad7791_read_frequency,
1976 - ad7791_write_frequency);
1977 -
1978 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("120 100 33.3 20 16.7 16.6 13.3 9.5");
1979
1980 static struct attribute *ad7791_attributes[] = {
1981 - &iio_dev_attr_sampling_frequency.dev_attr.attr,
1982 &iio_const_attr_sampling_frequency_available.dev_attr.attr,
1983 NULL
1984 };
1985 diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
1986 index 2b6c9b516070..d76455edd292 100644
1987 --- a/drivers/infiniband/core/umem.c
1988 +++ b/drivers/infiniband/core/umem.c
1989 @@ -119,16 +119,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
1990 umem->length = size;
1991 umem->address = addr;
1992 umem->page_shift = PAGE_SHIFT;
1993 - /*
1994 - * We ask for writable memory if any of the following
1995 - * access flags are set. "Local write" and "remote write"
1996 - * obviously require write access. "Remote atomic" can do
1997 - * things like fetch and add, which will modify memory, and
1998 - * "MW bind" can change permissions by binding a window.
1999 - */
2000 - umem->writable = !!(access &
2001 - (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
2002 - IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
2003 + umem->writable = ib_access_writable(access);
2004
2005 if (access & IB_ACCESS_ON_DEMAND) {
2006 ret = ib_umem_odp_get(context, umem, access);
2007 diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
2008 index 4445d8ee9314..2d34a9c827b7 100644
2009 --- a/drivers/infiniband/core/uverbs_main.c
2010 +++ b/drivers/infiniband/core/uverbs_main.c
2011 @@ -734,10 +734,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
2012 if (ret)
2013 return ret;
2014
2015 - if (!file->ucontext &&
2016 - (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended))
2017 - return -EINVAL;
2018 -
2019 if (extended) {
2020 if (count < (sizeof(hdr) + sizeof(ex_hdr)))
2021 return -EINVAL;
2022 @@ -757,6 +753,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
2023 goto out;
2024 }
2025
2026 + /*
2027 + * Must be after the ib_dev check, as once the RCU clears ib_dev ==
2028 + * NULL means ucontext == NULL
2029 + */
2030 + if (!file->ucontext &&
2031 + (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) {
2032 + ret = -EINVAL;
2033 + goto out;
2034 + }
2035 +
2036 if (!verify_command_mask(ib_dev, command, extended)) {
2037 ret = -EOPNOTSUPP;
2038 goto out;
2039 diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
2040 index 6ddfb1fade79..def3bc1e6447 100644
2041 --- a/drivers/infiniband/core/verbs.c
2042 +++ b/drivers/infiniband/core/verbs.c
2043 @@ -1562,11 +1562,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
2044
2045 /* Completion queues */
2046
2047 -struct ib_cq *ib_create_cq(struct ib_device *device,
2048 - ib_comp_handler comp_handler,
2049 - void (*event_handler)(struct ib_event *, void *),
2050 - void *cq_context,
2051 - const struct ib_cq_init_attr *cq_attr)
2052 +struct ib_cq *__ib_create_cq(struct ib_device *device,
2053 + ib_comp_handler comp_handler,
2054 + void (*event_handler)(struct ib_event *, void *),
2055 + void *cq_context,
2056 + const struct ib_cq_init_attr *cq_attr,
2057 + const char *caller)
2058 {
2059 struct ib_cq *cq;
2060
2061 @@ -1580,12 +1581,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
2062 cq->cq_context = cq_context;
2063 atomic_set(&cq->usecnt, 0);
2064 cq->res.type = RDMA_RESTRACK_CQ;
2065 + cq->res.kern_name = caller;
2066 rdma_restrack_add(&cq->res);
2067 }
2068
2069 return cq;
2070 }
2071 -EXPORT_SYMBOL(ib_create_cq);
2072 +EXPORT_SYMBOL(__ib_create_cq);
2073
2074 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2075 {
2076 diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
2077 index e6bdd0c1e80a..ebccc4c84827 100644
2078 --- a/drivers/infiniband/hw/hfi1/chip.c
2079 +++ b/drivers/infiniband/hw/hfi1/chip.c
2080 @@ -6829,7 +6829,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
2081 }
2082 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
2083 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
2084 - rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
2085 + rcvmask |= rcd->rcvhdrtail_kvaddr ?
2086 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
2087 hfi1_rcvctrl(dd, rcvmask, rcd);
2088 hfi1_rcd_put(rcd);
2089 @@ -8355,7 +8355,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
2090 u32 tail;
2091 int present;
2092
2093 - if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
2094 + if (!rcd->rcvhdrtail_kvaddr)
2095 present = (rcd->seq_cnt ==
2096 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
2097 else /* is RDMA rtail */
2098 @@ -11823,7 +11823,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
2099 /* reset the tail and hdr addresses, and sequence count */
2100 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
2101 rcd->rcvhdrq_dma);
2102 - if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
2103 + if (rcd->rcvhdrtail_kvaddr)
2104 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
2105 rcd->rcvhdrqtailaddr_dma);
2106 rcd->seq_cnt = 1;
2107 @@ -11903,7 +11903,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
2108 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
2109 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
2110 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
2111 - if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
2112 + if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
2113 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
2114 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
2115 /* See comment on RcvCtxtCtrl.TailUpd above */
2116 diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
2117 index 852173bf05d0..5343960610fe 100644
2118 --- a/drivers/infiniband/hw/hfi1/debugfs.c
2119 +++ b/drivers/infiniband/hw/hfi1/debugfs.c
2120 @@ -1227,7 +1227,8 @@ DEBUGFS_FILE_OPS(fault_stats);
2121
2122 static void fault_exit_opcode_debugfs(struct hfi1_ibdev *ibd)
2123 {
2124 - debugfs_remove_recursive(ibd->fault_opcode->dir);
2125 + if (ibd->fault_opcode)
2126 + debugfs_remove_recursive(ibd->fault_opcode->dir);
2127 kfree(ibd->fault_opcode);
2128 ibd->fault_opcode = NULL;
2129 }
2130 @@ -1255,6 +1256,7 @@ static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd)
2131 &ibd->fault_opcode->attr);
2132 if (IS_ERR(ibd->fault_opcode->dir)) {
2133 kfree(ibd->fault_opcode);
2134 + ibd->fault_opcode = NULL;
2135 return -ENOENT;
2136 }
2137
2138 @@ -1278,7 +1280,8 @@ static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd)
2139
2140 static void fault_exit_packet_debugfs(struct hfi1_ibdev *ibd)
2141 {
2142 - debugfs_remove_recursive(ibd->fault_packet->dir);
2143 + if (ibd->fault_packet)
2144 + debugfs_remove_recursive(ibd->fault_packet->dir);
2145 kfree(ibd->fault_packet);
2146 ibd->fault_packet = NULL;
2147 }
2148 @@ -1304,6 +1307,7 @@ static int fault_init_packet_debugfs(struct hfi1_ibdev *ibd)
2149 &ibd->fault_opcode->attr);
2150 if (IS_ERR(ibd->fault_packet->dir)) {
2151 kfree(ibd->fault_packet);
2152 + ibd->fault_packet = NULL;
2153 return -ENOENT;
2154 }
2155
2156 diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
2157 index da4aa1a95b11..cf25913bd81c 100644
2158 --- a/drivers/infiniband/hw/hfi1/file_ops.c
2159 +++ b/drivers/infiniband/hw/hfi1/file_ops.c
2160 @@ -505,7 +505,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
2161 ret = -EINVAL;
2162 goto done;
2163 }
2164 - if (flags & VM_WRITE) {
2165 + if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) {
2166 ret = -EPERM;
2167 goto done;
2168 }
2169 @@ -689,8 +689,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
2170 * checks to default and disable the send context.
2171 */
2172 if (uctxt->sc) {
2173 - set_pio_integrity(uctxt->sc);
2174 sc_disable(uctxt->sc);
2175 + set_pio_integrity(uctxt->sc);
2176 }
2177
2178 hfi1_free_ctxt_rcv_groups(uctxt);
2179 diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
2180 index cac2c62bc42d..9c97c180c35e 100644
2181 --- a/drivers/infiniband/hw/hfi1/hfi.h
2182 +++ b/drivers/infiniband/hw/hfi1/hfi.h
2183 @@ -1856,6 +1856,7 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
2184 #define HFI1_HAS_SDMA_TIMEOUT 0x8
2185 #define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */
2186 #define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */
2187 +#define HFI1_SHUTDOWN 0x100 /* device is shutting down */
2188
2189 /* IB dword length mask in PBC (lower 11 bits); same for all chips */
2190 #define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
2191 diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
2192 index 6309edf811df..92e802a64fc4 100644
2193 --- a/drivers/infiniband/hw/hfi1/init.c
2194 +++ b/drivers/infiniband/hw/hfi1/init.c
2195 @@ -1058,6 +1058,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
2196 unsigned pidx;
2197 int i;
2198
2199 + if (dd->flags & HFI1_SHUTDOWN)
2200 + return;
2201 + dd->flags |= HFI1_SHUTDOWN;
2202 +
2203 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2204 ppd = dd->pport + pidx;
2205
2206 @@ -1391,6 +1395,7 @@ void hfi1_disable_after_error(struct hfi1_devdata *dd)
2207
2208 static void remove_one(struct pci_dev *);
2209 static int init_one(struct pci_dev *, const struct pci_device_id *);
2210 +static void shutdown_one(struct pci_dev *);
2211
2212 #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
2213 #define PFX DRIVER_NAME ": "
2214 @@ -1407,6 +1412,7 @@ static struct pci_driver hfi1_pci_driver = {
2215 .name = DRIVER_NAME,
2216 .probe = init_one,
2217 .remove = remove_one,
2218 + .shutdown = shutdown_one,
2219 .id_table = hfi1_pci_tbl,
2220 .err_handler = &hfi1_pci_err_handler,
2221 };
2222 @@ -1816,6 +1822,13 @@ static void remove_one(struct pci_dev *pdev)
2223 postinit_cleanup(dd);
2224 }
2225
2226 +static void shutdown_one(struct pci_dev *pdev)
2227 +{
2228 + struct hfi1_devdata *dd = pci_get_drvdata(pdev);
2229 +
2230 + shutdown_device(dd);
2231 +}
2232 +
2233 /**
2234 * hfi1_create_rcvhdrq - create a receive header queue
2235 * @dd: the hfi1_ib device
2236 @@ -1831,7 +1844,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
2237 u64 reg;
2238
2239 if (!rcd->rcvhdrq) {
2240 - dma_addr_t dma_hdrqtail;
2241 gfp_t gfp_flags;
2242
2243 /*
2244 @@ -1856,13 +1868,13 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
2245 goto bail;
2246 }
2247
2248 - if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
2249 + if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
2250 + HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
2251 rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
2252 - &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
2253 - gfp_flags);
2254 + &dd->pcidev->dev, PAGE_SIZE,
2255 + &rcd->rcvhdrqtailaddr_dma, gfp_flags);
2256 if (!rcd->rcvhdrtail_kvaddr)
2257 goto bail_free;
2258 - rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
2259 }
2260
2261 rcd->rcvhdrq_size = amt;
2262 diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
2263 index 40dac4d16eb8..9cac15d10c4f 100644
2264 --- a/drivers/infiniband/hw/hfi1/pio.c
2265 +++ b/drivers/infiniband/hw/hfi1/pio.c
2266 @@ -50,8 +50,6 @@
2267 #include "qp.h"
2268 #include "trace.h"
2269
2270 -#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
2271 -
2272 #define SC(name) SEND_CTXT_##name
2273 /*
2274 * Send Context functions
2275 @@ -961,15 +959,40 @@ void sc_disable(struct send_context *sc)
2276 }
2277
2278 /* return SendEgressCtxtStatus.PacketOccupancy */
2279 -#define packet_occupancy(r) \
2280 - (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
2281 - >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
2282 +static u64 packet_occupancy(u64 reg)
2283 +{
2284 + return (reg &
2285 + SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
2286 + >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
2287 +}
2288
2289 /* is egress halted on the context? */
2290 -#define egress_halted(r) \
2291 - ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
2292 +static bool egress_halted(u64 reg)
2293 +{
2294 + return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
2295 +}
2296
2297 -/* wait for packet egress, optionally pause for credit return */
2298 +/* is the send context halted? */
2299 +static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
2300 +{
2301 + return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
2302 + SC(STATUS_CTXT_HALTED_SMASK));
2303 +}
2304 +
2305 +/**
2306 + * sc_wait_for_packet_egress
2307 + * @sc: valid send context
2308 + * @pause: wait for credit return
2309 + *
2310 + * Wait for packet egress, optionally pause for credit return
2311 + *
2312 + * Egress halt and Context halt are not necessarily the same thing, so
2313 + * check for both.
2314 + *
2315 + * NOTE: The context halt bit may not be set immediately. Because of this,
2316 + * it is necessary to check the SW SFC_HALTED bit (set in the IRQ) and the HW
2317 + * context bit to determine if the context is halted.
2318 + */
2319 static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
2320 {
2321 struct hfi1_devdata *dd = sc->dd;
2322 @@ -981,8 +1004,9 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
2323 reg_prev = reg;
2324 reg = read_csr(dd, sc->hw_context * 8 +
2325 SEND_EGRESS_CTXT_STATUS);
2326 - /* done if egress is stopped */
2327 - if (egress_halted(reg))
2328 + /* done if any halt bits, SW or HW are set */
2329 + if (sc->flags & SCF_HALTED ||
2330 + is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
2331 break;
2332 reg = packet_occupancy(reg);
2333 if (reg == 0)
2334 diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
2335 index 0793a21d76f4..d604b3d5aa3e 100644
2336 --- a/drivers/infiniband/hw/mlx4/mad.c
2337 +++ b/drivers/infiniband/hw/mlx4/mad.c
2338 @@ -1934,7 +1934,6 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
2339 "buf:%lld\n", wc.wr_id);
2340 break;
2341 default:
2342 - BUG_ON(1);
2343 break;
2344 }
2345 } else {
2346 diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
2347 index 61d8b06375bb..ed1f253faf97 100644
2348 --- a/drivers/infiniband/hw/mlx4/mr.c
2349 +++ b/drivers/infiniband/hw/mlx4/mr.c
2350 @@ -367,6 +367,40 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
2351 return block_shift;
2352 }
2353
2354 +static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
2355 + u64 length, u64 virt_addr,
2356 + int access_flags)
2357 +{
2358 + /*
2359 + * Force registering the memory as writable if the underlying pages
2360 + * are writable. This is so rereg can change the access permissions
2361 + * from readable to writable without having to run through ib_umem_get
2362 + * again
2363 + */
2364 + if (!ib_access_writable(access_flags)) {
2365 + struct vm_area_struct *vma;
2366 +
2367 + down_read(&current->mm->mmap_sem);
2368 + /*
2369 + * FIXME: Ideally this would iterate over all the vmas that
2370 + * cover the memory, but for now it requires a single vma to
2371 + * entirely cover the MR to support RO mappings.
2372 + */
2373 + vma = find_vma(current->mm, start);
2374 + if (vma && vma->vm_end >= start + length &&
2375 + vma->vm_start <= start) {
2376 + if (vma->vm_flags & VM_WRITE)
2377 + access_flags |= IB_ACCESS_LOCAL_WRITE;
2378 + } else {
2379 + access_flags |= IB_ACCESS_LOCAL_WRITE;
2380 + }
2381 +
2382 + up_read(&current->mm->mmap_sem);
2383 + }
2384 +
2385 + return ib_umem_get(context, start, length, access_flags, 0);
2386 +}
2387 +
2388 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2389 u64 virt_addr, int access_flags,
2390 struct ib_udata *udata)
2391 @@ -381,10 +415,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2392 if (!mr)
2393 return ERR_PTR(-ENOMEM);
2394
2395 - /* Force registering the memory as writable. */
2396 - /* Used for memory re-registeration. HCA protects the access */
2397 - mr->umem = ib_umem_get(pd->uobject->context, start, length,
2398 - access_flags | IB_ACCESS_LOCAL_WRITE, 0);
2399 + mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
2400 + virt_addr, access_flags);
2401 if (IS_ERR(mr->umem)) {
2402 err = PTR_ERR(mr->umem);
2403 goto err_free;
2404 @@ -454,6 +486,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
2405 }
2406
2407 if (flags & IB_MR_REREG_ACCESS) {
2408 + if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
2409 + return -EPERM;
2410 +
2411 err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
2412 convert_access(mr_access_flags));
2413
2414 @@ -467,10 +502,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
2415
2416 mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
2417 ib_umem_release(mmr->umem);
2418 - mmr->umem = ib_umem_get(mr->uobject->context, start, length,
2419 - mr_access_flags |
2420 - IB_ACCESS_LOCAL_WRITE,
2421 - 0);
2422 + mmr->umem =
2423 + mlx4_get_umem_mr(mr->uobject->context, start, length,
2424 + virt_addr, mr_access_flags);
2425 if (IS_ERR(mmr->umem)) {
2426 err = PTR_ERR(mmr->umem);
2427 /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
2428 diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
2429 index 77d257ec899b..9f6bc34cd4db 100644
2430 --- a/drivers/infiniband/hw/mlx5/cq.c
2431 +++ b/drivers/infiniband/hw/mlx5/cq.c
2432 @@ -637,7 +637,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
2433 }
2434
2435 static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
2436 - struct ib_wc *wc)
2437 + struct ib_wc *wc, bool is_fatal_err)
2438 {
2439 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
2440 struct mlx5_ib_wc *soft_wc, *next;
2441 @@ -650,6 +650,10 @@ static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
2442 mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
2443 cq->mcq.cqn);
2444
2445 + if (unlikely(is_fatal_err)) {
2446 + soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
2447 + soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
2448 + }
2449 wc[npolled++] = soft_wc->wc;
2450 list_del(&soft_wc->list);
2451 kfree(soft_wc);
2452 @@ -670,12 +674,17 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2453
2454 spin_lock_irqsave(&cq->lock, flags);
2455 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
2456 - mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
2457 + /* make sure no soft wqe's are waiting */
2458 + if (unlikely(!list_empty(&cq->wc_list)))
2459 + soft_polled = poll_soft_wc(cq, num_entries, wc, true);
2460 +
2461 + mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
2462 + wc + soft_polled, &npolled);
2463 goto out;
2464 }
2465
2466 if (unlikely(!list_empty(&cq->wc_list)))
2467 - soft_polled = poll_soft_wc(cq, num_entries, wc);
2468 + soft_polled = poll_soft_wc(cq, num_entries, wc, false);
2469
2470 for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
2471 if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
2472 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
2473 index 46072455130c..3461df002f81 100644
2474 --- a/drivers/infiniband/hw/qib/qib.h
2475 +++ b/drivers/infiniband/hw/qib/qib.h
2476 @@ -1228,6 +1228,7 @@ static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
2477 #define QIB_BADINTR 0x8000 /* severe interrupt problems */
2478 #define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
2479 #define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
2480 +#define QIB_SHUTDOWN 0x40000 /* device is shutting down */
2481
2482 /*
2483 * values for ppd->lflags (_ib_port_ related flags)
2484 @@ -1423,8 +1424,7 @@ u64 qib_sps_ints(void);
2485 /*
2486 * dma_addr wrappers - all 0's invalid for hw
2487 */
2488 -dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
2489 - size_t, int);
2490 +int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
2491 struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
2492
2493 /*
2494 diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
2495 index 6a8800b65047..49c8e926cc64 100644
2496 --- a/drivers/infiniband/hw/qib/qib_file_ops.c
2497 +++ b/drivers/infiniband/hw/qib/qib_file_ops.c
2498 @@ -364,6 +364,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
2499 goto done;
2500 }
2501 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
2502 + dma_addr_t daddr;
2503 +
2504 for (; ntids--; tid++) {
2505 if (tid == tidcnt)
2506 tid = 0;
2507 @@ -380,12 +382,14 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
2508 ret = -ENOMEM;
2509 break;
2510 }
2511 + ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
2512 + if (ret)
2513 + break;
2514 +
2515 tidlist[i] = tid + tidoff;
2516 /* we "know" system pages and TID pages are same size */
2517 dd->pageshadow[ctxttid + tid] = pagep[i];
2518 - dd->physshadow[ctxttid + tid] =
2519 - qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
2520 - PCI_DMA_FROMDEVICE);
2521 + dd->physshadow[ctxttid + tid] = daddr;
2522 /*
2523 * don't need atomic or it's overhead
2524 */
2525 diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
2526 index 6c68f8a97018..015520289735 100644
2527 --- a/drivers/infiniband/hw/qib/qib_init.c
2528 +++ b/drivers/infiniband/hw/qib/qib_init.c
2529 @@ -841,6 +841,10 @@ static void qib_shutdown_device(struct qib_devdata *dd)
2530 struct qib_pportdata *ppd;
2531 unsigned pidx;
2532
2533 + if (dd->flags & QIB_SHUTDOWN)
2534 + return;
2535 + dd->flags |= QIB_SHUTDOWN;
2536 +
2537 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2538 ppd = dd->pport + pidx;
2539
2540 @@ -1182,6 +1186,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
2541
2542 static void qib_remove_one(struct pci_dev *);
2543 static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
2544 +static void qib_shutdown_one(struct pci_dev *);
2545
2546 #define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
2547 #define PFX QIB_DRV_NAME ": "
2548 @@ -1199,6 +1204,7 @@ static struct pci_driver qib_driver = {
2549 .name = QIB_DRV_NAME,
2550 .probe = qib_init_one,
2551 .remove = qib_remove_one,
2552 + .shutdown = qib_shutdown_one,
2553 .id_table = qib_pci_tbl,
2554 .err_handler = &qib_pci_err_handler,
2555 };
2556 @@ -1549,6 +1555,13 @@ static void qib_remove_one(struct pci_dev *pdev)
2557 qib_postinit_cleanup(dd);
2558 }
2559
2560 +static void qib_shutdown_one(struct pci_dev *pdev)
2561 +{
2562 + struct qib_devdata *dd = pci_get_drvdata(pdev);
2563 +
2564 + qib_shutdown_device(dd);
2565 +}
2566 +
2567 /**
2568 * qib_create_rcvhdrq - create a receive header queue
2569 * @dd: the qlogic_ib device
2570 diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
2571 index ce83ba9a12ef..16543d5e80c3 100644
2572 --- a/drivers/infiniband/hw/qib/qib_user_pages.c
2573 +++ b/drivers/infiniband/hw/qib/qib_user_pages.c
2574 @@ -99,23 +99,27 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
2575 *
2576 * I'm sure we won't be so lucky with other iommu's, so FIXME.
2577 */
2578 -dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
2579 - unsigned long offset, size_t size, int direction)
2580 +int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
2581 {
2582 dma_addr_t phys;
2583
2584 - phys = pci_map_page(hwdev, page, offset, size, direction);
2585 + phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2586 + if (pci_dma_mapping_error(hwdev, phys))
2587 + return -ENOMEM;
2588
2589 - if (phys == 0) {
2590 - pci_unmap_page(hwdev, phys, size, direction);
2591 - phys = pci_map_page(hwdev, page, offset, size, direction);
2592 + if (!phys) {
2593 + pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2594 + phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
2595 + PCI_DMA_FROMDEVICE);
2596 + if (pci_dma_mapping_error(hwdev, phys))
2597 + return -ENOMEM;
2598 /*
2599 * FIXME: If we get 0 again, we should keep this page,
2600 * map another, then free the 0 page.
2601 */
2602 }
2603 -
2604 - return phys;
2605 + *daddr = phys;
2606 + return 0;
2607 }
2608
2609 /**
2610 diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
2611 index fb52b669bfce..340c17aba3b0 100644
2612 --- a/drivers/infiniband/sw/rdmavt/cq.c
2613 +++ b/drivers/infiniband/sw/rdmavt/cq.c
2614 @@ -120,17 +120,20 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
2615 if (cq->notify == IB_CQ_NEXT_COMP ||
2616 (cq->notify == IB_CQ_SOLICITED &&
2617 (solicited || entry->status != IB_WC_SUCCESS))) {
2618 + struct kthread_worker *worker;
2619 +
2620 /*
2621 * This will cause send_complete() to be called in
2622 * another thread.
2623 */
2624 - spin_lock(&cq->rdi->n_cqs_lock);
2625 - if (likely(cq->rdi->worker)) {
2626 + rcu_read_lock();
2627 + worker = rcu_dereference(cq->rdi->worker);
2628 + if (likely(worker)) {
2629 cq->notify = RVT_CQ_NONE;
2630 cq->triggered++;
2631 - kthread_queue_work(cq->rdi->worker, &cq->comptask);
2632 + kthread_queue_work(worker, &cq->comptask);
2633 }
2634 - spin_unlock(&cq->rdi->n_cqs_lock);
2635 + rcu_read_unlock();
2636 }
2637
2638 spin_unlock_irqrestore(&cq->lock, flags);
2639 @@ -512,7 +515,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
2640 int cpu;
2641 struct kthread_worker *worker;
2642
2643 - if (rdi->worker)
2644 + if (rcu_access_pointer(rdi->worker))
2645 return 0;
2646
2647 spin_lock_init(&rdi->n_cqs_lock);
2648 @@ -524,7 +527,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
2649 return PTR_ERR(worker);
2650
2651 set_user_nice(worker->task, MIN_NICE);
2652 - rdi->worker = worker;
2653 + RCU_INIT_POINTER(rdi->worker, worker);
2654 return 0;
2655 }
2656
2657 @@ -536,15 +539,19 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
2658 {
2659 struct kthread_worker *worker;
2660
2661 - /* block future queuing from send_complete() */
2662 - spin_lock_irq(&rdi->n_cqs_lock);
2663 - worker = rdi->worker;
2664 + if (!rcu_access_pointer(rdi->worker))
2665 + return;
2666 +
2667 + spin_lock(&rdi->n_cqs_lock);
2668 + worker = rcu_dereference_protected(rdi->worker,
2669 + lockdep_is_held(&rdi->n_cqs_lock));
2670 if (!worker) {
2671 - spin_unlock_irq(&rdi->n_cqs_lock);
2672 + spin_unlock(&rdi->n_cqs_lock);
2673 return;
2674 }
2675 - rdi->worker = NULL;
2676 - spin_unlock_irq(&rdi->n_cqs_lock);
2677 + RCU_INIT_POINTER(rdi->worker, NULL);
2678 + spin_unlock(&rdi->n_cqs_lock);
2679 + synchronize_rcu();
2680
2681 kthread_destroy_worker(worker);
2682 }
2683 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
2684 index fff40b097947..3130698fee70 100644
2685 --- a/drivers/infiniband/ulp/isert/ib_isert.c
2686 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
2687 @@ -886,15 +886,9 @@ isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_des
2688 }
2689
2690 static void
2691 -isert_create_send_desc(struct isert_conn *isert_conn,
2692 - struct isert_cmd *isert_cmd,
2693 - struct iser_tx_desc *tx_desc)
2694 +__isert_create_send_desc(struct isert_device *device,
2695 + struct iser_tx_desc *tx_desc)
2696 {
2697 - struct isert_device *device = isert_conn->device;
2698 - struct ib_device *ib_dev = device->ib_device;
2699 -
2700 - ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
2701 - ISER_HEADERS_LEN, DMA_TO_DEVICE);
2702
2703 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
2704 tx_desc->iser_header.flags = ISCSI_CTRL;
2705 @@ -907,6 +901,20 @@ isert_create_send_desc(struct isert_conn *isert_conn,
2706 }
2707 }
2708
2709 +static void
2710 +isert_create_send_desc(struct isert_conn *isert_conn,
2711 + struct isert_cmd *isert_cmd,
2712 + struct iser_tx_desc *tx_desc)
2713 +{
2714 + struct isert_device *device = isert_conn->device;
2715 + struct ib_device *ib_dev = device->ib_device;
2716 +
2717 + ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
2718 + ISER_HEADERS_LEN, DMA_TO_DEVICE);
2719 +
2720 + __isert_create_send_desc(device, tx_desc);
2721 +}
2722 +
2723 static int
2724 isert_init_tx_hdrs(struct isert_conn *isert_conn,
2725 struct iser_tx_desc *tx_desc)
2726 @@ -994,7 +1002,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
2727 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
2728 int ret;
2729
2730 - isert_create_send_desc(isert_conn, NULL, tx_desc);
2731 + __isert_create_send_desc(device, tx_desc);
2732
2733 memcpy(&tx_desc->iscsi_header, &login->rsp[0],
2734 sizeof(struct iscsi_hdr));
2735 @@ -2108,7 +2116,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2736
2737 sig_attrs->check_mask =
2738 (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2739 - (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2740 + (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG ? 0x30 : 0) |
2741 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
2742 return 0;
2743 }
2744 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
2745 index a89b81b35932..62f9c23d8a7f 100644
2746 --- a/drivers/input/joystick/xpad.c
2747 +++ b/drivers/input/joystick/xpad.c
2748 @@ -123,7 +123,7 @@ static const struct xpad_device {
2749 u8 mapping;
2750 u8 xtype;
2751 } xpad_device[] = {
2752 - { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
2753 + { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
2754 { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
2755 { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
2756 { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
2757 diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
2758 index 599544c1a91c..243e0fa6e3e3 100644
2759 --- a/drivers/input/mouse/elan_i2c.h
2760 +++ b/drivers/input/mouse/elan_i2c.h
2761 @@ -27,6 +27,8 @@
2762 #define ETP_DISABLE_POWER 0x0001
2763 #define ETP_PRESSURE_OFFSET 25
2764
2765 +#define ETP_CALIBRATE_MAX_LEN 3
2766 +
2767 /* IAP Firmware handling */
2768 #define ETP_PRODUCT_ID_FORMAT_STRING "%d.0"
2769 #define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
2770 diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
2771 index 93967c8139e7..37f954b704a6 100644
2772 --- a/drivers/input/mouse/elan_i2c_core.c
2773 +++ b/drivers/input/mouse/elan_i2c_core.c
2774 @@ -610,7 +610,7 @@ static ssize_t calibrate_store(struct device *dev,
2775 int tries = 20;
2776 int retval;
2777 int error;
2778 - u8 val[3];
2779 + u8 val[ETP_CALIBRATE_MAX_LEN];
2780
2781 retval = mutex_lock_interruptible(&data->sysfs_mutex);
2782 if (retval)
2783 @@ -1263,6 +1263,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
2784 { "ELAN060C", 0 },
2785 { "ELAN0611", 0 },
2786 { "ELAN0612", 0 },
2787 + { "ELAN0618", 0 },
2788 { "ELAN1000", 0 },
2789 { }
2790 };
2791 diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
2792 index cfcb32559925..c060d270bc4d 100644
2793 --- a/drivers/input/mouse/elan_i2c_smbus.c
2794 +++ b/drivers/input/mouse/elan_i2c_smbus.c
2795 @@ -56,7 +56,7 @@
2796 static int elan_smbus_initialize(struct i2c_client *client)
2797 {
2798 u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
2799 - u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
2800 + u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
2801 int len, error;
2802
2803 /* Get hello packet */
2804 @@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
2805 static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
2806 {
2807 int error;
2808 + u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
2809 +
2810 + BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
2811
2812 error = i2c_smbus_read_block_data(client,
2813 - ETP_SMBUS_CALIBRATE_QUERY, val);
2814 + ETP_SMBUS_CALIBRATE_QUERY, buf);
2815 if (error < 0)
2816 return error;
2817
2818 + memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
2819 return 0;
2820 }
2821
2822 @@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
2823 {
2824 int len;
2825
2826 + BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
2827 +
2828 len = i2c_smbus_read_block_data(client,
2829 ETP_SMBUS_PACKET_QUERY,
2830 &report[ETP_SMBUS_REPORT_OFFSET]);
2831 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
2832 index db47a5e1d114..b68019109e99 100644
2833 --- a/drivers/input/mouse/elantech.c
2834 +++ b/drivers/input/mouse/elantech.c
2835 @@ -796,7 +796,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
2836 else if (ic_version == 7 && etd->samples[1] == 0x2A)
2837 sanity_check = ((packet[3] & 0x1c) == 0x10);
2838 else
2839 - sanity_check = ((packet[0] & 0x0c) == 0x04 &&
2840 + sanity_check = ((packet[0] & 0x08) == 0x00 &&
2841 (packet[3] & 0x1c) == 0x10);
2842
2843 if (!sanity_check)
2844 @@ -1169,6 +1169,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
2845 { }
2846 };
2847
2848 +static const char * const middle_button_pnp_ids[] = {
2849 + "LEN2131", /* ThinkPad P52 w/ NFC */
2850 + "LEN2132", /* ThinkPad P52 */
2851 + NULL
2852 +};
2853 +
2854 /*
2855 * Set the appropriate event bits for the input subsystem
2856 */
2857 @@ -1188,7 +1194,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
2858 __clear_bit(EV_REL, dev->evbit);
2859
2860 __set_bit(BTN_LEFT, dev->keybit);
2861 - if (dmi_check_system(elantech_dmi_has_middle_button))
2862 + if (dmi_check_system(elantech_dmi_has_middle_button) ||
2863 + psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
2864 __set_bit(BTN_MIDDLE, dev->keybit);
2865 __set_bit(BTN_RIGHT, dev->keybit);
2866
2867 diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
2868 index 8900c3166ebf..47ed5616d026 100644
2869 --- a/drivers/input/mouse/psmouse-base.c
2870 +++ b/drivers/input/mouse/psmouse-base.c
2871 @@ -192,8 +192,8 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
2872 else
2873 input_report_rel(dev, REL_WHEEL, -wheel);
2874
2875 - input_report_key(dev, BTN_SIDE, BIT(4));
2876 - input_report_key(dev, BTN_EXTRA, BIT(5));
2877 + input_report_key(dev, BTN_SIDE, packet[3] & BIT(4));
2878 + input_report_key(dev, BTN_EXTRA, packet[3] & BIT(5));
2879 break;
2880 }
2881 break;
2882 @@ -203,13 +203,13 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
2883 input_report_rel(dev, REL_WHEEL, -(s8) packet[3]);
2884
2885 /* Extra buttons on Genius NewNet 3D */
2886 - input_report_key(dev, BTN_SIDE, BIT(6));
2887 - input_report_key(dev, BTN_EXTRA, BIT(7));
2888 + input_report_key(dev, BTN_SIDE, packet[0] & BIT(6));
2889 + input_report_key(dev, BTN_EXTRA, packet[0] & BIT(7));
2890 break;
2891
2892 case PSMOUSE_THINKPS:
2893 /* Extra button on ThinkingMouse */
2894 - input_report_key(dev, BTN_EXTRA, BIT(3));
2895 + input_report_key(dev, BTN_EXTRA, packet[0] & BIT(3));
2896
2897 /*
2898 * Without this bit of weirdness moving up gives wildly
2899 @@ -223,7 +223,7 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
2900 * Cortron PS2 Trackball reports SIDE button in the
2901 * 4th bit of the first byte.
2902 */
2903 - input_report_key(dev, BTN_SIDE, BIT(3));
2904 + input_report_key(dev, BTN_SIDE, packet[0] & BIT(3));
2905 packet[0] |= BIT(3);
2906 break;
2907
2908 diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
2909 index ff7043f74a3d..d196ac3d8b8c 100644
2910 --- a/drivers/input/touchscreen/silead.c
2911 +++ b/drivers/input/touchscreen/silead.c
2912 @@ -603,6 +603,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = {
2913 { "GSL3692", 0 },
2914 { "MSSL1680", 0 },
2915 { "MSSL0001", 0 },
2916 + { "MSSL0002", 0 },
2917 { }
2918 };
2919 MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
2920 diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
2921 index df171cb85822..b38798cc5288 100644
2922 --- a/drivers/iommu/Kconfig
2923 +++ b/drivers/iommu/Kconfig
2924 @@ -107,7 +107,6 @@ config IOMMU_PGTABLES_L2
2925 # AMD IOMMU support
2926 config AMD_IOMMU
2927 bool "AMD IOMMU support"
2928 - select DMA_DIRECT_OPS
2929 select SWIOTLB
2930 select PCI_MSI
2931 select PCI_ATS
2932 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
2933 index b0b30a568db7..12c1491a1a9a 100644
2934 --- a/drivers/iommu/amd_iommu.c
2935 +++ b/drivers/iommu/amd_iommu.c
2936 @@ -2593,32 +2593,51 @@ static void *alloc_coherent(struct device *dev, size_t size,
2937 unsigned long attrs)
2938 {
2939 u64 dma_mask = dev->coherent_dma_mask;
2940 - struct protection_domain *domain = get_domain(dev);
2941 - bool is_direct = false;
2942 - void *virt_addr;
2943 + struct protection_domain *domain;
2944 + struct dma_ops_domain *dma_dom;
2945 + struct page *page;
2946 +
2947 + domain = get_domain(dev);
2948 + if (PTR_ERR(domain) == -EINVAL) {
2949 + page = alloc_pages(flag, get_order(size));
2950 + *dma_addr = page_to_phys(page);
2951 + return page_address(page);
2952 + } else if (IS_ERR(domain))
2953 + return NULL;
2954
2955 - if (IS_ERR(domain)) {
2956 - if (PTR_ERR(domain) != -EINVAL)
2957 + dma_dom = to_dma_ops_domain(domain);
2958 + size = PAGE_ALIGN(size);
2959 + dma_mask = dev->coherent_dma_mask;
2960 + flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2961 + flag |= __GFP_ZERO;
2962 +
2963 + page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
2964 + if (!page) {
2965 + if (!gfpflags_allow_blocking(flag))
2966 return NULL;
2967 - is_direct = true;
2968 - }
2969
2970 - virt_addr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
2971 - if (!virt_addr || is_direct)
2972 - return virt_addr;
2973 + page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
2974 + get_order(size), flag);
2975 + if (!page)
2976 + return NULL;
2977 + }
2978
2979 if (!dma_mask)
2980 dma_mask = *dev->dma_mask;
2981
2982 - *dma_addr = __map_single(dev, to_dma_ops_domain(domain),
2983 - virt_to_phys(virt_addr), PAGE_ALIGN(size),
2984 - DMA_BIDIRECTIONAL, dma_mask);
2985 + *dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
2986 + size, DMA_BIDIRECTIONAL, dma_mask);
2987 +
2988 if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
2989 goto out_free;
2990 - return virt_addr;
2991 +
2992 + return page_address(page);
2993
2994 out_free:
2995 - dma_direct_free(dev, size, virt_addr, *dma_addr, attrs);
2996 +
2997 + if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
2998 + __free_pages(page, get_order(size));
2999 +
3000 return NULL;
3001 }
3002
3003 @@ -2629,17 +2648,24 @@ static void free_coherent(struct device *dev, size_t size,
3004 void *virt_addr, dma_addr_t dma_addr,
3005 unsigned long attrs)
3006 {
3007 - struct protection_domain *domain = get_domain(dev);
3008 + struct protection_domain *domain;
3009 + struct dma_ops_domain *dma_dom;
3010 + struct page *page;
3011
3012 + page = virt_to_page(virt_addr);
3013 size = PAGE_ALIGN(size);
3014
3015 - if (!IS_ERR(domain)) {
3016 - struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
3017 + domain = get_domain(dev);
3018 + if (IS_ERR(domain))
3019 + goto free_mem;
3020
3021 - __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
3022 - }
3023 + dma_dom = to_dma_ops_domain(domain);
3024 +
3025 + __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
3026
3027 - dma_direct_free(dev, size, virt_addr, dma_addr, attrs);
3028 +free_mem:
3029 + if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3030 + __free_pages(page, get_order(size));
3031 }
3032
3033 /*
3034 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
3035 index 5416f2b2ac21..ab16968fced8 100644
3036 --- a/drivers/irqchip/irq-gic-v3-its.c
3037 +++ b/drivers/irqchip/irq-gic-v3-its.c
3038 @@ -2309,7 +2309,14 @@ static int its_irq_domain_activate(struct irq_domain *domain,
3039 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
3040
3041 /* Bind the LPI to the first possible CPU */
3042 - cpu = cpumask_first(cpu_mask);
3043 + cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
3044 + if (cpu >= nr_cpu_ids) {
3045 + if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
3046 + return -EINVAL;
3047 +
3048 + cpu = cpumask_first(cpu_online_mask);
3049 + }
3050 +
3051 its_dev->event_map.col_map[event] = cpu;
3052 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3053
3054 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
3055 index b11107497d2e..19478c7b2268 100644
3056 --- a/drivers/md/dm-thin.c
3057 +++ b/drivers/md/dm-thin.c
3058 @@ -1385,6 +1385,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
3059
3060 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
3061
3062 +static void requeue_bios(struct pool *pool);
3063 +
3064 static void check_for_space(struct pool *pool)
3065 {
3066 int r;
3067 @@ -1397,8 +1399,10 @@ static void check_for_space(struct pool *pool)
3068 if (r)
3069 return;
3070
3071 - if (nr_free)
3072 + if (nr_free) {
3073 set_pool_mode(pool, PM_WRITE);
3074 + requeue_bios(pool);
3075 + }
3076 }
3077
3078 /*
3079 @@ -1475,7 +1479,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
3080
3081 r = dm_pool_alloc_data_block(pool->pmd, result);
3082 if (r) {
3083 - metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
3084 + if (r == -ENOSPC)
3085 + set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
3086 + else
3087 + metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
3088 return r;
3089 }
3090
3091 diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
3092 index e73b0776683c..e302f1558fa0 100644
3093 --- a/drivers/md/dm-zoned-target.c
3094 +++ b/drivers/md/dm-zoned-target.c
3095 @@ -788,7 +788,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3096
3097 /* Chunk BIO work */
3098 mutex_init(&dmz->chunk_lock);
3099 - INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
3100 + INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
3101 dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
3102 0, dev->name);
3103 if (!dmz->chunk_wq) {
3104 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3105 index 0a7b0107ca78..cabae3e280c2 100644
3106 --- a/drivers/md/dm.c
3107 +++ b/drivers/md/dm.c
3108 @@ -1582,10 +1582,9 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
3109 * the usage of io->orig_bio in dm_remap_zone_report()
3110 * won't be affected by this reassignment.
3111 */
3112 - struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
3113 - md->queue->bio_split);
3114 + struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
3115 + GFP_NOIO, md->queue->bio_split);
3116 ci.io->orig_bio = b;
3117 - bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
3118 bio_chain(b, bio);
3119 ret = generic_make_request(bio);
3120 break;
3121 diff --git a/drivers/md/md.c b/drivers/md/md.c
3122 index c208c01f63a5..bac480d75d1d 100644
3123 --- a/drivers/md/md.c
3124 +++ b/drivers/md/md.c
3125 @@ -2853,7 +2853,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
3126 err = 0;
3127 }
3128 } else if (cmd_match(buf, "re-add")) {
3129 - if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
3130 + if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3131 + rdev->saved_raid_disk >= 0) {
3132 /* clear_bit is performed _after_ all the devices
3133 * have their local Faulty bit cleared. If any writes
3134 * happen in the meantime in the local node, they
3135 @@ -8641,6 +8642,7 @@ static int remove_and_add_spares(struct mddev *mddev,
3136 if (mddev->pers->hot_remove_disk(
3137 mddev, rdev) == 0) {
3138 sysfs_unlink_rdev(mddev, rdev);
3139 + rdev->saved_raid_disk = rdev->raid_disk;
3140 rdev->raid_disk = -1;
3141 removed++;
3142 }
3143 diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
3144 index e33414975065..a4ada1ccf0df 100644
3145 --- a/drivers/media/dvb-core/dvb_frontend.c
3146 +++ b/drivers/media/dvb-core/dvb_frontend.c
3147 @@ -275,8 +275,20 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe,
3148 wake_up_interruptible (&events->wait_queue);
3149 }
3150
3151 +static int dvb_frontend_test_event(struct dvb_frontend_private *fepriv,
3152 + struct dvb_fe_events *events)
3153 +{
3154 + int ret;
3155 +
3156 + up(&fepriv->sem);
3157 + ret = events->eventw != events->eventr;
3158 + down(&fepriv->sem);
3159 +
3160 + return ret;
3161 +}
3162 +
3163 static int dvb_frontend_get_event(struct dvb_frontend *fe,
3164 - struct dvb_frontend_event *event, int flags)
3165 + struct dvb_frontend_event *event, int flags)
3166 {
3167 struct dvb_frontend_private *fepriv = fe->frontend_priv;
3168 struct dvb_fe_events *events = &fepriv->events;
3169 @@ -294,13 +306,8 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
3170 if (flags & O_NONBLOCK)
3171 return -EWOULDBLOCK;
3172
3173 - up(&fepriv->sem);
3174 -
3175 - ret = wait_event_interruptible (events->wait_queue,
3176 - events->eventw != events->eventr);
3177 -
3178 - if (down_interruptible (&fepriv->sem))
3179 - return -ERESTARTSYS;
3180 + ret = wait_event_interruptible(events->wait_queue,
3181 + dvb_frontend_test_event(fepriv, events));
3182
3183 if (ret < 0)
3184 return ret;
3185 diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
3186 index c2d3b8f0f487..93f69b3ac911 100644
3187 --- a/drivers/media/platform/vsp1/vsp1_video.c
3188 +++ b/drivers/media/platform/vsp1/vsp1_video.c
3189 @@ -849,9 +849,8 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
3190 return 0;
3191 }
3192
3193 -static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
3194 +static void vsp1_video_release_buffers(struct vsp1_video *video)
3195 {
3196 - struct vsp1_video *video = pipe->output->video;
3197 struct vsp1_vb2_buffer *buffer;
3198 unsigned long flags;
3199
3200 @@ -861,12 +860,18 @@ static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
3201 vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
3202 INIT_LIST_HEAD(&video->irqqueue);
3203 spin_unlock_irqrestore(&video->irqlock, flags);
3204 +}
3205 +
3206 +static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
3207 +{
3208 + lockdep_assert_held(&pipe->lock);
3209
3210 /* Release our partition table allocation */
3211 - mutex_lock(&pipe->lock);
3212 kfree(pipe->part_table);
3213 pipe->part_table = NULL;
3214 - mutex_unlock(&pipe->lock);
3215 +
3216 + vsp1_dl_list_put(pipe->dl);
3217 + pipe->dl = NULL;
3218 }
3219
3220 static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
3221 @@ -881,8 +886,9 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
3222 if (pipe->stream_count == pipe->num_inputs) {
3223 ret = vsp1_video_setup_pipeline(pipe);
3224 if (ret < 0) {
3225 - mutex_unlock(&pipe->lock);
3226 + vsp1_video_release_buffers(video);
3227 vsp1_video_cleanup_pipeline(pipe);
3228 + mutex_unlock(&pipe->lock);
3229 return ret;
3230 }
3231
3232 @@ -932,13 +938,12 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
3233 if (ret == -ETIMEDOUT)
3234 dev_err(video->vsp1->dev, "pipeline stop timeout\n");
3235
3236 - vsp1_dl_list_put(pipe->dl);
3237 - pipe->dl = NULL;
3238 + vsp1_video_cleanup_pipeline(pipe);
3239 }
3240 mutex_unlock(&pipe->lock);
3241
3242 media_pipeline_stop(&video->video.entity);
3243 - vsp1_video_cleanup_pipeline(pipe);
3244 + vsp1_video_release_buffers(video);
3245 vsp1_video_pipeline_put(pipe);
3246 }
3247
3248 diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
3249 index c110984ca671..5478fe08f9d3 100644
3250 --- a/drivers/media/rc/ir-mce_kbd-decoder.c
3251 +++ b/drivers/media/rc/ir-mce_kbd-decoder.c
3252 @@ -130,6 +130,8 @@ static void mce_kbd_rx_timeout(struct timer_list *t)
3253
3254 for (i = 0; i < MCIR2_MASK_KEYS_START; i++)
3255 input_report_key(raw->mce_kbd.idev, kbd_keycodes[i], 0);
3256 +
3257 + input_sync(raw->mce_kbd.idev);
3258 }
3259
3260 static enum mce_kbd_mode mce_kbd_mode(struct mce_kbd_dec *data)
3261 diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
3262 index c76b2101193c..89795d4d0a71 100644
3263 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c
3264 +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
3265 @@ -1024,6 +1024,9 @@ struct usb_device_id cx231xx_id_table[] = {
3266 .driver_info = CX231XX_BOARD_CNXT_RDE_250},
3267 {USB_DEVICE(0x0572, 0x58A0),
3268 .driver_info = CX231XX_BOARD_CNXT_RDU_250},
3269 + /* AverMedia DVD EZMaker 7 */
3270 + {USB_DEVICE(0x07ca, 0xc039),
3271 + .driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER},
3272 {USB_DEVICE(0x2040, 0xb110),
3273 .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL},
3274 {USB_DEVICE(0x2040, 0xb111),
3275 diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
3276 index 67ed66712d05..f31ffaf9d2f2 100644
3277 --- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
3278 +++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
3279 @@ -1151,7 +1151,7 @@ static int dvb_init(struct cx231xx *dev)
3280 info.platform_data = &si2157_config;
3281 request_module("si2157");
3282
3283 - client = i2c_new_device(adapter, &info);
3284 + client = i2c_new_device(tuner_i2c, &info);
3285 if (client == NULL || client->dev.driver == NULL) {
3286 module_put(dvb->i2c_client_demod[0]->dev.driver->owner);
3287 i2c_unregister_device(dvb->i2c_client_demod[0]);
3288 diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
3289 index aa0082fe5833..b28c997a7ab0 100644
3290 --- a/drivers/media/usb/uvc/uvc_video.c
3291 +++ b/drivers/media/usb/uvc/uvc_video.c
3292 @@ -163,14 +163,27 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
3293 }
3294 }
3295
3296 +static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
3297 +{
3298 + /*
3299 + * Return the size of the video probe and commit controls, which depends
3300 + * on the protocol version.
3301 + */
3302 + if (stream->dev->uvc_version < 0x0110)
3303 + return 26;
3304 + else if (stream->dev->uvc_version < 0x0150)
3305 + return 34;
3306 + else
3307 + return 48;
3308 +}
3309 +
3310 static int uvc_get_video_ctrl(struct uvc_streaming *stream,
3311 struct uvc_streaming_control *ctrl, int probe, u8 query)
3312 {
3313 + u16 size = uvc_video_ctrl_size(stream);
3314 u8 *data;
3315 - u16 size;
3316 int ret;
3317
3318 - size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
3319 if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
3320 query == UVC_GET_DEF)
3321 return -EIO;
3322 @@ -225,7 +238,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
3323 ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
3324 ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);
3325
3326 - if (size == 34) {
3327 + if (size >= 34) {
3328 ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
3329 ctrl->bmFramingInfo = data[30];
3330 ctrl->bPreferedVersion = data[31];
3331 @@ -254,11 +267,10 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
3332 static int uvc_set_video_ctrl(struct uvc_streaming *stream,
3333 struct uvc_streaming_control *ctrl, int probe)
3334 {
3335 + u16 size = uvc_video_ctrl_size(stream);
3336 u8 *data;
3337 - u16 size;
3338 int ret;
3339
3340 - size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
3341 data = kzalloc(size, GFP_KERNEL);
3342 if (data == NULL)
3343 return -ENOMEM;
3344 @@ -275,7 +287,7 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream,
3345 put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
3346 put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);
3347
3348 - if (size == 34) {
3349 + if (size >= 34) {
3350 put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
3351 data[30] = ctrl->bmFramingInfo;
3352 data[31] = ctrl->bPreferedVersion;
3353 diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
3354 index 4312935f1dfc..d03a44d89649 100644
3355 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
3356 +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
3357 @@ -871,7 +871,7 @@ static int put_v4l2_ext_controls32(struct file *file,
3358 get_user(kcontrols, &kp->controls))
3359 return -EFAULT;
3360
3361 - if (!count)
3362 + if (!count || count > (U32_MAX/sizeof(*ucontrols)))
3363 return 0;
3364 if (get_user(p, &up->controls))
3365 return -EFAULT;
3366 diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
3367 index d1c46de89eb4..d9ae983095c5 100644
3368 --- a/drivers/mfd/intel-lpss-pci.c
3369 +++ b/drivers/mfd/intel-lpss-pci.c
3370 @@ -124,6 +124,11 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
3371 .properties = apl_i2c_properties,
3372 };
3373
3374 +static const struct intel_lpss_platform_info cnl_i2c_info = {
3375 + .clk_rate = 216000000,
3376 + .properties = spt_i2c_properties,
3377 +};
3378 +
3379 static const struct pci_device_id intel_lpss_pci_ids[] = {
3380 /* BXT A-Step */
3381 { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
3382 @@ -207,13 +212,13 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
3383 { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&spt_info },
3384 { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&spt_info },
3385 { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&spt_info },
3386 - { PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&spt_i2c_info },
3387 - { PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&spt_i2c_info },
3388 + { PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&cnl_i2c_info },
3389 + { PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&cnl_i2c_info },
3390 { PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info },
3391 - { PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&spt_i2c_info },
3392 - { PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&spt_i2c_info },
3393 - { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&spt_i2c_info },
3394 - { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&spt_i2c_info },
3395 + { PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&cnl_i2c_info },
3396 + { PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&cnl_i2c_info },
3397 + { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&cnl_i2c_info },
3398 + { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&cnl_i2c_info },
3399 /* SPT-H */
3400 { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
3401 { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
3402 @@ -240,10 +245,10 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
3403 { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&spt_info },
3404 { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info },
3405 { PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info },
3406 - { PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&spt_i2c_info },
3407 - { PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&spt_i2c_info },
3408 - { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&spt_i2c_info },
3409 - { PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&spt_i2c_info },
3410 + { PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&cnl_i2c_info },
3411 + { PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&cnl_i2c_info },
3412 + { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&cnl_i2c_info },
3413 + { PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&cnl_i2c_info },
3414 { }
3415 };
3416 MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
3417 diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
3418 index 9e545eb6e8b4..4bcf117a7ba8 100644
3419 --- a/drivers/mfd/intel-lpss.c
3420 +++ b/drivers/mfd/intel-lpss.c
3421 @@ -275,11 +275,11 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
3422
3423 intel_lpss_deassert_reset(lpss);
3424
3425 + intel_lpss_set_remap_addr(lpss);
3426 +
3427 if (!intel_lpss_has_idma(lpss))
3428 return;
3429
3430 - intel_lpss_set_remap_addr(lpss);
3431 -
3432 /* Make sure that SPI multiblock DMA transfers are re-enabled */
3433 if (lpss->type == LPSS_DEV_SPI)
3434 writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
3435 diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
3436 index d3133a371e27..c649344fd7f2 100644
3437 --- a/drivers/mfd/twl-core.c
3438 +++ b/drivers/mfd/twl-core.c
3439 @@ -1177,7 +1177,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
3440 twl_priv->ready = true;
3441
3442 /* setup clock framework */
3443 - clocks_init(&pdev->dev, pdata ? pdata->clock : NULL);
3444 + clocks_init(&client->dev, pdata ? pdata->clock : NULL);
3445
3446 /* read TWL IDCODE Register */
3447 if (twl_class_is_4030()) {
3448 diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
3449 index 4d6736f9d463..429d6de1dde7 100644
3450 --- a/drivers/misc/cxl/pci.c
3451 +++ b/drivers/misc/cxl/pci.c
3452 @@ -514,9 +514,9 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
3453 cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl);
3454
3455 /* Setup the PSL to transmit packets on the PCIe before the
3456 - * CAPP is enabled
3457 + * CAPP is enabled. Make sure that CAPP virtual machines are disabled
3458 */
3459 - cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000002A10ULL);
3460 + cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000012A10ULL);
3461
3462 /*
3463 * A response to an ASB_Notify request is returned by the
3464 diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
3465 index 4b5a4c5d3c01..629e2e156412 100644
3466 --- a/drivers/misc/cxl/sysfs.c
3467 +++ b/drivers/misc/cxl/sysfs.c
3468 @@ -353,12 +353,20 @@ static ssize_t prefault_mode_store(struct device *device,
3469 struct cxl_afu *afu = to_cxl_afu(device);
3470 enum prefault_modes mode = -1;
3471
3472 - if (!strncmp(buf, "work_element_descriptor", 23))
3473 - mode = CXL_PREFAULT_WED;
3474 - if (!strncmp(buf, "all", 3))
3475 - mode = CXL_PREFAULT_ALL;
3476 if (!strncmp(buf, "none", 4))
3477 mode = CXL_PREFAULT_NONE;
3478 + else {
3479 + if (!radix_enabled()) {
3480 +
3481 + /* only allowed when not in radix mode */
3482 + if (!strncmp(buf, "work_element_descriptor", 23))
3483 + mode = CXL_PREFAULT_WED;
3484 + if (!strncmp(buf, "all", 3))
3485 + mode = CXL_PREFAULT_ALL;
3486 + } else {
3487 + dev_err(device, "Cannot prefault with radix enabled\n");
3488 + }
3489 + }
3490
3491 if (mode == -1)
3492 return -EINVAL;
3493 diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
3494 index 51e01f03fb99..45c015da2e75 100644
3495 --- a/drivers/mmc/host/renesas_sdhi_core.c
3496 +++ b/drivers/mmc/host/renesas_sdhi_core.c
3497 @@ -28,6 +28,7 @@
3498 #include <linux/of_device.h>
3499 #include <linux/platform_device.h>
3500 #include <linux/mmc/host.h>
3501 +#include <linux/mmc/slot-gpio.h>
3502 #include <linux/mfd/tmio.h>
3503 #include <linux/sh_dma.h>
3504 #include <linux/delay.h>
3505 @@ -534,6 +535,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
3506 host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
3507 host->dma_ops = dma_ops;
3508
3509 + /* For some SoCs, we disable internal WP. GPIO may override this */
3510 + if (mmc_can_gpio_ro(host->mmc))
3511 + mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT;
3512 +
3513 /* SDR speeds are only available on Gen2+ */
3514 if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) {
3515 /* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */
3516 diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
3517 index 6af946d16d24..eb027cdc8f24 100644
3518 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
3519 +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
3520 @@ -87,6 +87,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
3521 TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
3522 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
3523 MMC_CAP_CMD23,
3524 + .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
3525 .bus_shift = 2,
3526 .scc_offset = 0x1000,
3527 .taps = rcar_gen3_scc_taps,
3528 diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
3529 index 848e50c1638a..4bb46c489d71 100644
3530 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
3531 +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
3532 @@ -42,6 +42,7 @@ static const struct renesas_sdhi_of_data of_rz_compatible = {
3533 static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = {
3534 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL,
3535 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
3536 + .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
3537 };
3538
3539 /* Definitions for sampling clocks */
3540 @@ -61,6 +62,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
3541 TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
3542 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
3543 MMC_CAP_CMD23,
3544 + .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
3545 .dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
3546 .dma_rx_offset = 0x2000,
3547 .scc_offset = 0x0300,
3548 @@ -81,6 +83,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
3549 TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
3550 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
3551 MMC_CAP_CMD23,
3552 + .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
3553 .bus_shift = 2,
3554 .scc_offset = 0x1000,
3555 .taps = rcar_gen3_scc_taps,
3556 diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
3557 index 692902df2598..3a8a88fa06aa 100644
3558 --- a/drivers/mtd/chips/cfi_cmdset_0002.c
3559 +++ b/drivers/mtd/chips/cfi_cmdset_0002.c
3560 @@ -1880,7 +1880,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
3561 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
3562 break;
3563
3564 - if (chip_ready(map, adr)) {
3565 + if (chip_good(map, adr, datum)) {
3566 xip_enable(map, chip, adr);
3567 goto op_done;
3568 }
3569 @@ -2515,7 +2515,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
3570
3571 struct ppb_lock {
3572 struct flchip *chip;
3573 - loff_t offset;
3574 + unsigned long adr;
3575 int locked;
3576 };
3577
3578 @@ -2533,8 +2533,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
3579 unsigned long timeo;
3580 int ret;
3581
3582 + adr += chip->start;
3583 mutex_lock(&chip->mutex);
3584 - ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
3585 + ret = get_chip(map, chip, adr, FL_LOCKING);
3586 if (ret) {
3587 mutex_unlock(&chip->mutex);
3588 return ret;
3589 @@ -2552,8 +2553,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
3590
3591 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
3592 chip->state = FL_LOCKING;
3593 - map_write(map, CMD(0xA0), chip->start + adr);
3594 - map_write(map, CMD(0x00), chip->start + adr);
3595 + map_write(map, CMD(0xA0), adr);
3596 + map_write(map, CMD(0x00), adr);
3597 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
3598 /*
3599 * Unlocking of one specific sector is not supported, so we
3600 @@ -2591,7 +2592,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
3601 map_write(map, CMD(0x00), chip->start);
3602
3603 chip->state = FL_READY;
3604 - put_chip(map, chip, adr + chip->start);
3605 + put_chip(map, chip, adr);
3606 mutex_unlock(&chip->mutex);
3607
3608 return ret;
3609 @@ -2648,9 +2649,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
3610 * sectors shall be unlocked, so lets keep their locking
3611 * status at "unlocked" (locked=0) for the final re-locking.
3612 */
3613 - if ((adr < ofs) || (adr >= (ofs + len))) {
3614 + if ((offset < ofs) || (offset >= (ofs + len))) {
3615 sect[sectors].chip = &cfi->chips[chipnum];
3616 - sect[sectors].offset = offset;
3617 + sect[sectors].adr = adr;
3618 sect[sectors].locked = do_ppb_xxlock(
3619 map, &cfi->chips[chipnum], adr, 0,
3620 DO_XXLOCK_ONEBLOCK_GETLOCK);
3621 @@ -2664,6 +2665,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
3622 i++;
3623
3624 if (adr >> cfi->chipshift) {
3625 + if (offset >= (ofs + len))
3626 + break;
3627 adr = 0;
3628 chipnum++;
3629
3630 @@ -2694,7 +2697,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
3631 */
3632 for (i = 0; i < sectors; i++) {
3633 if (sect[i].locked)
3634 - do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
3635 + do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
3636 DO_XXLOCK_ONEBLOCK_LOCK);
3637 }
3638
3639 diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
3640 index cfd33e6ca77f..5869e90cc14b 100644
3641 --- a/drivers/mtd/nand/raw/denali_dt.c
3642 +++ b/drivers/mtd/nand/raw/denali_dt.c
3643 @@ -123,7 +123,11 @@ static int denali_dt_probe(struct platform_device *pdev)
3644 if (ret)
3645 return ret;
3646
3647 - denali->clk_x_rate = clk_get_rate(dt->clk);
3648 + /*
3649 + * Hardcode the clock rate for backward compatibility.
3650 + * This works for both SOCFPGA and UniPhier.
3651 + */
3652 + denali->clk_x_rate = 200000000;
3653
3654 ret = denali_init(denali);
3655 if (ret)
3656 diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
3657 index 45786e707b7b..26cef218bb43 100644
3658 --- a/drivers/mtd/nand/raw/mxc_nand.c
3659 +++ b/drivers/mtd/nand/raw/mxc_nand.c
3660 @@ -48,7 +48,7 @@
3661 #define NFC_V1_V2_CONFIG (host->regs + 0x0a)
3662 #define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
3663 #define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
3664 -#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10)
3665 +#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
3666 #define NFC_V1_V2_WRPROT (host->regs + 0x12)
3667 #define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
3668 #define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
3669 @@ -1274,6 +1274,9 @@ static void preset_v2(struct mtd_info *mtd)
3670 writew(config1, NFC_V1_V2_CONFIG1);
3671 /* preset operation */
3672
3673 + /* spare area size in 16-bit half-words */
3674 + writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
3675 +
3676 /* Unlock the internal RAM Buffer */
3677 writew(0x2, NFC_V1_V2_CONFIG);
3678
3679 diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
3680 index f28c3a555861..7a881000eeba 100644
3681 --- a/drivers/mtd/nand/raw/nand_base.c
3682 +++ b/drivers/mtd/nand/raw/nand_base.c
3683 @@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
3684
3685 for (; page < page_end; page++) {
3686 res = chip->ecc.read_oob(mtd, chip, page);
3687 - if (res)
3688 + if (res < 0)
3689 return res;
3690
3691 bad = chip->oob_poi[chip->badblockpos];
3692 @@ -2174,7 +2174,6 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
3693 struct mtd_info *mtd = nand_to_mtd(chip);
3694 const u8 *params = data;
3695 int i, ret;
3696 - u8 status;
3697
3698 if (chip->exec_op) {
3699 const struct nand_sdr_timings *sdr =
3700 @@ -2188,26 +2187,18 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
3701 };
3702 struct nand_operation op = NAND_OPERATION(instrs);
3703
3704 - ret = nand_exec_op(chip, &op);
3705 - if (ret)
3706 - return ret;
3707 -
3708 - ret = nand_status_op(chip, &status);
3709 - if (ret)
3710 - return ret;
3711 - } else {
3712 - chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
3713 - for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3714 - chip->write_byte(mtd, params[i]);
3715 + return nand_exec_op(chip, &op);
3716 + }
3717
3718 - ret = chip->waitfunc(mtd, chip);
3719 - if (ret < 0)
3720 - return ret;
3721 + chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
3722 + for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3723 + chip->write_byte(mtd, params[i]);
3724
3725 - status = ret;
3726 - }
3727 + ret = chip->waitfunc(mtd, chip);
3728 + if (ret < 0)
3729 + return ret;
3730
3731 - if (status & NAND_STATUS_FAIL)
3732 + if (ret & NAND_STATUS_FAIL)
3733 return -EIO;
3734
3735 return 0;
3736 diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
3737 index 7ed1f87e742a..49c546c97c6f 100644
3738 --- a/drivers/mtd/nand/raw/nand_macronix.c
3739 +++ b/drivers/mtd/nand/raw/nand_macronix.c
3740 @@ -17,23 +17,47 @@
3741
3742 #include <linux/mtd/rawnand.h>
3743
3744 +/*
3745 + * Macronix AC series does not support using SET/GET_FEATURES to change
3746 + * the timings unlike what is declared in the parameter page. Unflag
3747 + * this feature to avoid unnecessary downturns.
3748 + */
3749 +static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
3750 +{
3751 + unsigned int i;
3752 + static const char * const broken_get_timings[] = {
3753 + "MX30LF1G18AC",
3754 + "MX30LF1G28AC",
3755 + "MX30LF2G18AC",
3756 + "MX30LF2G28AC",
3757 + "MX30LF4G18AC",
3758 + "MX30LF4G28AC",
3759 + "MX60LF8G18AC",
3760 + };
3761 +
3762 + if (!chip->parameters.supports_set_get_features)
3763 + return;
3764 +
3765 + for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
3766 + if (!strcmp(broken_get_timings[i], chip->parameters.model))
3767 + break;
3768 + }
3769 +
3770 + if (i == ARRAY_SIZE(broken_get_timings))
3771 + return;
3772 +
3773 + bitmap_clear(chip->parameters.get_feature_list,
3774 + ONFI_FEATURE_ADDR_TIMING_MODE, 1);
3775 + bitmap_clear(chip->parameters.set_feature_list,
3776 + ONFI_FEATURE_ADDR_TIMING_MODE, 1);
3777 +}
3778 +
3779 static int macronix_nand_init(struct nand_chip *chip)
3780 {
3781 if (nand_is_slc(chip))
3782 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
3783
3784 - /*
3785 - * MX30LF2G18AC chip does not support using SET/GET_FEATURES to change
3786 - * the timings unlike what is declared in the parameter page. Unflag
3787 - * this feature to avoid unnecessary downturns.
3788 - */
3789 - if (chip->parameters.supports_set_get_features &&
3790 - !strcmp("MX30LF2G18AC", chip->parameters.model)) {
3791 - bitmap_clear(chip->parameters.get_feature_list,
3792 - ONFI_FEATURE_ADDR_TIMING_MODE, 1);
3793 - bitmap_clear(chip->parameters.set_feature_list,
3794 - ONFI_FEATURE_ADDR_TIMING_MODE, 1);
3795 - }
3796 + macronix_nand_fix_broken_get_timings(chip);
3797
3798 return 0;
3799 }
3800 diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
3801 index 0af45b134c0c..5ec4c90a637d 100644
3802 --- a/drivers/mtd/nand/raw/nand_micron.c
3803 +++ b/drivers/mtd/nand/raw/nand_micron.c
3804 @@ -66,7 +66,9 @@ static int micron_nand_onfi_init(struct nand_chip *chip)
3805
3806 if (p->supports_set_get_features) {
3807 set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
3808 + set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
3809 set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
3810 + set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
3811 }
3812
3813 return 0;
3814 diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
3815 index 699951523179..8e98f4ab87c1 100644
3816 --- a/drivers/mtd/spi-nor/intel-spi.c
3817 +++ b/drivers/mtd/spi-nor/intel-spi.c
3818 @@ -136,6 +136,7 @@
3819 * @swseq_reg: Use SW sequencer in register reads/writes
3820 * @swseq_erase: Use SW sequencer in erase operation
3821 * @erase_64k: 64k erase supported
3822 + * @atomic_preopcode: Holds preopcode when atomic sequence is requested
3823 * @opcodes: Opcodes which are supported. This are programmed by BIOS
3824 * before it locks down the controller.
3825 */
3826 @@ -153,6 +154,7 @@ struct intel_spi {
3827 bool swseq_reg;
3828 bool swseq_erase;
3829 bool erase_64k;
3830 + u8 atomic_preopcode;
3831 u8 opcodes[8];
3832 };
3833
3834 @@ -474,7 +476,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
3835 int optype)
3836 {
3837 u32 val = 0, status;
3838 - u16 preop;
3839 + u8 atomic_preopcode;
3840 int ret;
3841
3842 ret = intel_spi_opcode_index(ispi, opcode, optype);
3843 @@ -484,17 +486,42 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
3844 if (len > INTEL_SPI_FIFO_SZ)
3845 return -EINVAL;
3846
3847 + /*
3848 + * Always clear it after each SW sequencer operation regardless
3849 + * of whether it is successful or not.
3850 + */
3851 + atomic_preopcode = ispi->atomic_preopcode;
3852 + ispi->atomic_preopcode = 0;
3853 +
3854 /* Only mark 'Data Cycle' bit when there is data to be transferred */
3855 if (len > 0)
3856 val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
3857 val |= ret << SSFSTS_CTL_COP_SHIFT;
3858 val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
3859 val |= SSFSTS_CTL_SCGO;
3860 - preop = readw(ispi->sregs + PREOP_OPTYPE);
3861 - if (preop) {
3862 - val |= SSFSTS_CTL_ACS;
3863 - if (preop >> 8)
3864 - val |= SSFSTS_CTL_SPOP;
3865 + if (atomic_preopcode) {
3866 + u16 preop;
3867 +
3868 + switch (optype) {
3869 + case OPTYPE_WRITE_NO_ADDR:
3870 + case OPTYPE_WRITE_WITH_ADDR:
3871 + /* Pick matching preopcode for the atomic sequence */
3872 + preop = readw(ispi->sregs + PREOP_OPTYPE);
3873 + if ((preop & 0xff) == atomic_preopcode)
3874 + ; /* Do nothing */
3875 + else if ((preop >> 8) == atomic_preopcode)
3876 + val |= SSFSTS_CTL_SPOP;
3877 + else
3878 + return -EINVAL;
3879 +
3880 + /* Enable atomic sequence */
3881 + val |= SSFSTS_CTL_ACS;
3882 + break;
3883 +
3884 + default:
3885 + return -EINVAL;
3886 + }
3887 +
3888 }
3889 writel(val, ispi->sregs + SSFSTS_CTL);
3890
3891 @@ -538,13 +565,31 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
3892
3893 /*
3894 * This is handled with atomic operation and preop code in Intel
3895 - * controller so skip it here now. If the controller is not locked,
3896 - * program the opcode to the PREOP register for later use.
3897 + * controller so we only verify that it is available. If the
3898 + * controller is not locked, program the opcode to the PREOP
3899 + * register for later use.
3900 + *
3901 + * When hardware sequencer is used there is no need to program
3902 + * any opcodes (it handles them automatically as part of a command).
3903 */
3904 if (opcode == SPINOR_OP_WREN) {
3905 - if (!ispi->locked)
3906 + u16 preop;
3907 +
3908 + if (!ispi->swseq_reg)
3909 + return 0;
3910 +
3911 + preop = readw(ispi->sregs + PREOP_OPTYPE);
3912 + if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
3913 + if (ispi->locked)
3914 + return -EINVAL;
3915 writel(opcode, ispi->sregs + PREOP_OPTYPE);
3916 + }
3917
3918 + /*
3919 + * This enables atomic sequence on next SW cycle. Will
3920 + * be cleared after next operation.
3921 + */
3922 + ispi->atomic_preopcode = opcode;
3923 return 0;
3924 }
3925
3926 @@ -569,6 +614,13 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
3927 u32 val, status;
3928 ssize_t ret;
3929
3930 + /*
3931 + * Atomic sequence is not expected with HW sequencer reads. Make
3932 + * sure it is cleared regardless.
3933 + */
3934 + if (WARN_ON_ONCE(ispi->atomic_preopcode))
3935 + ispi->atomic_preopcode = 0;
3936 +
3937 switch (nor->read_opcode) {
3938 case SPINOR_OP_READ:
3939 case SPINOR_OP_READ_FAST:
3940 @@ -627,6 +679,9 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
3941 u32 val, status;
3942 ssize_t ret;
3943
3944 + /* Not needed with HW sequencer write, make sure it is cleared */
3945 + ispi->atomic_preopcode = 0;
3946 +
3947 while (len > 0) {
3948 block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
3949
3950 @@ -707,6 +762,9 @@ static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
3951 return 0;
3952 }
3953
3954 + /* Not needed with HW sequencer erase, make sure it is cleared */
3955 + ispi->atomic_preopcode = 0;
3956 +
3957 while (len > 0) {
3958 writel(offs, ispi->base + FADDR);
3959
3960 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
3961 index 753494e042d5..74425af840d6 100644
3962 --- a/drivers/mtd/ubi/build.c
3963 +++ b/drivers/mtd/ubi/build.c
3964 @@ -1091,6 +1091,9 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
3965 if (ubi->bgt_thread)
3966 kthread_stop(ubi->bgt_thread);
3967
3968 +#ifdef CONFIG_MTD_UBI_FASTMAP
3969 + cancel_work_sync(&ubi->fm_work);
3970 +#endif
3971 ubi_debugfs_exit_dev(ubi);
3972 uif_close(ubi);
3973
3974 diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
3975 index 250e30fac61b..593a4f9d97e3 100644
3976 --- a/drivers/mtd/ubi/eba.c
3977 +++ b/drivers/mtd/ubi/eba.c
3978 @@ -490,6 +490,82 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
3979 return err;
3980 }
3981
3982 +#ifdef CONFIG_MTD_UBI_FASTMAP
3983 +/**
3984 + * check_mapping - check and fixup a mapping
3985 + * @ubi: UBI device description object
3986 + * @vol: volume description object
3987 + * @lnum: logical eraseblock number
3988 + * @pnum: physical eraseblock number
3989 + *
3990 + * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
3991 + * operations, if such an operation is interrupted the mapping still looks
3992 + * good, but upon first read an ECC error is reported to the upper layer.
3993 + * Normally during the full-scan at attach time this is fixed; for Fastmap
3994 + * we have to deal with it while reading.
3995 + * If the PEB behind a LEB shows this symptom we change the mapping to
3996 + * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
3997 + *
3998 + * Returns 0 on success, negative error code in case of failure.
3999 + */
4000 +static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
4001 + int *pnum)
4002 +{
4003 + int err;
4004 + struct ubi_vid_io_buf *vidb;
4005 +
4006 + if (!ubi->fast_attach)
4007 + return 0;
4008 +
4009 + vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
4010 + if (!vidb)
4011 + return -ENOMEM;
4012 +
4013 + err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
4014 + if (err > 0 && err != UBI_IO_BITFLIPS) {
4015 + int torture = 0;
4016 +
4017 + switch (err) {
4018 + case UBI_IO_FF:
4019 + case UBI_IO_FF_BITFLIPS:
4020 + case UBI_IO_BAD_HDR:
4021 + case UBI_IO_BAD_HDR_EBADMSG:
4022 + break;
4023 + default:
4024 + ubi_assert(0);
4025 + }
4026 +
4027 + if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
4028 + torture = 1;
4029 +
4030 + down_read(&ubi->fm_eba_sem);
4031 + vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
4032 + up_read(&ubi->fm_eba_sem);
4033 + ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
4034 +
4035 + *pnum = UBI_LEB_UNMAPPED;
4036 + } else if (err < 0) {
4037 + ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
4038 + *pnum, err);
4039 +
4040 + goto out_free;
4041 + }
4042 +
4043 + err = 0;
4044 +
4045 +out_free:
4046 + ubi_free_vid_buf(vidb);
4047 +
4048 + return err;
4049 +}
4050 +#else
4051 +static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
4052 + int *pnum)
4053 +{
4054 + return 0;
4055 +}
4056 +#endif
4057 +
4058 /**
4059 * ubi_eba_read_leb - read data.
4060 * @ubi: UBI device description object
4061 @@ -522,7 +598,13 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
4062 return err;
4063
4064 pnum = vol->eba_tbl->entries[lnum].pnum;
4065 - if (pnum < 0) {
4066 + if (pnum >= 0) {
4067 + err = check_mapping(ubi, vol, lnum, &pnum);
4068 + if (err < 0)
4069 + goto out_unlock;
4070 + }
4071 +
4072 + if (pnum == UBI_LEB_UNMAPPED) {
4073 /*
4074 * The logical eraseblock is not mapped, fill the whole buffer
4075 * with 0xFF bytes. The exception is static volumes for which
4076 @@ -930,6 +1012,12 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
4077 return err;
4078
4079 pnum = vol->eba_tbl->entries[lnum].pnum;
4080 + if (pnum >= 0) {
4081 + err = check_mapping(ubi, vol, lnum, &pnum);
4082 + if (err < 0)
4083 + goto out;
4084 + }
4085 +
4086 if (pnum >= 0) {
4087 dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
4088 len, offset, vol_id, lnum, pnum);
4089 diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
4090 index 2052a647220e..f66b3b22f328 100644
4091 --- a/drivers/mtd/ubi/wl.c
4092 +++ b/drivers/mtd/ubi/wl.c
4093 @@ -1505,6 +1505,7 @@ int ubi_thread(void *u)
4094 }
4095
4096 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
4097 + ubi->thread_enabled = 0;
4098 return 0;
4099 }
4100
4101 @@ -1514,9 +1515,6 @@ int ubi_thread(void *u)
4102 */
4103 static void shutdown_work(struct ubi_device *ubi)
4104 {
4105 -#ifdef CONFIG_MTD_UBI_FASTMAP
4106 - flush_work(&ubi->fm_work);
4107 -#endif
4108 while (!list_empty(&ubi->works)) {
4109 struct ubi_work *wrk;
4110
4111 diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
4112 index 38828ab77eb9..1480c094b57d 100644
4113 --- a/drivers/net/ethernet/ti/davinci_emac.c
4114 +++ b/drivers/net/ethernet/ti/davinci_emac.c
4115 @@ -1385,6 +1385,11 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
4116 return -EOPNOTSUPP;
4117 }
4118
4119 +static int match_first_device(struct device *dev, void *data)
4120 +{
4121 + return !strncmp(dev_name(dev), "davinci_mdio", 12);
4122 +}
4123 +
4124 /**
4125 * emac_dev_open - EMAC device open
4126 * @ndev: The DaVinci EMAC network adapter
4127 @@ -1484,8 +1489,14 @@ static int emac_dev_open(struct net_device *ndev)
4128
4129 /* use the first phy on the bus if pdata did not give us a phy id */
4130 if (!phydev && !priv->phy_id) {
4131 - phy = bus_find_device_by_name(&mdio_bus_type, NULL,
4132 - "davinci_mdio");
4133 + /* NOTE: we can't use bus_find_device_by_name() here because
4134 + * the device name is not guaranteed to be 'davinci_mdio'. On
4135 + * some systems it can be 'davinci_mdio.0' so we need to use
4136 + * strncmp() against the first part of the string to correctly
4137 + * match it.
4138 + */
4139 + phy = bus_find_device(&mdio_bus_type, NULL, NULL,
4140 + match_first_device);
4141 if (phy) {
4142 priv->phy_id = dev_name(phy);
4143 if (!priv->phy_id || !*priv->phy_id)
4144 diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
4145 index a64023690cad..b9e0d30e317a 100644
4146 --- a/drivers/nvdimm/bus.c
4147 +++ b/drivers/nvdimm/bus.c
4148 @@ -566,14 +566,18 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
4149 {
4150 struct device *dev = disk_to_dev(disk)->parent;
4151 struct nd_region *nd_region = to_nd_region(dev->parent);
4152 - const char *pol = nd_region->ro ? "only" : "write";
4153 + int disk_ro = get_disk_ro(disk);
4154
4155 - if (nd_region->ro == get_disk_ro(disk))
4156 + /*
4157 + * Upgrade to read-only if the region is read-only; preserve as
4158 + * read-only if the disk is already read-only.
4159 + */
4160 + if (disk_ro || nd_region->ro == disk_ro)
4161 return 0;
4162
4163 - dev_info(dev, "%s read-%s, marking %s read-%s\n",
4164 - dev_name(&nd_region->dev), pol, disk->disk_name, pol);
4165 - set_disk_ro(disk, nd_region->ro);
4166 + dev_info(dev, "%s read-only, marking %s read-only\n",
4167 + dev_name(&nd_region->dev), disk->disk_name);
4168 + set_disk_ro(disk, 1);
4169
4170 return 0;
4171
4172 diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
4173 index 9d714926ecf5..d7193c4a6ee2 100644
4174 --- a/drivers/nvdimm/pmem.c
4175 +++ b/drivers/nvdimm/pmem.c
4176 @@ -299,7 +299,7 @@ static int pmem_attach_disk(struct device *dev,
4177 {
4178 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
4179 struct nd_region *nd_region = to_nd_region(dev->parent);
4180 - int nid = dev_to_node(dev), fua, wbc;
4181 + int nid = dev_to_node(dev), fua;
4182 struct resource *res = &nsio->res;
4183 struct resource bb_res;
4184 struct nd_pfn *nd_pfn = NULL;
4185 @@ -335,7 +335,6 @@ static int pmem_attach_disk(struct device *dev,
4186 dev_warn(dev, "unable to guarantee persistence of writes\n");
4187 fua = 0;
4188 }
4189 - wbc = nvdimm_has_cache(nd_region);
4190
4191 if (!devm_request_mem_region(dev, res->start, resource_size(res),
4192 dev_name(&ndns->dev))) {
4193 @@ -382,13 +381,14 @@ static int pmem_attach_disk(struct device *dev,
4194 return PTR_ERR(addr);
4195 pmem->virt_addr = addr;
4196
4197 - blk_queue_write_cache(q, wbc, fua);
4198 + blk_queue_write_cache(q, true, fua);
4199 blk_queue_make_request(q, pmem_make_request);
4200 blk_queue_physical_block_size(q, PAGE_SIZE);
4201 blk_queue_logical_block_size(q, pmem_sector_size(ndns));
4202 blk_queue_max_hw_sectors(q, UINT_MAX);
4203 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
4204 - blk_queue_flag_set(QUEUE_FLAG_DAX, q);
4205 + if (pmem->pfn_flags & PFN_MAP)
4206 + blk_queue_flag_set(QUEUE_FLAG_DAX, q);
4207 q->queuedata = pmem;
4208
4209 disk = alloc_disk_node(0, nid);
4210 @@ -413,7 +413,7 @@ static int pmem_attach_disk(struct device *dev,
4211 put_disk(disk);
4212 return -ENOMEM;
4213 }
4214 - dax_write_cache(dax_dev, wbc);
4215 + dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
4216 pmem->dax_dev = dax_dev;
4217
4218 gendev = disk_to_dev(disk);
4219 diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
4220 index a612be6f019d..ec3543b83330 100644
4221 --- a/drivers/nvdimm/region_devs.c
4222 +++ b/drivers/nvdimm/region_devs.c
4223 @@ -1132,7 +1132,8 @@ EXPORT_SYMBOL_GPL(nvdimm_has_flush);
4224
4225 int nvdimm_has_cache(struct nd_region *nd_region)
4226 {
4227 - return is_nd_pmem(&nd_region->dev);
4228 + return is_nd_pmem(&nd_region->dev) &&
4229 + !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
4230 }
4231 EXPORT_SYMBOL_GPL(nvdimm_has_cache);
4232
4233 diff --git a/drivers/of/platform.c b/drivers/of/platform.c
4234 index c00d81dfac0b..9c91f97ffbe1 100644
4235 --- a/drivers/of/platform.c
4236 +++ b/drivers/of/platform.c
4237 @@ -537,6 +537,9 @@ int of_platform_device_destroy(struct device *dev, void *data)
4238 if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS))
4239 device_for_each_child(dev, NULL, of_platform_device_destroy);
4240
4241 + of_node_clear_flag(dev->of_node, OF_POPULATED);
4242 + of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
4243 +
4244 if (dev->bus == &platform_bus_type)
4245 platform_device_unregister(to_platform_device(dev));
4246 #ifdef CONFIG_ARM_AMBA
4247 @@ -544,8 +547,6 @@ int of_platform_device_destroy(struct device *dev, void *data)
4248 amba_device_unregister(to_amba_device(dev));
4249 #endif
4250
4251 - of_node_clear_flag(dev->of_node, OF_POPULATED);
4252 - of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
4253 return 0;
4254 }
4255 EXPORT_SYMBOL_GPL(of_platform_device_destroy);
4256 diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
4257 index 65d0b7adfcd4..7edfac6f1914 100644
4258 --- a/drivers/of/resolver.c
4259 +++ b/drivers/of/resolver.c
4260 @@ -122,6 +122,11 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay,
4261 goto err_fail;
4262 }
4263
4264 + if (offset < 0 || offset + sizeof(__be32) > prop->length) {
4265 + err = -EINVAL;
4266 + goto err_fail;
4267 + }
4268 +
4269 *(__be32 *)(prop->value + offset) = cpu_to_be32(phandle);
4270 }
4271
4272 diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
4273 index 6bb37c18292a..ecee50d10d14 100644
4274 --- a/drivers/of/unittest.c
4275 +++ b/drivers/of/unittest.c
4276 @@ -165,20 +165,20 @@ static void __init of_unittest_dynamic(void)
4277 /* Add a new property - should pass*/
4278 prop->name = "new-property";
4279 prop->value = "new-property-data";
4280 - prop->length = strlen(prop->value);
4281 + prop->length = strlen(prop->value) + 1;
4282 unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
4283
4284 /* Try to add an existing property - should fail */
4285 prop++;
4286 prop->name = "new-property";
4287 prop->value = "new-property-data-should-fail";
4288 - prop->length = strlen(prop->value);
4289 + prop->length = strlen(prop->value) + 1;
4290 unittest(of_add_property(np, prop) != 0,
4291 "Adding an existing property should have failed\n");
4292
4293 /* Try to modify an existing property - should pass */
4294 prop->value = "modify-property-data-should-pass";
4295 - prop->length = strlen(prop->value);
4296 + prop->length = strlen(prop->value) + 1;
4297 unittest(of_update_property(np, prop) == 0,
4298 "Updating an existing property should have passed\n");
4299
4300 @@ -186,7 +186,7 @@ static void __init of_unittest_dynamic(void)
4301 prop++;
4302 prop->name = "modify-property";
4303 prop->value = "modify-missing-property-data-should-pass";
4304 - prop->length = strlen(prop->value);
4305 + prop->length = strlen(prop->value) + 1;
4306 unittest(of_update_property(np, prop) == 0,
4307 "Updating a missing property should have passed\n");
4308
4309 diff --git a/drivers/opp/core.c b/drivers/opp/core.c
4310 index 92fa94a6dcc1..9c3f5e3df232 100644
4311 --- a/drivers/opp/core.c
4312 +++ b/drivers/opp/core.c
4313 @@ -591,7 +591,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
4314 }
4315
4316 /* Scaling up? Scale voltage before frequency */
4317 - if (freq > old_freq) {
4318 + if (freq >= old_freq) {
4319 ret = _set_opp_voltage(dev, reg, new_supply);
4320 if (ret)
4321 goto restore_voltage;
4322 diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
4323 index c75199538c05..da4b457a14e0 100644
4324 --- a/drivers/pci/host/pci-hyperv.c
4325 +++ b/drivers/pci/host/pci-hyperv.c
4326 @@ -1596,17 +1596,6 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
4327 get_pcichild(hpdev, hv_pcidev_ref_childlist);
4328 spin_lock_irqsave(&hbus->device_list_lock, flags);
4329
4330 - /*
4331 - * When a device is being added to the bus, we set the PCI domain
4332 - * number to be the device serial number, which is non-zero and
4333 - * unique on the same VM. The serial numbers start with 1, and
4334 - * increase by 1 for each device. So device names including this
4335 - * can have shorter names than based on the bus instance UUID.
4336 - * Only the first device serial number is used for domain, so the
4337 - * domain number will not change after the first device is added.
4338 - */
4339 - if (list_empty(&hbus->children))
4340 - hbus->sysdata.domain = desc->ser;
4341 list_add_tail(&hpdev->list_entry, &hbus->children);
4342 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
4343 return hpdev;
4344 diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
4345 index 88e917c9120f..5f892065585e 100644
4346 --- a/drivers/pci/hotplug/pciehp.h
4347 +++ b/drivers/pci/hotplug/pciehp.h
4348 @@ -121,7 +121,7 @@ struct controller *pcie_init(struct pcie_device *dev);
4349 int pcie_init_notification(struct controller *ctrl);
4350 int pciehp_enable_slot(struct slot *p_slot);
4351 int pciehp_disable_slot(struct slot *p_slot);
4352 -void pcie_enable_notification(struct controller *ctrl);
4353 +void pcie_reenable_notification(struct controller *ctrl);
4354 int pciehp_power_on_slot(struct slot *slot);
4355 void pciehp_power_off_slot(struct slot *slot);
4356 void pciehp_get_power_status(struct slot *slot, u8 *status);
4357 diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
4358 index 332b723ff9e6..44a6a63802d5 100644
4359 --- a/drivers/pci/hotplug/pciehp_core.c
4360 +++ b/drivers/pci/hotplug/pciehp_core.c
4361 @@ -283,7 +283,7 @@ static int pciehp_resume(struct pcie_device *dev)
4362 ctrl = get_service_data(dev);
4363
4364 /* reinitialize the chipset's event detection logic */
4365 - pcie_enable_notification(ctrl);
4366 + pcie_reenable_notification(ctrl);
4367
4368 slot = ctrl->slot;
4369
4370 diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
4371 index 18a42f8f5dc5..98ea75aa32c7 100644
4372 --- a/drivers/pci/hotplug/pciehp_hpc.c
4373 +++ b/drivers/pci/hotplug/pciehp_hpc.c
4374 @@ -659,7 +659,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
4375 return handled;
4376 }
4377
4378 -void pcie_enable_notification(struct controller *ctrl)
4379 +static void pcie_enable_notification(struct controller *ctrl)
4380 {
4381 u16 cmd, mask;
4382
4383 @@ -697,6 +697,17 @@ void pcie_enable_notification(struct controller *ctrl)
4384 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
4385 }
4386
4387 +void pcie_reenable_notification(struct controller *ctrl)
4388 +{
4389 + /*
4390 + * Clear both Presence and Data Link Layer Changed to make sure
4391 + * those events still fire after we have re-enabled them.
4392 + */
4393 + pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
4394 + PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
4395 + pcie_enable_notification(ctrl);
4396 +}
4397 +
4398 static void pcie_disable_notification(struct controller *ctrl)
4399 {
4400 u16 mask;
4401 diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
4402 index b9a131137e64..c816b0683a82 100644
4403 --- a/drivers/pci/pci-driver.c
4404 +++ b/drivers/pci/pci-driver.c
4405 @@ -753,10 +753,11 @@ static int pci_pm_suspend(struct device *dev)
4406 * better to resume the device from runtime suspend here.
4407 */
4408 if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
4409 - !pci_dev_keep_suspended(pci_dev))
4410 + !pci_dev_keep_suspended(pci_dev)) {
4411 pm_runtime_resume(dev);
4412 + pci_dev->state_saved = false;
4413 + }
4414
4415 - pci_dev->state_saved = false;
4416 if (pm->suspend) {
4417 pci_power_t prev = pci_dev->current_state;
4418 int error;
4419 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
4420 index ac91b6fd0bcd..73ac02796ba9 100644
4421 --- a/drivers/pci/probe.c
4422 +++ b/drivers/pci/probe.c
4423 @@ -2638,7 +2638,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
4424 for_each_pci_bridge(dev, bus) {
4425 cmax = max;
4426 max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
4427 - used_buses += cmax - max;
4428 +
4429 + /*
4430 + * Reserve one bus for each bridge now to avoid extending
4431 + * hotplug bridges too much during the second scan below.
4432 + */
4433 + used_buses++;
4434 + if (cmax - max > 1)
4435 + used_buses += cmax - max - 1;
4436 }
4437
4438 /* Scan bridges that need to be reconfigured */
4439 @@ -2661,12 +2668,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
4440 * bridges if any.
4441 */
4442 buses = available_buses / hotplug_bridges;
4443 - buses = min(buses, available_buses - used_buses);
4444 + buses = min(buses, available_buses - used_buses + 1);
4445 }
4446
4447 cmax = max;
4448 max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
4449 - used_buses += max - cmax;
4450 + /* One bus is already accounted for, so don't add it again */
4451 + if (max - cmax > 1)
4452 + used_buses += max - cmax - 1;
4453 }
4454
4455 /*
4456 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
4457 index 2990ad1e7c99..785a29ba4f51 100644
4458 --- a/drivers/pci/quirks.c
4459 +++ b/drivers/pci/quirks.c
4460 @@ -4230,11 +4230,29 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4461 * 0xa290-0xa29f PCI Express Root port #{0-16}
4462 * 0xa2e7-0xa2ee PCI Express Root port #{17-24}
4463 *
4464 + * Mobile chipsets are also affected, 7th & 8th Generation
4465 + * Specification update confirms ACS errata 22, status no fix: (7th Generation
4466 + * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel
4467 + * Processor Family I/O for U Quad Core Platforms Specification Update,
4468 + * August 2017, Revision 002, Document#: 334660-002)[6]
4469 + * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O
4470 + * for U/Y Platforms and 8th Generation Intel ® Processor Family I/O for U
4471 + * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7]
4472 + *
4473 + * 0x9d10-0x9d1b PCI Express Root port #{1-12}
4474 + *
4475 + * The 300 series chipset suffers from the same bug so include those root
4476 + * ports here as well.
4477 + *
4478 + * 0xa32c-0xa343 PCI Express Root port #{0-24}
4479 + *
4480 * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
4481 * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
4482 * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
4483 * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
4484 * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
4485 + * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html
4486 + * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html
4487 */
4488 static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4489 {
4490 @@ -4244,6 +4262,8 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4491 switch (dev->device) {
4492 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
4493 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
4494 + case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
4495 + case 0xa32c ... 0xa343: /* 300 series */
4496 return true;
4497 }
4498
4499 diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
4500 index b601039d6c69..c4aa411f5935 100644
4501 --- a/drivers/pinctrl/devicetree.c
4502 +++ b/drivers/pinctrl/devicetree.c
4503 @@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
4504 }
4505
4506 static int dt_to_map_one_config(struct pinctrl *p,
4507 - struct pinctrl_dev *pctldev,
4508 + struct pinctrl_dev *hog_pctldev,
4509 const char *statename,
4510 struct device_node *np_config)
4511 {
4512 + struct pinctrl_dev *pctldev = NULL;
4513 struct device_node *np_pctldev;
4514 const struct pinctrl_ops *ops;
4515 int ret;
4516 @@ -123,8 +124,10 @@ static int dt_to_map_one_config(struct pinctrl *p,
4517 return -EPROBE_DEFER;
4518 }
4519 /* If we're creating a hog we can use the passed pctldev */
4520 - if (pctldev && (np_pctldev == p->dev->of_node))
4521 + if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
4522 + pctldev = hog_pctldev;
4523 break;
4524 + }
4525 pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
4526 if (pctldev)
4527 break;
4528 diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
4529 index 5b63248c8209..7bef929bd7fe 100644
4530 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
4531 +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
4532 @@ -679,12 +679,13 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
4533 writel(1 << hwirq,
4534 info->base +
4535 IRQ_STATUS + 4 * i);
4536 - continue;
4537 + goto update_status;
4538 }
4539 }
4540
4541 generic_handle_irq(virq);
4542
4543 +update_status:
4544 /* Update status in case a new IRQ appears */
4545 spin_lock_irqsave(&info->irq_lock, flags);
4546 status = readl_relaxed(info->base +
4547 diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
4548 index 90c274490181..4f4ae66a0ee3 100644
4549 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
4550 +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
4551 @@ -105,12 +105,12 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
4552 EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
4553 EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
4554 EXYNOS_PIN_BANK_EINTG(7, 0x200, "gpg3", 0x40),
4555 - EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
4556 EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpj0", 0x44),
4557 EXYNOS_PIN_BANK_EINTG(6, 0x260, "gpj1", 0x48),
4558 EXYNOS_PIN_BANK_EINTG(8, 0x280, "gpj2", 0x4c),
4559 EXYNOS_PIN_BANK_EINTG(8, 0x2a0, "gpj3", 0x50),
4560 EXYNOS_PIN_BANK_EINTG(5, 0x2c0, "gpj4", 0x54),
4561 + EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
4562 EXYNOS_PIN_BANK_EINTN(8, 0x2e0, "mp01"),
4563 EXYNOS_PIN_BANK_EINTN(4, 0x300, "mp02"),
4564 EXYNOS_PIN_BANK_EINTN(8, 0x320, "mp03"),
4565 @@ -630,7 +630,6 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst =
4566 EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc3", 0x20),
4567 EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc1", 0x24),
4568 EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc2", 0x28),
4569 - EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"),
4570 EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x2c),
4571 EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpe0", 0x30),
4572 EXYNOS_PIN_BANK_EINTG(2, 0x1C0, "gpe1", 0x34),
4573 @@ -641,6 +640,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst =
4574 EXYNOS_PIN_BANK_EINTG(2, 0x260, "gpg2", 0x48),
4575 EXYNOS_PIN_BANK_EINTG(4, 0x280, "gph0", 0x4c),
4576 EXYNOS_PIN_BANK_EINTG(8, 0x2A0, "gph1", 0x50),
4577 + EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"),
4578 EXYNOS_PIN_BANK_EINTN(8, 0x2C0, "gpm7"),
4579 EXYNOS_PIN_BANK_EINTN(6, 0x2E0, "gpy0"),
4580 EXYNOS_PIN_BANK_EINTN(4, 0x300, "gpy1"),
4581 diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
4582 index 3682e1539251..31c8b8c49e45 100644
4583 --- a/drivers/platform/chrome/cros_ec_lpc.c
4584 +++ b/drivers/platform/chrome/cros_ec_lpc.c
4585 @@ -435,7 +435,13 @@ static int __init cros_ec_lpc_init(void)
4586 int ret;
4587 acpi_status status;
4588
4589 - if (!dmi_check_system(cros_ec_lpc_dmi_table)) {
4590 + status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device,
4591 + &cros_ec_lpc_acpi_device_found, NULL);
4592 + if (ACPI_FAILURE(status))
4593 + pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME);
4594 +
4595 + if (!cros_ec_lpc_acpi_device_found &&
4596 + !dmi_check_system(cros_ec_lpc_dmi_table)) {
4597 pr_err(DRV_NAME ": unsupported system.\n");
4598 return -ENODEV;
4599 }
4600 @@ -450,11 +456,6 @@ static int __init cros_ec_lpc_init(void)
4601 return ret;
4602 }
4603
4604 - status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device,
4605 - &cros_ec_lpc_acpi_device_found, NULL);
4606 - if (ACPI_FAILURE(status))
4607 - pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME);
4608 -
4609 if (!cros_ec_lpc_acpi_device_found) {
4610 /* Register the device, and it'll get hooked up automatically */
4611 ret = platform_device_register(&cros_ec_lpc_device);
4612 diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
4613 index 5d6ed1507d29..5561b9e190f8 100644
4614 --- a/drivers/pwm/pwm-lpss-platform.c
4615 +++ b/drivers/pwm/pwm-lpss-platform.c
4616 @@ -74,6 +74,10 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev)
4617 return pwm_lpss_remove(lpwm);
4618 }
4619
4620 +static SIMPLE_DEV_PM_OPS(pwm_lpss_platform_pm_ops,
4621 + pwm_lpss_suspend,
4622 + pwm_lpss_resume);
4623 +
4624 static const struct acpi_device_id pwm_lpss_acpi_match[] = {
4625 { "80860F09", (unsigned long)&pwm_lpss_byt_info },
4626 { "80862288", (unsigned long)&pwm_lpss_bsw_info },
4627 @@ -86,6 +90,7 @@ static struct platform_driver pwm_lpss_driver_platform = {
4628 .driver = {
4629 .name = "pwm-lpss",
4630 .acpi_match_table = pwm_lpss_acpi_match,
4631 + .pm = &pwm_lpss_platform_pm_ops,
4632 },
4633 .probe = pwm_lpss_probe_platform,
4634 .remove = pwm_lpss_remove_platform,
4635 diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
4636 index 8db0d40ccacd..4721a264bac2 100644
4637 --- a/drivers/pwm/pwm-lpss.c
4638 +++ b/drivers/pwm/pwm-lpss.c
4639 @@ -32,10 +32,13 @@
4640 /* Size of each PWM register space if multiple */
4641 #define PWM_SIZE 0x400
4642
4643 +#define MAX_PWMS 4
4644 +
4645 struct pwm_lpss_chip {
4646 struct pwm_chip chip;
4647 void __iomem *regs;
4648 const struct pwm_lpss_boardinfo *info;
4649 + u32 saved_ctrl[MAX_PWMS];
4650 };
4651
4652 static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
4653 @@ -177,6 +180,9 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
4654 unsigned long c;
4655 int ret;
4656
4657 + if (WARN_ON(info->npwm > MAX_PWMS))
4658 + return ERR_PTR(-ENODEV);
4659 +
4660 lpwm = devm_kzalloc(dev, sizeof(*lpwm), GFP_KERNEL);
4661 if (!lpwm)
4662 return ERR_PTR(-ENOMEM);
4663 @@ -212,6 +218,30 @@ int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
4664 }
4665 EXPORT_SYMBOL_GPL(pwm_lpss_remove);
4666
4667 +int pwm_lpss_suspend(struct device *dev)
4668 +{
4669 + struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
4670 + int i;
4671 +
4672 + for (i = 0; i < lpwm->info->npwm; i++)
4673 + lpwm->saved_ctrl[i] = readl(lpwm->regs + i * PWM_SIZE + PWM);
4674 +
4675 + return 0;
4676 +}
4677 +EXPORT_SYMBOL_GPL(pwm_lpss_suspend);
4678 +
4679 +int pwm_lpss_resume(struct device *dev)
4680 +{
4681 + struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
4682 + int i;
4683 +
4684 + for (i = 0; i < lpwm->info->npwm; i++)
4685 + writel(lpwm->saved_ctrl[i], lpwm->regs + i * PWM_SIZE + PWM);
4686 +
4687 + return 0;
4688 +}
4689 +EXPORT_SYMBOL_GPL(pwm_lpss_resume);
4690 +
4691 MODULE_DESCRIPTION("PWM driver for Intel LPSS");
4692 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
4693 MODULE_LICENSE("GPL v2");
4694 diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
4695 index 98306bb02cfe..7a4238ad1fcb 100644
4696 --- a/drivers/pwm/pwm-lpss.h
4697 +++ b/drivers/pwm/pwm-lpss.h
4698 @@ -28,5 +28,7 @@ struct pwm_lpss_boardinfo {
4699 struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
4700 const struct pwm_lpss_boardinfo *info);
4701 int pwm_lpss_remove(struct pwm_lpss_chip *lpwm);
4702 +int pwm_lpss_suspend(struct device *dev);
4703 +int pwm_lpss_resume(struct device *dev);
4704
4705 #endif /* __PWM_LPSS_H */
4706 diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
4707 index cbbafdcaaecb..56b14c27e275 100644
4708 --- a/drivers/remoteproc/qcom_q6v5_pil.c
4709 +++ b/drivers/remoteproc/qcom_q6v5_pil.c
4710 @@ -761,13 +761,11 @@ static int q6v5_start(struct rproc *rproc)
4711 }
4712
4713 /* Assign MBA image access in DDR to q6 */
4714 - xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
4715 - qproc->mba_phys,
4716 - qproc->mba_size);
4717 - if (xfermemop_ret) {
4718 + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
4719 + qproc->mba_phys, qproc->mba_size);
4720 + if (ret) {
4721 dev_err(qproc->dev,
4722 - "assigning Q6 access to mba memory failed: %d\n",
4723 - xfermemop_ret);
4724 + "assigning Q6 access to mba memory failed: %d\n", ret);
4725 goto disable_active_clks;
4726 }
4727
4728 diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
4729 index 5ce9bf7b897d..f63adcd95eb0 100644
4730 --- a/drivers/rpmsg/qcom_smd.c
4731 +++ b/drivers/rpmsg/qcom_smd.c
4732 @@ -1100,12 +1100,12 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
4733 void *info;
4734 int ret;
4735
4736 - channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL);
4737 + channel = kzalloc(sizeof(*channel), GFP_KERNEL);
4738 if (!channel)
4739 return ERR_PTR(-ENOMEM);
4740
4741 channel->edge = edge;
4742 - channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL);
4743 + channel->name = kstrdup(name, GFP_KERNEL);
4744 if (!channel->name)
4745 return ERR_PTR(-ENOMEM);
4746
4747 @@ -1156,8 +1156,8 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
4748 return channel;
4749
4750 free_name_and_channel:
4751 - devm_kfree(&edge->dev, channel->name);
4752 - devm_kfree(&edge->dev, channel);
4753 + kfree(channel->name);
4754 + kfree(channel);
4755
4756 return ERR_PTR(ret);
4757 }
4758 @@ -1378,13 +1378,13 @@ static int qcom_smd_parse_edge(struct device *dev,
4759 */
4760 static void qcom_smd_edge_release(struct device *dev)
4761 {
4762 - struct qcom_smd_channel *channel;
4763 + struct qcom_smd_channel *channel, *tmp;
4764 struct qcom_smd_edge *edge = to_smd_edge(dev);
4765
4766 - list_for_each_entry(channel, &edge->channels, list) {
4767 - SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
4768 - SET_RX_CHANNEL_INFO(channel, head, 0);
4769 - SET_RX_CHANNEL_INFO(channel, tail, 0);
4770 + list_for_each_entry_safe(channel, tmp, &edge->channels, list) {
4771 + list_del(&channel->list);
4772 + kfree(channel->name);
4773 + kfree(channel);
4774 }
4775
4776 kfree(edge);
4777 diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
4778 index 2e6fb275acc8..2cd5a7b1a2e3 100644
4779 --- a/drivers/rtc/rtc-sun6i.c
4780 +++ b/drivers/rtc/rtc-sun6i.c
4781 @@ -74,7 +74,7 @@
4782 #define SUN6I_ALARM_CONFIG_WAKEUP BIT(0)
4783
4784 #define SUN6I_LOSC_OUT_GATING 0x0060
4785 -#define SUN6I_LOSC_OUT_GATING_EN BIT(0)
4786 +#define SUN6I_LOSC_OUT_GATING_EN_OFFSET 0
4787
4788 /*
4789 * Get date values
4790 @@ -255,7 +255,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
4791 &clkout_name);
4792 rtc->ext_losc = clk_register_gate(NULL, clkout_name, rtc->hw.init->name,
4793 0, rtc->base + SUN6I_LOSC_OUT_GATING,
4794 - SUN6I_LOSC_OUT_GATING_EN, 0,
4795 + SUN6I_LOSC_OUT_GATING_EN_OFFSET, 0,
4796 &rtc->lock);
4797 if (IS_ERR(rtc->ext_losc)) {
4798 pr_crit("Couldn't register the LOSC external gate\n");
4799 diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
4800 index 18c4f933e8b9..b415ba42ca73 100644
4801 --- a/drivers/s390/scsi/zfcp_dbf.c
4802 +++ b/drivers/s390/scsi/zfcp_dbf.c
4803 @@ -664,6 +664,46 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
4804 spin_unlock_irqrestore(&dbf->scsi_lock, flags);
4805 }
4806
4807 +/**
4808 + * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
4809 + * @tag: Identifier for event.
4810 + * @adapter: Pointer to zfcp adapter as context for this event.
4811 + * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
4812 + * @ret: Return value of calling function.
4813 + *
4814 + * This SCSI trace variant does not depend on any of:
4815 + * scsi_cmnd, zfcp_fsf_req, scsi_device.
4816 + */
4817 +void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
4818 + unsigned int scsi_id, int ret)
4819 +{
4820 + struct zfcp_dbf *dbf = adapter->dbf;
4821 + struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
4822 + unsigned long flags;
4823 + static int const level = 1;
4824 +
4825 + if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
4826 + return;
4827 +
4828 + spin_lock_irqsave(&dbf->scsi_lock, flags);
4829 + memset(rec, 0, sizeof(*rec));
4830 +
4831 + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
4832 + rec->id = ZFCP_DBF_SCSI_CMND;
4833 + rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
4834 + rec->scsi_retries = ~0;
4835 + rec->scsi_allowed = ~0;
4836 + rec->fcp_rsp_info = ~0;
4837 + rec->scsi_id = scsi_id;
4838 + rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
4839 + rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
4840 + rec->host_scribble = ~0;
4841 + memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
4842 +
4843 + debug_event(dbf->scsi, level, rec, sizeof(*rec));
4844 + spin_unlock_irqrestore(&dbf->scsi_lock, flags);
4845 +}
4846 +
4847 static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
4848 {
4849 struct debug_info *d;
4850 diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
4851 index 1d91a32db08e..69dfb328dba4 100644
4852 --- a/drivers/s390/scsi/zfcp_erp.c
4853 +++ b/drivers/s390/scsi/zfcp_erp.c
4854 @@ -35,11 +35,28 @@ enum zfcp_erp_steps {
4855 ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
4856 };
4857
4858 +/**
4859 + * enum zfcp_erp_act_type - Type of ERP action object.
4860 + * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
4861 + * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
4862 + * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
4863 + * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
4864 + * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
4865 + * either of the first four enum values.
4866 + * Used to indicate that an ERP action could not be
4867 + * set up despite a detected need for some recovery.
4868 + * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
4869 + * either of the first four enum values.
4870 + * Used to indicate that ERP not needed because
4871 + * the object has ZFCP_STATUS_COMMON_ERP_FAILED.
4872 + */
4873 enum zfcp_erp_act_type {
4874 ZFCP_ERP_ACTION_REOPEN_LUN = 1,
4875 ZFCP_ERP_ACTION_REOPEN_PORT = 2,
4876 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
4877 ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
4878 + ZFCP_ERP_ACTION_NONE = 0xc0,
4879 + ZFCP_ERP_ACTION_FAILED = 0xe0,
4880 };
4881
4882 enum zfcp_erp_act_state {
4883 @@ -126,6 +143,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
4884 }
4885 }
4886
4887 +static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
4888 + struct zfcp_port *port,
4889 + struct scsi_device *sdev)
4890 +{
4891 + int need = want;
4892 + struct zfcp_scsi_dev *zsdev;
4893 +
4894 + switch (want) {
4895 + case ZFCP_ERP_ACTION_REOPEN_LUN:
4896 + zsdev = sdev_to_zfcp(sdev);
4897 + if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
4898 + need = 0;
4899 + break;
4900 + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
4901 + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
4902 + need = 0;
4903 + break;
4904 + case ZFCP_ERP_ACTION_REOPEN_PORT:
4905 + if (atomic_read(&port->status) &
4906 + ZFCP_STATUS_COMMON_ERP_FAILED) {
4907 + need = 0;
4908 + /* ensure propagation of failed status to new devices */
4909 + zfcp_erp_set_port_status(
4910 + port, ZFCP_STATUS_COMMON_ERP_FAILED);
4911 + }
4912 + break;
4913 + case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
4914 + if (atomic_read(&adapter->status) &
4915 + ZFCP_STATUS_COMMON_ERP_FAILED) {
4916 + need = 0;
4917 + /* ensure propagation of failed status to new devices */
4918 + zfcp_erp_set_adapter_status(
4919 + adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
4920 + }
4921 + break;
4922 + default:
4923 + need = 0;
4924 + break;
4925 + }
4926 +
4927 + return need;
4928 +}
4929 +
4930 static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
4931 struct zfcp_port *port,
4932 struct scsi_device *sdev)
4933 @@ -249,16 +309,27 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
4934 int retval = 1, need;
4935 struct zfcp_erp_action *act;
4936
4937 - if (!adapter->erp_thread)
4938 - return -EIO;
4939 + need = zfcp_erp_handle_failed(want, adapter, port, sdev);
4940 + if (!need) {
4941 + need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */
4942 + goto out;
4943 + }
4944 +
4945 + if (!adapter->erp_thread) {
4946 + need = ZFCP_ERP_ACTION_NONE; /* marker for trace */
4947 + retval = -EIO;
4948 + goto out;
4949 + }
4950
4951 need = zfcp_erp_required_act(want, adapter, port, sdev);
4952 if (!need)
4953 goto out;
4954
4955 act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
4956 - if (!act)
4957 + if (!act) {
4958 + need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */
4959 goto out;
4960 + }
4961 atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
4962 ++adapter->erp_total_count;
4963 list_add_tail(&act->list, &adapter->erp_ready_head);
4964 @@ -269,18 +340,32 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
4965 return retval;
4966 }
4967
4968 +void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
4969 + u64 port_name, u32 port_id)
4970 +{
4971 + unsigned long flags;
4972 + static /* don't waste stack */ struct zfcp_port tmpport;
4973 +
4974 + write_lock_irqsave(&adapter->erp_lock, flags);
4975 + /* Stand-in zfcp port with fields just good enough for
4976 + * zfcp_dbf_rec_trig() and zfcp_dbf_set_common().
4977 + * Under lock because tmpport is static.
4978 + */
4979 + atomic_set(&tmpport.status, -1); /* unknown */
4980 + tmpport.wwpn = port_name;
4981 + tmpport.d_id = port_id;
4982 + zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
4983 + ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
4984 + ZFCP_ERP_ACTION_NONE);
4985 + write_unlock_irqrestore(&adapter->erp_lock, flags);
4986 +}
4987 +
4988 static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
4989 int clear_mask, char *id)
4990 {
4991 zfcp_erp_adapter_block(adapter, clear_mask);
4992 zfcp_scsi_schedule_rports_block(adapter);
4993
4994 - /* ensure propagation of failed status to new devices */
4995 - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
4996 - zfcp_erp_set_adapter_status(adapter,
4997 - ZFCP_STATUS_COMMON_ERP_FAILED);
4998 - return -EIO;
4999 - }
5000 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
5001 adapter, NULL, NULL, id, 0);
5002 }
5003 @@ -299,12 +384,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
5004 zfcp_scsi_schedule_rports_block(adapter);
5005
5006 write_lock_irqsave(&adapter->erp_lock, flags);
5007 - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
5008 - zfcp_erp_set_adapter_status(adapter,
5009 - ZFCP_STATUS_COMMON_ERP_FAILED);
5010 - else
5011 - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
5012 - NULL, NULL, id, 0);
5013 + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
5014 + NULL, NULL, id, 0);
5015 write_unlock_irqrestore(&adapter->erp_lock, flags);
5016 }
5017
5018 @@ -345,9 +426,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
5019 zfcp_erp_port_block(port, clear);
5020 zfcp_scsi_schedule_rport_block(port);
5021
5022 - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
5023 - return;
5024 -
5025 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
5026 port->adapter, port, NULL, id, 0);
5027 }
5028 @@ -373,12 +451,6 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
5029 zfcp_erp_port_block(port, clear);
5030 zfcp_scsi_schedule_rport_block(port);
5031
5032 - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
5033 - /* ensure propagation of failed status to new devices */
5034 - zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
5035 - return -EIO;
5036 - }
5037 -
5038 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
5039 port->adapter, port, NULL, id, 0);
5040 }
5041 @@ -418,9 +490,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
5042
5043 zfcp_erp_lun_block(sdev, clear);
5044
5045 - if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
5046 - return;
5047 -
5048 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
5049 zfcp_sdev->port, sdev, id, act_status);
5050 }
5051 diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
5052 index e5eed8aac0ce..65d16747c301 100644
5053 --- a/drivers/s390/scsi/zfcp_ext.h
5054 +++ b/drivers/s390/scsi/zfcp_ext.h
5055 @@ -52,10 +52,15 @@ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
5056 extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
5057 extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
5058 struct zfcp_fsf_req *);
5059 +extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
5060 + unsigned int scsi_id, int ret);
5061
5062 /* zfcp_erp.c */
5063 extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
5064 extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
5065 +extern void zfcp_erp_port_forced_no_port_dbf(char *id,
5066 + struct zfcp_adapter *adapter,
5067 + u64 port_name, u32 port_id);
5068 extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
5069 extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
5070 extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
5071 diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
5072 index 22f9562f415c..0b6f51424745 100644
5073 --- a/drivers/s390/scsi/zfcp_scsi.c
5074 +++ b/drivers/s390/scsi/zfcp_scsi.c
5075 @@ -181,6 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
5076 if (abrt_req)
5077 break;
5078
5079 + zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL);
5080 zfcp_erp_wait(adapter);
5081 ret = fc_block_scsi_eh(scpnt);
5082 if (ret) {
5083 @@ -277,6 +278,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
5084 if (fsf_req)
5085 break;
5086
5087 + zfcp_dbf_scsi_devreset("wait", scpnt, tm_flags, NULL);
5088 zfcp_erp_wait(adapter);
5089 ret = fc_block_scsi_eh(scpnt);
5090 if (ret) {
5091 @@ -323,15 +325,16 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
5092 {
5093 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
5094 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
5095 - int ret;
5096 + int ret = SUCCESS, fc_ret;
5097
5098 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
5099 zfcp_erp_wait(adapter);
5100 - ret = fc_block_scsi_eh(scpnt);
5101 - if (ret)
5102 - return ret;
5103 + fc_ret = fc_block_scsi_eh(scpnt);
5104 + if (fc_ret)
5105 + ret = fc_ret;
5106
5107 - return SUCCESS;
5108 + zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
5109 + return ret;
5110 }
5111
5112 struct scsi_transport_template *zfcp_scsi_transport_template;
5113 @@ -602,6 +605,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
5114 if (port) {
5115 zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
5116 put_device(&port->dev);
5117 + } else {
5118 + zfcp_erp_port_forced_no_port_dbf(
5119 + "sctrpin", adapter,
5120 + rport->port_name /* zfcp_scsi_rport_register */,
5121 + rport->port_id /* zfcp_scsi_rport_register */);
5122 }
5123 }
5124
5125 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
5126 index 3a9eca163db8..b92f86acb8bb 100644
5127 --- a/drivers/scsi/hpsa.c
5128 +++ b/drivers/scsi/hpsa.c
5129 @@ -8869,7 +8869,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
5130 kfree(options);
5131 }
5132
5133 -static void hpsa_shutdown(struct pci_dev *pdev)
5134 +static void __hpsa_shutdown(struct pci_dev *pdev)
5135 {
5136 struct ctlr_info *h;
5137
5138 @@ -8884,6 +8884,12 @@ static void hpsa_shutdown(struct pci_dev *pdev)
5139 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
5140 }
5141
5142 +static void hpsa_shutdown(struct pci_dev *pdev)
5143 +{
5144 + __hpsa_shutdown(pdev);
5145 + pci_disable_device(pdev);
5146 +}
5147 +
5148 static void hpsa_free_device_info(struct ctlr_info *h)
5149 {
5150 int i;
5151 @@ -8927,7 +8933,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
5152 scsi_remove_host(h->scsi_host); /* init_one 8 */
5153 /* includes hpsa_free_irqs - init_one 4 */
5154 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
5155 - hpsa_shutdown(pdev);
5156 + __hpsa_shutdown(pdev);
5157
5158 hpsa_free_device_info(h); /* scan */
5159
5160 diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
5161 index 9e914f9c3ffb..05abe5aaab7f 100644
5162 --- a/drivers/scsi/qla2xxx/qla_gs.c
5163 +++ b/drivers/scsi/qla2xxx/qla_gs.c
5164 @@ -3915,7 +3915,6 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
5165 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
5166 continue;
5167 fcport->scan_state = QLA_FCPORT_FOUND;
5168 - fcport->d_id.b24 = rp->id.b24;
5169 found = true;
5170 /*
5171 * If device was not a fabric device before.
5172 @@ -3923,7 +3922,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
5173 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
5174 qla2x00_clear_loop_id(fcport);
5175 fcport->flags |= FCF_FABRIC_DEVICE;
5176 + } else if (fcport->d_id.b24 != rp->id.b24) {
5177 + qlt_schedule_sess_for_deletion(fcport);
5178 }
5179 + fcport->d_id.b24 = rp->id.b24;
5180 break;
5181 }
5182
5183 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
5184 index 8f55dd44adae..636960ad029a 100644
5185 --- a/drivers/scsi/qla2xxx/qla_init.c
5186 +++ b/drivers/scsi/qla2xxx/qla_init.c
5187 @@ -5037,7 +5037,8 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
5188 return;
5189
5190 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
5191 - fcport->fp_speed > ha->link_data_rate)
5192 + fcport->fp_speed > ha->link_data_rate ||
5193 + !ha->flags.gpsc_supported)
5194 return;
5195
5196 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
5197 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
5198 index a3dc83f9444d..68560a097ae1 100644
5199 --- a/drivers/scsi/qla2xxx/qla_isr.c
5200 +++ b/drivers/scsi/qla2xxx/qla_isr.c
5201 @@ -2494,8 +2494,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
5202 ox_id = le16_to_cpu(sts24->ox_id);
5203 par_sense_len = sizeof(sts24->data);
5204 /* Valid values of the retry delay timer are 0x1-0xffef */
5205 - if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
5206 - retry_delay = sts24->retry_delay;
5207 + if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
5208 + retry_delay = sts24->retry_delay & 0x3fff;
5209 + ql_dbg(ql_dbg_io, sp->vha, 0x3033,
5210 + "%s: scope=%#x retry_delay=%#x\n", __func__,
5211 + sts24->retry_delay >> 14, retry_delay);
5212 + }
5213 } else {
5214 if (scsi_status & SS_SENSE_LEN_VALID)
5215 sense_len = le16_to_cpu(sts->req_sense_length);
5216 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
5217 index 025dc2d3f3de..0266c4d07bc9 100644
5218 --- a/drivers/scsi/qla2xxx/qla_target.c
5219 +++ b/drivers/scsi/qla2xxx/qla_target.c
5220 @@ -1230,7 +1230,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
5221 void qlt_schedule_sess_for_deletion(struct fc_port *sess)
5222 {
5223 struct qla_tgt *tgt = sess->tgt;
5224 - struct qla_hw_data *ha = sess->vha->hw;
5225 unsigned long flags;
5226
5227 if (sess->disc_state == DSC_DELETE_PEND)
5228 @@ -1247,16 +1246,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
5229 return;
5230 }
5231
5232 - spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5233 if (sess->deleted == QLA_SESS_DELETED)
5234 sess->logout_on_delete = 0;
5235
5236 + spin_lock_irqsave(&sess->vha->work_lock, flags);
5237 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5238 - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5239 + spin_unlock_irqrestore(&sess->vha->work_lock, flags);
5240 return;
5241 }
5242 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
5243 - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5244 + spin_unlock_irqrestore(&sess->vha->work_lock, flags);
5245
5246 sess->disc_state = DSC_DELETE_PEND;
5247
5248 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
5249 index 656c98e116a9..e086bb63da46 100644
5250 --- a/drivers/scsi/scsi_debug.c
5251 +++ b/drivers/scsi/scsi_debug.c
5252 @@ -5506,9 +5506,9 @@ static void __exit scsi_debug_exit(void)
5253 int k = sdebug_add_host;
5254
5255 stop_all_queued();
5256 - free_all_queued();
5257 for (; k; k--)
5258 sdebug_remove_adapter();
5259 + free_all_queued();
5260 driver_unregister(&sdebug_driverfs_driver);
5261 bus_unregister(&pseudo_lld_bus);
5262 root_device_unregister(pseudo_primary);
5263 diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
5264 index 53efc386b1ad..df7f30a425c6 100644
5265 --- a/drivers/soc/rockchip/pm_domains.c
5266 +++ b/drivers/soc/rockchip/pm_domains.c
5267 @@ -255,7 +255,7 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
5268 return;
5269 else if (pd->info->pwr_w_mask)
5270 regmap_write(pmu->regmap, pmu->info->pwr_offset,
5271 - on ? pd->info->pwr_mask :
5272 + on ? pd->info->pwr_w_mask :
5273 (pd->info->pwr_mask | pd->info->pwr_w_mask));
5274 else
5275 regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
5276 diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
5277 index a4d6a0e2e993..23ad4f9f2143 100644
5278 --- a/drivers/thermal/broadcom/bcm2835_thermal.c
5279 +++ b/drivers/thermal/broadcom/bcm2835_thermal.c
5280 @@ -213,8 +213,8 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
5281 rate = clk_get_rate(data->clk);
5282 if ((rate < 1920000) || (rate > 5000000))
5283 dev_warn(&pdev->dev,
5284 - "Clock %pCn running at %pCr Hz is outside of the recommended range: 1.92 to 5MHz\n",
5285 - data->clk, data->clk);
5286 + "Clock %pCn running at %lu Hz is outside of the recommended range: 1.92 to 5MHz\n",
5287 + data->clk, rate);
5288
5289 /* register of thermal sensor and get info from DT */
5290 tz = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
5291 diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
5292 index a4f82ec665fe..2051a5309851 100644
5293 --- a/drivers/tty/serial/sh-sci.c
5294 +++ b/drivers/tty/serial/sh-sci.c
5295 @@ -2890,16 +2890,15 @@ static void serial_console_write(struct console *co, const char *s,
5296 unsigned long flags;
5297 int locked = 1;
5298
5299 - local_irq_save(flags);
5300 #if defined(SUPPORT_SYSRQ)
5301 if (port->sysrq)
5302 locked = 0;
5303 else
5304 #endif
5305 if (oops_in_progress)
5306 - locked = spin_trylock(&port->lock);
5307 + locked = spin_trylock_irqsave(&port->lock, flags);
5308 else
5309 - spin_lock(&port->lock);
5310 + spin_lock_irqsave(&port->lock, flags);
5311
5312 /* first save SCSCR then disable interrupts, keep clock source */
5313 ctrl = serial_port_in(port, SCSCR);
5314 @@ -2919,8 +2918,7 @@ static void serial_console_write(struct console *co, const char *s,
5315 serial_port_out(port, SCSCR, ctrl);
5316
5317 if (locked)
5318 - spin_unlock(&port->lock);
5319 - local_irq_restore(flags);
5320 + spin_unlock_irqrestore(&port->lock, flags);
5321 }
5322
5323 static int serial_console_setup(struct console *co, char *options)
5324 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
5325 index aa9968d90a48..e3bf65e213cd 100644
5326 --- a/drivers/usb/core/hub.c
5327 +++ b/drivers/usb/core/hub.c
5328 @@ -4551,7 +4551,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
5329 * reset. But only on the first attempt,
5330 * lest we get into a time out/reset loop
5331 */
5332 - if (r == 0 || (r == -ETIMEDOUT && retries == 0))
5333 + if (r == 0 || (r == -ETIMEDOUT &&
5334 + retries == 0 &&
5335 + udev->speed > USB_SPEED_FULL))
5336 break;
5337 }
5338 udev->descriptor.bMaxPacketSize0 =
5339 diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
5340 index 734a9158946b..e55304d5cf07 100644
5341 --- a/drivers/video/backlight/as3711_bl.c
5342 +++ b/drivers/video/backlight/as3711_bl.c
5343 @@ -262,10 +262,10 @@ static int as3711_bl_register(struct platform_device *pdev,
5344 static int as3711_backlight_parse_dt(struct device *dev)
5345 {
5346 struct as3711_bl_pdata *pdata = dev_get_platdata(dev);
5347 - struct device_node *bl =
5348 - of_find_node_by_name(dev->parent->of_node, "backlight"), *fb;
5349 + struct device_node *bl, *fb;
5350 int ret;
5351
5352 + bl = of_get_child_by_name(dev->parent->of_node, "backlight");
5353 if (!bl) {
5354 dev_dbg(dev, "backlight node not found\n");
5355 return -ENODEV;
5356 @@ -279,7 +279,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
5357 if (pdata->su1_max_uA <= 0)
5358 ret = -EINVAL;
5359 if (ret < 0)
5360 - return ret;
5361 + goto err_put_bl;
5362 }
5363
5364 fb = of_parse_phandle(bl, "su2-dev", 0);
5365 @@ -292,7 +292,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
5366 if (pdata->su2_max_uA <= 0)
5367 ret = -EINVAL;
5368 if (ret < 0)
5369 - return ret;
5370 + goto err_put_bl;
5371
5372 if (of_find_property(bl, "su2-feedback-voltage", NULL)) {
5373 pdata->su2_feedback = AS3711_SU2_VOLTAGE;
5374 @@ -314,8 +314,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
5375 pdata->su2_feedback = AS3711_SU2_CURR_AUTO;
5376 count++;
5377 }
5378 - if (count != 1)
5379 - return -EINVAL;
5380 + if (count != 1) {
5381 + ret = -EINVAL;
5382 + goto err_put_bl;
5383 + }
5384
5385 count = 0;
5386 if (of_find_property(bl, "su2-fbprot-lx-sd4", NULL)) {
5387 @@ -334,8 +336,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
5388 pdata->su2_fbprot = AS3711_SU2_GPIO4;
5389 count++;
5390 }
5391 - if (count != 1)
5392 - return -EINVAL;
5393 + if (count != 1) {
5394 + ret = -EINVAL;
5395 + goto err_put_bl;
5396 + }
5397
5398 count = 0;
5399 if (of_find_property(bl, "su2-auto-curr1", NULL)) {
5400 @@ -355,11 +359,20 @@ static int as3711_backlight_parse_dt(struct device *dev)
5401 * At least one su2-auto-curr* must be specified iff
5402 * AS3711_SU2_CURR_AUTO is used
5403 */
5404 - if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO))
5405 - return -EINVAL;
5406 + if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO)) {
5407 + ret = -EINVAL;
5408 + goto err_put_bl;
5409 + }
5410 }
5411
5412 + of_node_put(bl);
5413 +
5414 return 0;
5415 +
5416 +err_put_bl:
5417 + of_node_put(bl);
5418 +
5419 + return ret;
5420 }
5421
5422 static int as3711_backlight_probe(struct platform_device *pdev)
5423 diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
5424 index 7b738d60ecc2..f3aa6088f1d9 100644
5425 --- a/drivers/video/backlight/max8925_bl.c
5426 +++ b/drivers/video/backlight/max8925_bl.c
5427 @@ -116,7 +116,7 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
5428 if (!pdata)
5429 return;
5430
5431 - np = of_find_node_by_name(nproot, "backlight");
5432 + np = of_get_child_by_name(nproot, "backlight");
5433 if (!np) {
5434 dev_err(&pdev->dev, "failed to find backlight node\n");
5435 return;
5436 @@ -125,6 +125,8 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
5437 if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val))
5438 pdata->dual_string = val;
5439
5440 + of_node_put(np);
5441 +
5442 pdev->dev.platform_data = pdata;
5443 }
5444
5445 diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
5446 index 380917c86276..762e3feed097 100644
5447 --- a/drivers/video/backlight/tps65217_bl.c
5448 +++ b/drivers/video/backlight/tps65217_bl.c
5449 @@ -184,11 +184,11 @@ static struct tps65217_bl_pdata *
5450 tps65217_bl_parse_dt(struct platform_device *pdev)
5451 {
5452 struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
5453 - struct device_node *node = of_node_get(tps->dev->of_node);
5454 + struct device_node *node;
5455 struct tps65217_bl_pdata *pdata, *err;
5456 u32 val;
5457
5458 - node = of_find_node_by_name(node, "backlight");
5459 + node = of_get_child_by_name(tps->dev->of_node, "backlight");
5460 if (!node)
5461 return ERR_PTR(-ENODEV);
5462
5463 diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
5464 index 73676eb0244a..c592ca513115 100644
5465 --- a/drivers/video/fbdev/uvesafb.c
5466 +++ b/drivers/video/fbdev/uvesafb.c
5467 @@ -1044,7 +1044,8 @@ static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
5468 info->cmap.len || cmap->start < info->cmap.start)
5469 return -EINVAL;
5470
5471 - entries = kmalloc(sizeof(*entries) * cmap->len, GFP_KERNEL);
5472 + entries = kmalloc_array(cmap->len, sizeof(*entries),
5473 + GFP_KERNEL);
5474 if (!entries)
5475 return -ENOMEM;
5476
5477 diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
5478 index 398d22693234..6e2a9619192d 100644
5479 --- a/drivers/virt/vboxguest/vboxguest_linux.c
5480 +++ b/drivers/virt/vboxguest/vboxguest_linux.c
5481 @@ -121,7 +121,9 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
5482 if (!buf)
5483 return -ENOMEM;
5484
5485 - if (copy_from_user(buf, (void *)arg, hdr.size_in)) {
5486 + *((struct vbg_ioctl_hdr *)buf) = hdr;
5487 + if (copy_from_user(buf + sizeof(hdr), (void *)arg + sizeof(hdr),
5488 + hdr.size_in - sizeof(hdr))) {
5489 ret = -EFAULT;
5490 goto out;
5491 }
5492 diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
5493 index 80a778b02f28..caef0e0fd817 100644
5494 --- a/drivers/w1/w1.c
5495 +++ b/drivers/w1/w1.c
5496 @@ -751,7 +751,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
5497
5498 /* slave modules need to be loaded in a context with unlocked mutex */
5499 mutex_unlock(&dev->mutex);
5500 - request_module("w1-family-0x%02x", rn->family);
5501 + request_module("w1-family-0x%02X", rn->family);
5502 mutex_lock(&dev->mutex);
5503
5504 spin_lock(&w1_flock);
5505 diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
5506 index 762378f1811c..08e4af04d6f2 100644
5507 --- a/drivers/xen/events/events_base.c
5508 +++ b/drivers/xen/events/events_base.c
5509 @@ -628,8 +628,6 @@ static void __unbind_from_irq(unsigned int irq)
5510 xen_irq_info_cleanup(info);
5511 }
5512
5513 - BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
5514 -
5515 xen_free_irq(irq);
5516 }
5517
5518 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
5519 index 775a0f2d0b45..b54a55497216 100644
5520 --- a/fs/btrfs/inode.c
5521 +++ b/fs/btrfs/inode.c
5522 @@ -9475,6 +9475,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
5523 u64 new_idx = 0;
5524 u64 root_objectid;
5525 int ret;
5526 + int ret2;
5527 bool root_log_pinned = false;
5528 bool dest_log_pinned = false;
5529
5530 @@ -9671,7 +9672,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
5531 dest_log_pinned = false;
5532 }
5533 }
5534 - ret = btrfs_end_transaction(trans);
5535 + ret2 = btrfs_end_transaction(trans);
5536 + ret = ret ? ret : ret2;
5537 out_notrans:
5538 if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
5539 up_read(&fs_info->subvol_sem);
5540 diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
5541 index bf779461df13..2e23b953d304 100644
5542 --- a/fs/f2fs/checkpoint.c
5543 +++ b/fs/f2fs/checkpoint.c
5544 @@ -100,8 +100,10 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
5545 * readonly and make sure do not write checkpoint with non-uptodate
5546 * meta page.
5547 */
5548 - if (unlikely(!PageUptodate(page)))
5549 + if (unlikely(!PageUptodate(page))) {
5550 + memset(page_address(page), 0, PAGE_SIZE);
5551 f2fs_stop_checkpoint(sbi, false);
5552 + }
5553 out:
5554 return page;
5555 }
5556 diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
5557 index e0d9e8f27ed2..f8ef04c9f69d 100644
5558 --- a/fs/f2fs/inode.c
5559 +++ b/fs/f2fs/inode.c
5560 @@ -320,10 +320,10 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
5561 make_now:
5562 if (ino == F2FS_NODE_INO(sbi)) {
5563 inode->i_mapping->a_ops = &f2fs_node_aops;
5564 - mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
5565 + mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
5566 } else if (ino == F2FS_META_INO(sbi)) {
5567 inode->i_mapping->a_ops = &f2fs_meta_aops;
5568 - mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
5569 + mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
5570 } else if (S_ISREG(inode->i_mode)) {
5571 inode->i_op = &f2fs_file_inode_operations;
5572 inode->i_fop = &f2fs_file_operations;
5573 diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
5574 index 5854cc4e1d67..be8d1b16b8d1 100644
5575 --- a/fs/f2fs/segment.c
5576 +++ b/fs/f2fs/segment.c
5577 @@ -2020,6 +2020,7 @@ static void write_current_sum_page(struct f2fs_sb_info *sbi,
5578 struct f2fs_summary_block *dst;
5579
5580 dst = (struct f2fs_summary_block *)page_address(page);
5581 + memset(dst, 0, PAGE_SIZE);
5582
5583 mutex_lock(&curseg->curseg_mutex);
5584
5585 @@ -3116,6 +3117,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
5586
5587 page = grab_meta_page(sbi, blkaddr++);
5588 kaddr = (unsigned char *)page_address(page);
5589 + memset(kaddr, 0, PAGE_SIZE);
5590
5591 /* Step 1: write nat cache */
5592 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
5593 @@ -3140,6 +3142,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
5594 if (!page) {
5595 page = grab_meta_page(sbi, blkaddr++);
5596 kaddr = (unsigned char *)page_address(page);
5597 + memset(kaddr, 0, PAGE_SIZE);
5598 written_size = 0;
5599 }
5600 summary = (struct f2fs_summary *)(kaddr + written_size);
5601 diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
5602 index 3325d0769723..492ad0c86fa9 100644
5603 --- a/fs/f2fs/segment.h
5604 +++ b/fs/f2fs/segment.h
5605 @@ -375,6 +375,7 @@ static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
5606 int i;
5607
5608 raw_sit = (struct f2fs_sit_block *)page_address(page);
5609 + memset(raw_sit, 0, PAGE_SIZE);
5610 for (i = 0; i < end - start; i++) {
5611 rs = &raw_sit->entries[i];
5612 se = get_seg_entry(sbi, start + i);
5613 diff --git a/fs/fuse/control.c b/fs/fuse/control.c
5614 index b9ea99c5b5b3..5be0339dcceb 100644
5615 --- a/fs/fuse/control.c
5616 +++ b/fs/fuse/control.c
5617 @@ -211,10 +211,11 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
5618 if (!dentry)
5619 return NULL;
5620
5621 - fc->ctl_dentry[fc->ctl_ndents++] = dentry;
5622 inode = new_inode(fuse_control_sb);
5623 - if (!inode)
5624 + if (!inode) {
5625 + dput(dentry);
5626 return NULL;
5627 + }
5628
5629 inode->i_ino = get_next_ino();
5630 inode->i_mode = mode;
5631 @@ -228,6 +229,9 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
5632 set_nlink(inode, nlink);
5633 inode->i_private = fc;
5634 d_add(dentry, inode);
5635 +
5636 + fc->ctl_dentry[fc->ctl_ndents++] = dentry;
5637 +
5638 return dentry;
5639 }
5640
5641 @@ -284,7 +288,10 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
5642 for (i = fc->ctl_ndents - 1; i >= 0; i--) {
5643 struct dentry *dentry = fc->ctl_dentry[i];
5644 d_inode(dentry)->i_private = NULL;
5645 - d_drop(dentry);
5646 + if (!i) {
5647 + /* Get rid of submounts: */
5648 + d_invalidate(dentry);
5649 + }
5650 dput(dentry);
5651 }
5652 drop_nlink(d_inode(fuse_control_sb->s_root));
5653 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
5654 index 5d06384c2cae..ee6c9baf8158 100644
5655 --- a/fs/fuse/dev.c
5656 +++ b/fs/fuse/dev.c
5657 @@ -381,8 +381,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
5658 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
5659 wake_up(&fc->blocked_waitq);
5660
5661 - if (fc->num_background == fc->congestion_threshold &&
5662 - fc->connected && fc->sb) {
5663 + if (fc->num_background == fc->congestion_threshold && fc->sb) {
5664 clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
5665 clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
5666 }
5667 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
5668 index 24967382a7b1..7a980b4462d9 100644
5669 --- a/fs/fuse/dir.c
5670 +++ b/fs/fuse/dir.c
5671 @@ -1629,8 +1629,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
5672 return err;
5673
5674 if (attr->ia_valid & ATTR_OPEN) {
5675 - if (fc->atomic_o_trunc)
5676 + /* This is coming from open(..., ... | O_TRUNC); */
5677 + WARN_ON(!(attr->ia_valid & ATTR_SIZE));
5678 + WARN_ON(attr->ia_size != 0);
5679 + if (fc->atomic_o_trunc) {
5680 + /*
5681 + * No need to send request to userspace, since actual
5682 + * truncation has already been done by OPEN. But still
5683 + * need to truncate page cache.
5684 + */
5685 + i_size_write(inode, 0);
5686 + truncate_pagecache(inode, 0);
5687 return 0;
5688 + }
5689 file = NULL;
5690 }
5691
5692 diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
5693 index ef309958e060..9b37cf8142b5 100644
5694 --- a/fs/fuse/inode.c
5695 +++ b/fs/fuse/inode.c
5696 @@ -1179,6 +1179,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
5697 fuse_dev_free(fud);
5698 err_put_conn:
5699 fuse_conn_put(fc);
5700 + sb->s_fs_info = NULL;
5701 err_fput:
5702 fput(file);
5703 err:
5704 diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
5705 index a50d7813e3ea..180b4b616725 100644
5706 --- a/fs/nfs/callback_proc.c
5707 +++ b/fs/nfs/callback_proc.c
5708 @@ -420,11 +420,8 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
5709 return htonl(NFS4ERR_SEQ_FALSE_RETRY);
5710 }
5711
5712 - /* Wraparound */
5713 - if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
5714 - if (args->csa_sequenceid == 1)
5715 - return htonl(NFS4_OK);
5716 - } else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
5717 + /* Note: wraparound relies on seq_nr being of type u32 */
5718 + if (likely(args->csa_sequenceid == slot->seq_nr + 1))
5719 return htonl(NFS4_OK);
5720
5721 /* Misordered request */
5722 diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
5723 index 22dc30a679a0..b6f9d84ba19b 100644
5724 --- a/fs/nfs/nfs4idmap.c
5725 +++ b/fs/nfs/nfs4idmap.c
5726 @@ -343,7 +343,7 @@ static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf,
5727 int id_len;
5728 ssize_t ret;
5729
5730 - id_len = snprintf(id_str, sizeof(id_str), "%u", id);
5731 + id_len = nfs_map_numeric_to_string(id, id_str, sizeof(id_str));
5732 ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap);
5733 if (ret < 0)
5734 return -EINVAL;
5735 @@ -627,7 +627,8 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
5736 if (strcmp(upcall->im_name, im->im_name) != 0)
5737 break;
5738 /* Note: here we store the NUL terminator too */
5739 - len = sprintf(id_str, "%d", im->im_id) + 1;
5740 + len = 1 + nfs_map_numeric_to_string(im->im_id, id_str,
5741 + sizeof(id_str));
5742 ret = nfs_idmap_instantiate(key, authkey, id_str, len);
5743 break;
5744 case IDMAP_CONV_IDTONAME:
5745 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
5746 index b71757e85066..409acdda70dd 100644
5747 --- a/fs/nfs/nfs4proc.c
5748 +++ b/fs/nfs/nfs4proc.c
5749 @@ -751,7 +751,7 @@ static int nfs41_sequence_process(struct rpc_task *task,
5750 * The slot id we used was probably retired. Try again
5751 * using a different slot id.
5752 */
5753 - if (slot->seq_nr < slot->table->target_highest_slotid)
5754 + if (slot->slot_nr < slot->table->target_highest_slotid)
5755 goto session_recover;
5756 goto retry_nowait;
5757 case -NFS4ERR_SEQ_MISORDERED:
5758 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
5759 index 1d048dd95464..cfe535c286c3 100644
5760 --- a/fs/nfsd/nfs4xdr.c
5761 +++ b/fs/nfsd/nfs4xdr.c
5762 @@ -3651,7 +3651,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
5763 nfserr = nfserr_resource;
5764 goto err_no_verf;
5765 }
5766 - maxcount = min_t(u32, readdir->rd_maxcount, INT_MAX);
5767 + maxcount = svc_max_payload(resp->rqstp);
5768 + maxcount = min_t(u32, readdir->rd_maxcount, maxcount);
5769 /*
5770 * Note the rfc defines rd_maxcount as the size of the
5771 * READDIR4resok structure, which includes the verifier above
5772 @@ -3665,7 +3666,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
5773
5774 /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
5775 if (!readdir->rd_dircount)
5776 - readdir->rd_dircount = INT_MAX;
5777 + readdir->rd_dircount = svc_max_payload(resp->rqstp);
5778
5779 readdir->xdr = xdr;
5780 readdir->rd_maxcount = maxcount;
5781 diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
5782 index 04c4ec6483e5..8ae1cd8611cc 100644
5783 --- a/fs/ubifs/journal.c
5784 +++ b/fs/ubifs/journal.c
5785 @@ -1283,10 +1283,11 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
5786 int *new_len)
5787 {
5788 void *buf;
5789 - int err, dlen, compr_type, out_len, old_dlen;
5790 + int err, compr_type;
5791 + u32 dlen, out_len, old_dlen;
5792
5793 out_len = le32_to_cpu(dn->size);
5794 - buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
5795 + buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
5796 if (!buf)
5797 return -ENOMEM;
5798
5799 diff --git a/fs/udf/directory.c b/fs/udf/directory.c
5800 index 0a98a2369738..3835f983cc99 100644
5801 --- a/fs/udf/directory.c
5802 +++ b/fs/udf/directory.c
5803 @@ -152,6 +152,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
5804 sizeof(struct fileIdentDesc));
5805 }
5806 }
5807 + /* Got last entry outside of dir size - fs is corrupted! */
5808 + if (*nf_pos > dir->i_size)
5809 + return NULL;
5810 return fi;
5811 }
5812
5813 diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
5814 index d3558d897a4d..8d69b9134bef 100644
5815 --- a/include/dt-bindings/clock/aspeed-clock.h
5816 +++ b/include/dt-bindings/clock/aspeed-clock.h
5817 @@ -45,7 +45,7 @@
5818 #define ASPEED_RESET_JTAG_MASTER 3
5819 #define ASPEED_RESET_MIC 4
5820 #define ASPEED_RESET_PWM 5
5821 -#define ASPEED_RESET_PCIVGA 6
5822 +#define ASPEED_RESET_PECI 6
5823 #define ASPEED_RESET_I2C 7
5824 #define ASPEED_RESET_AHB 8
5825
5826 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
5827 index 5c4eee043191..7d047465dfc2 100644
5828 --- a/include/linux/blkdev.h
5829 +++ b/include/linux/blkdev.h
5830 @@ -1124,8 +1124,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
5831 if (!q->limits.chunk_sectors)
5832 return q->limits.max_sectors;
5833
5834 - return q->limits.chunk_sectors -
5835 - (offset & (q->limits.chunk_sectors - 1));
5836 + return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
5837 + (offset & (q->limits.chunk_sectors - 1))));
5838 }
5839
5840 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
5841 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
5842 index ab4711c63601..42506e4d1f53 100644
5843 --- a/include/linux/compiler.h
5844 +++ b/include/linux/compiler.h
5845 @@ -21,7 +21,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
5846 #define unlikely_notrace(x) __builtin_expect(!!(x), 0)
5847
5848 #define __branch_check__(x, expect, is_constant) ({ \
5849 - int ______r; \
5850 + long ______r; \
5851 static struct ftrace_likely_data \
5852 __attribute__((__aligned__(4))) \
5853 __attribute__((section("_ftrace_annotated_branch"))) \
5854 diff --git a/include/linux/memory.h b/include/linux/memory.h
5855 index 31ca3e28b0eb..a6ddefc60517 100644
5856 --- a/include/linux/memory.h
5857 +++ b/include/linux/memory.h
5858 @@ -38,6 +38,7 @@ struct memory_block {
5859
5860 int arch_get_memory_phys_device(unsigned long start_pfn);
5861 unsigned long memory_block_size_bytes(void);
5862 +int set_memory_block_size_order(unsigned int order);
5863
5864 /* These states are exposed to userspace as text strings in sysfs */
5865 #define MEM_ONLINE (1<<0) /* exposed to userspace */
5866 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
5867 index 3773e26c08c1..bb93a6c693e3 100644
5868 --- a/include/linux/slub_def.h
5869 +++ b/include/linux/slub_def.h
5870 @@ -156,8 +156,12 @@ struct kmem_cache {
5871
5872 #ifdef CONFIG_SYSFS
5873 #define SLAB_SUPPORTS_SYSFS
5874 +void sysfs_slab_unlink(struct kmem_cache *);
5875 void sysfs_slab_release(struct kmem_cache *);
5876 #else
5877 +static inline void sysfs_slab_unlink(struct kmem_cache *s)
5878 +{
5879 +}
5880 static inline void sysfs_slab_release(struct kmem_cache *s)
5881 {
5882 }
5883 diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
5884 index 9fc8a825aa28..ba015efb5312 100644
5885 --- a/include/rdma/ib_verbs.h
5886 +++ b/include/rdma/ib_verbs.h
5887 @@ -3310,11 +3310,14 @@ int ib_process_cq_direct(struct ib_cq *cq, int budget);
5888 *
5889 * Users can examine the cq structure to determine the actual CQ size.
5890 */
5891 -struct ib_cq *ib_create_cq(struct ib_device *device,
5892 - ib_comp_handler comp_handler,
5893 - void (*event_handler)(struct ib_event *, void *),
5894 - void *cq_context,
5895 - const struct ib_cq_init_attr *cq_attr);
5896 +struct ib_cq *__ib_create_cq(struct ib_device *device,
5897 + ib_comp_handler comp_handler,
5898 + void (*event_handler)(struct ib_event *, void *),
5899 + void *cq_context,
5900 + const struct ib_cq_init_attr *cq_attr,
5901 + const char *caller);
5902 +#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
5903 + __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
5904
5905 /**
5906 * ib_resize_cq - Modifies the capacity of the CQ.
5907 @@ -3734,6 +3737,20 @@ static inline int ib_check_mr_access(int flags)
5908 return 0;
5909 }
5910
5911 +static inline bool ib_access_writable(int access_flags)
5912 +{
5913 + /*
5914 + * We have writable memory backing the MR if any of the following
5915 + * access flags are set. "Local write" and "remote write" obviously
5916 + * require write access. "Remote atomic" can do things like fetch and
5917 + * add, which will modify memory, and "MW bind" can change permissions
5918 + * by binding a window.
5919 + */
5920 + return access_flags &
5921 + (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
5922 + IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
5923 +}
5924 +
5925 /**
5926 * ib_check_mr_status: lightweight check of MR status.
5927 * This routine may provide status checks on a selected
5928 diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
5929 index 3f4c187e435d..eec495e68823 100644
5930 --- a/include/rdma/rdma_vt.h
5931 +++ b/include/rdma/rdma_vt.h
5932 @@ -402,7 +402,7 @@ struct rvt_dev_info {
5933 spinlock_t pending_lock; /* protect pending mmap list */
5934
5935 /* CQ */
5936 - struct kthread_worker *worker; /* per device cq worker */
5937 + struct kthread_worker __rcu *worker; /* per device cq worker */
5938 u32 n_cqs_allocated; /* number of CQs allocated for device */
5939 spinlock_t n_cqs_lock; /* protect count of in use cqs */
5940
5941 diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
5942 index bc1e507be9ff..776308d2fa9e 100644
5943 --- a/kernel/locking/rwsem.c
5944 +++ b/kernel/locking/rwsem.c
5945 @@ -181,6 +181,7 @@ void down_read_non_owner(struct rw_semaphore *sem)
5946 might_sleep();
5947
5948 __down_read(sem);
5949 + rwsem_set_reader_owned(sem);
5950 }
5951
5952 EXPORT_SYMBOL(down_read_non_owner);
5953 diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
5954 index 3e3c2004bb23..449d67edfa4b 100644
5955 --- a/kernel/printk/printk_safe.c
5956 +++ b/kernel/printk/printk_safe.c
5957 @@ -82,6 +82,7 @@ static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
5958 {
5959 int add;
5960 size_t len;
5961 + va_list ap;
5962
5963 again:
5964 len = atomic_read(&s->len);
5965 @@ -100,7 +101,9 @@ static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
5966 if (!len)
5967 smp_rmb();
5968
5969 - add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
5970 + va_copy(ap, args);
5971 + add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
5972 + va_end(ap);
5973 if (!add)
5974 return 0;
5975
5976 diff --git a/kernel/softirq.c b/kernel/softirq.c
5977 index 177de3640c78..8a040bcaa033 100644
5978 --- a/kernel/softirq.c
5979 +++ b/kernel/softirq.c
5980 @@ -139,9 +139,13 @@ static void __local_bh_enable(unsigned int cnt)
5981 {
5982 lockdep_assert_irqs_disabled();
5983
5984 + if (preempt_count() == cnt)
5985 + trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5986 +
5987 if (softirq_count() == (cnt & SOFTIRQ_MASK))
5988 trace_softirqs_on(_RET_IP_);
5989 - preempt_count_sub(cnt);
5990 +
5991 + __preempt_count_sub(cnt);
5992 }
5993
5994 /*
5995 diff --git a/kernel/time/time.c b/kernel/time/time.c
5996 index 3044d48ebe56..e8127f4e9e66 100644
5997 --- a/kernel/time/time.c
5998 +++ b/kernel/time/time.c
5999 @@ -28,6 +28,7 @@
6000 */
6001
6002 #include <linux/export.h>
6003 +#include <linux/kernel.h>
6004 #include <linux/timex.h>
6005 #include <linux/capability.h>
6006 #include <linux/timekeeper_internal.h>
6007 @@ -314,9 +315,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
6008 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
6009 #else
6010 # if BITS_PER_LONG == 32
6011 - return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
6012 + return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
6013 + HZ_TO_MSEC_SHR32;
6014 # else
6015 - return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
6016 + return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
6017 # endif
6018 #endif
6019 }
6020 diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
6021 index 7d306b74230f..c44f74daefbf 100644
6022 --- a/kernel/trace/trace_events_filter.c
6023 +++ b/kernel/trace/trace_events_filter.c
6024 @@ -78,7 +78,8 @@ static const char * ops[] = { OPS };
6025 C(TOO_MANY_PREDS, "Too many terms in predicate expression"), \
6026 C(INVALID_FILTER, "Meaningless filter expression"), \
6027 C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \
6028 - C(INVALID_VALUE, "Invalid value (did you forget quotes)?"),
6029 + C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \
6030 + C(NO_FILTER, "No filter found"),
6031
6032 #undef C
6033 #define C(a, b) FILT_ERR_##a
6034 @@ -550,6 +551,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
6035 goto out_free;
6036 }
6037
6038 + if (!N) {
6039 + /* No program? */
6040 + ret = -EINVAL;
6041 + parse_error(pe, FILT_ERR_NO_FILTER, ptr - str);
6042 + goto out_free;
6043 + }
6044 +
6045 prog[N].pred = NULL; /* #13 */
6046 prog[N].target = 1; /* TRUE */
6047 prog[N+1].pred = NULL;
6048 diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
6049 index 3d35d062970d..c253c1b46c6b 100644
6050 --- a/lib/Kconfig.kasan
6051 +++ b/lib/Kconfig.kasan
6052 @@ -6,6 +6,7 @@ if HAVE_ARCH_KASAN
6053 config KASAN
6054 bool "KASan: runtime memory debugger"
6055 depends on SLUB || (SLAB && !DEBUG_SLAB)
6056 + select SLUB_DEBUG if SLUB
6057 select CONSTRUCTORS
6058 select STACKDEPOT
6059 help
6060 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
6061 index 23920c5ff728..91320e5bfd5b 100644
6062 --- a/lib/vsprintf.c
6063 +++ b/lib/vsprintf.c
6064 @@ -1456,9 +1456,6 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
6065 return string(buf, end, NULL, spec);
6066
6067 switch (fmt[1]) {
6068 - case 'r':
6069 - return number(buf, end, clk_get_rate(clk), spec);
6070 -
6071 case 'n':
6072 default:
6073 #ifdef CONFIG_COMMON_CLK
6074 diff --git a/mm/gup.c b/mm/gup.c
6075 index 541904a7c60f..3d8472d48a0b 100644
6076 --- a/mm/gup.c
6077 +++ b/mm/gup.c
6078 @@ -1459,32 +1459,48 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
6079 return 1;
6080 }
6081
6082 -static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
6083 +static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
6084 unsigned long end, struct page **pages, int *nr)
6085 {
6086 unsigned long fault_pfn;
6087 + int nr_start = *nr;
6088 +
6089 + fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
6090 + if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
6091 + return 0;
6092
6093 - fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
6094 - return __gup_device_huge(fault_pfn, addr, end, pages, nr);
6095 + if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
6096 + undo_dev_pagemap(nr, nr_start, pages);
6097 + return 0;
6098 + }
6099 + return 1;
6100 }
6101
6102 -static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
6103 +static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
6104 unsigned long end, struct page **pages, int *nr)
6105 {
6106 unsigned long fault_pfn;
6107 + int nr_start = *nr;
6108 +
6109 + fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
6110 + if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
6111 + return 0;
6112
6113 - fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
6114 - return __gup_device_huge(fault_pfn, addr, end, pages, nr);
6115 + if (unlikely(pud_val(orig) != pud_val(*pudp))) {
6116 + undo_dev_pagemap(nr, nr_start, pages);
6117 + return 0;
6118 + }
6119 + return 1;
6120 }
6121 #else
6122 -static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
6123 +static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
6124 unsigned long end, struct page **pages, int *nr)
6125 {
6126 BUILD_BUG();
6127 return 0;
6128 }
6129
6130 -static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
6131 +static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
6132 unsigned long end, struct page **pages, int *nr)
6133 {
6134 BUILD_BUG();
6135 @@ -1502,7 +1518,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
6136 return 0;
6137
6138 if (pmd_devmap(orig))
6139 - return __gup_device_huge_pmd(orig, addr, end, pages, nr);
6140 + return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
6141
6142 refs = 0;
6143 page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
6144 @@ -1540,7 +1556,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
6145 return 0;
6146
6147 if (pud_devmap(orig))
6148 - return __gup_device_huge_pud(orig, addr, end, pages, nr);
6149 + return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
6150
6151 refs = 0;
6152 page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
6153 diff --git a/mm/ksm.c b/mm/ksm.c
6154 index e3cbf9a92f3c..e6a9640580fc 100644
6155 --- a/mm/ksm.c
6156 +++ b/mm/ksm.c
6157 @@ -199,6 +199,8 @@ struct rmap_item {
6158 #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
6159 #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
6160 #define STABLE_FLAG 0x200 /* is listed from the stable tree */
6161 +#define KSM_FLAG_MASK (SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG)
6162 + /* to mask all the flags */
6163
6164 /* The stable and unstable tree heads */
6165 static struct rb_root one_stable_tree[1] = { RB_ROOT };
6166 @@ -2570,10 +2572,15 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
6167 anon_vma_lock_read(anon_vma);
6168 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
6169 0, ULONG_MAX) {
6170 + unsigned long addr;
6171 +
6172 cond_resched();
6173 vma = vmac->vma;
6174 - if (rmap_item->address < vma->vm_start ||
6175 - rmap_item->address >= vma->vm_end)
6176 +
6177 + /* Ignore the stable/unstable/sqnr flags */
6178 + addr = rmap_item->address & ~KSM_FLAG_MASK;
6179 +
6180 + if (addr < vma->vm_start || addr >= vma->vm_end)
6181 continue;
6182 /*
6183 * Initially we examine only the vma which covers this
6184 @@ -2587,8 +2594,7 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
6185 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
6186 continue;
6187
6188 - if (!rwc->rmap_one(page, vma,
6189 - rmap_item->address, rwc->arg)) {
6190 + if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
6191 anon_vma_unlock_read(anon_vma);
6192 return;
6193 }
6194 diff --git a/mm/slab_common.c b/mm/slab_common.c
6195 index 98dcdc352062..65408ced18f1 100644
6196 --- a/mm/slab_common.c
6197 +++ b/mm/slab_common.c
6198 @@ -566,10 +566,14 @@ static int shutdown_cache(struct kmem_cache *s)
6199 list_del(&s->list);
6200
6201 if (s->flags & SLAB_TYPESAFE_BY_RCU) {
6202 +#ifdef SLAB_SUPPORTS_SYSFS
6203 + sysfs_slab_unlink(s);
6204 +#endif
6205 list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
6206 schedule_work(&slab_caches_to_rcu_destroy_work);
6207 } else {
6208 #ifdef SLAB_SUPPORTS_SYSFS
6209 + sysfs_slab_unlink(s);
6210 sysfs_slab_release(s);
6211 #else
6212 slab_kmem_cache_release(s);
6213 diff --git a/mm/slub.c b/mm/slub.c
6214 index 44aa7847324a..613c8dc2f409 100644
6215 --- a/mm/slub.c
6216 +++ b/mm/slub.c
6217 @@ -5714,7 +5714,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
6218 kset_unregister(s->memcg_kset);
6219 #endif
6220 kobject_uevent(&s->kobj, KOBJ_REMOVE);
6221 - kobject_del(&s->kobj);
6222 out:
6223 kobject_put(&s->kobj);
6224 }
6225 @@ -5799,6 +5798,12 @@ static void sysfs_slab_remove(struct kmem_cache *s)
6226 schedule_work(&s->kobj_remove_work);
6227 }
6228
6229 +void sysfs_slab_unlink(struct kmem_cache *s)
6230 +{
6231 + if (slab_state >= FULL)
6232 + kobject_del(&s->kobj);
6233 +}
6234 +
6235 void sysfs_slab_release(struct kmem_cache *s)
6236 {
6237 if (slab_state >= FULL)
6238 diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
6239 index e8adad33d0bb..8e531ac9bc87 100644
6240 --- a/net/sunrpc/xprtrdma/rpc_rdma.c
6241 +++ b/net/sunrpc/xprtrdma/rpc_rdma.c
6242 @@ -230,7 +230,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
6243 */
6244 *ppages = alloc_page(GFP_ATOMIC);
6245 if (!*ppages)
6246 - return -EAGAIN;
6247 + return -ENOBUFS;
6248 }
6249 seg->mr_page = *ppages;
6250 seg->mr_offset = (char *)page_base;
6251 diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
6252 index 245160373dab..cbf227d12c2b 100644
6253 --- a/security/selinux/selinuxfs.c
6254 +++ b/security/selinux/selinuxfs.c
6255 @@ -435,22 +435,16 @@ static int sel_release_policy(struct inode *inode, struct file *filp)
6256 static ssize_t sel_read_policy(struct file *filp, char __user *buf,
6257 size_t count, loff_t *ppos)
6258 {
6259 - struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
6260 struct policy_load_memory *plm = filp->private_data;
6261 int ret;
6262
6263 - mutex_lock(&fsi->mutex);
6264 -
6265 ret = avc_has_perm(&selinux_state,
6266 current_sid(), SECINITSID_SECURITY,
6267 SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL);
6268 if (ret)
6269 - goto out;
6270 + return ret;
6271
6272 - ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
6273 -out:
6274 - mutex_unlock(&fsi->mutex);
6275 - return ret;
6276 + return simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
6277 }
6278
6279 static int sel_mmap_policy_fault(struct vm_fault *vmf)
6280 @@ -1182,25 +1176,29 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
6281 ret = -EINVAL;
6282 if (index >= fsi->bool_num || strcmp(name,
6283 fsi->bool_pending_names[index]))
6284 - goto out;
6285 + goto out_unlock;
6286
6287 ret = -ENOMEM;
6288 page = (char *)get_zeroed_page(GFP_KERNEL);
6289 if (!page)
6290 - goto out;
6291 + goto out_unlock;
6292
6293 cur_enforcing = security_get_bool_value(fsi->state, index);
6294 if (cur_enforcing < 0) {
6295 ret = cur_enforcing;
6296 - goto out;
6297 + goto out_unlock;
6298 }
6299 length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
6300 fsi->bool_pending_values[index]);
6301 - ret = simple_read_from_buffer(buf, count, ppos, page, length);
6302 -out:
6303 mutex_unlock(&fsi->mutex);
6304 + ret = simple_read_from_buffer(buf, count, ppos, page, length);
6305 +out_free:
6306 free_page((unsigned long)page);
6307 return ret;
6308 +
6309 +out_unlock:
6310 + mutex_unlock(&fsi->mutex);
6311 + goto out_free;
6312 }
6313
6314 static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
6315 @@ -1213,6 +1211,17 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
6316 unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
6317 const char *name = filep->f_path.dentry->d_name.name;
6318
6319 + if (count >= PAGE_SIZE)
6320 + return -ENOMEM;
6321 +
6322 + /* No partial writes. */
6323 + if (*ppos != 0)
6324 + return -EINVAL;
6325 +
6326 + page = memdup_user_nul(buf, count);
6327 + if (IS_ERR(page))
6328 + return PTR_ERR(page);
6329 +
6330 mutex_lock(&fsi->mutex);
6331
6332 length = avc_has_perm(&selinux_state,
6333 @@ -1227,22 +1236,6 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
6334 fsi->bool_pending_names[index]))
6335 goto out;
6336
6337 - length = -ENOMEM;
6338 - if (count >= PAGE_SIZE)
6339 - goto out;
6340 -
6341 - /* No partial writes. */
6342 - length = -EINVAL;
6343 - if (*ppos != 0)
6344 - goto out;
6345 -
6346 - page = memdup_user_nul(buf, count);
6347 - if (IS_ERR(page)) {
6348 - length = PTR_ERR(page);
6349 - page = NULL;
6350 - goto out;
6351 - }
6352 -
6353 length = -EINVAL;
6354 if (sscanf(page, "%d", &new_value) != 1)
6355 goto out;
6356 @@ -1274,6 +1267,17 @@ static ssize_t sel_commit_bools_write(struct file *filep,
6357 ssize_t length;
6358 int new_value;
6359
6360 + if (count >= PAGE_SIZE)
6361 + return -ENOMEM;
6362 +
6363 + /* No partial writes. */
6364 + if (*ppos != 0)
6365 + return -EINVAL;
6366 +
6367 + page = memdup_user_nul(buf, count);
6368 + if (IS_ERR(page))
6369 + return PTR_ERR(page);
6370 +
6371 mutex_lock(&fsi->mutex);
6372
6373 length = avc_has_perm(&selinux_state,
6374 @@ -1283,22 +1287,6 @@ static ssize_t sel_commit_bools_write(struct file *filep,
6375 if (length)
6376 goto out;
6377
6378 - length = -ENOMEM;
6379 - if (count >= PAGE_SIZE)
6380 - goto out;
6381 -
6382 - /* No partial writes. */
6383 - length = -EINVAL;
6384 - if (*ppos != 0)
6385 - goto out;
6386 -
6387 - page = memdup_user_nul(buf, count);
6388 - if (IS_ERR(page)) {
6389 - length = PTR_ERR(page);
6390 - page = NULL;
6391 - goto out;
6392 - }
6393 -
6394 length = -EINVAL;
6395 if (sscanf(page, "%d", &new_value) != 1)
6396 goto out;
6397 diff --git a/sound/core/timer.c b/sound/core/timer.c
6398 index 0ddcae495838..e9e73edb4bd8 100644
6399 --- a/sound/core/timer.c
6400 +++ b/sound/core/timer.c
6401 @@ -1517,7 +1517,7 @@ static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
6402 } else {
6403 if (id.subdevice < 0)
6404 id.subdevice = 0;
6405 - else
6406 + else if (id.subdevice < INT_MAX)
6407 id.subdevice++;
6408 }
6409 }
6410 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
6411 index 5bc3a7468e17..4d26bb010ddf 100644
6412 --- a/sound/pci/hda/hda_codec.c
6413 +++ b/sound/pci/hda/hda_codec.c
6414 @@ -2887,8 +2887,9 @@ static int hda_codec_runtime_suspend(struct device *dev)
6415 list_for_each_entry(pcm, &codec->pcm_list_head, list)
6416 snd_pcm_suspend_all(pcm->pcm);
6417 state = hda_call_codec_suspend(codec);
6418 - if (codec_has_clkstop(codec) && codec_has_epss(codec) &&
6419 - (state & AC_PWRST_CLK_STOP_OK))
6420 + if (codec->link_down_at_suspend ||
6421 + (codec_has_clkstop(codec) && codec_has_epss(codec) &&
6422 + (state & AC_PWRST_CLK_STOP_OK)))
6423 snd_hdac_codec_link_down(&codec->core);
6424 snd_hdac_link_power(&codec->core, false);
6425 return 0;
6426 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
6427 index 681c360f29f9..a8b1b31f161c 100644
6428 --- a/sound/pci/hda/hda_codec.h
6429 +++ b/sound/pci/hda/hda_codec.h
6430 @@ -258,6 +258,7 @@ struct hda_codec {
6431 unsigned int power_save_node:1; /* advanced PM for each widget */
6432 unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
6433 unsigned int force_pin_prefix:1; /* Add location prefix */
6434 + unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
6435 #ifdef CONFIG_PM
6436 unsigned long power_on_acct;
6437 unsigned long power_off_acct;
6438 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
6439 index 7d7eb1354eee..ed39a77f9253 100644
6440 --- a/sound/pci/hda/patch_hdmi.c
6441 +++ b/sound/pci/hda/patch_hdmi.c
6442 @@ -3741,6 +3741,11 @@ static int patch_atihdmi(struct hda_codec *codec)
6443
6444 spec->chmap.channels_max = max(spec->chmap.channels_max, 8u);
6445
6446 + /* AMD GPUs have neither EPSS nor CLKSTOP bits, hence preventing
6447 + * the link-down as is. Tell the core to allow it.
6448 + */
6449 + codec->link_down_at_suspend = 1;
6450 +
6451 return 0;
6452 }
6453
6454 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6455 index 06c2c80a045b..cb9a977bf188 100644
6456 --- a/sound/pci/hda/patch_realtek.c
6457 +++ b/sound/pci/hda/patch_realtek.c
6458 @@ -2542,6 +2542,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
6459 SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
6460 SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
6461 SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
6462 + SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270),
6463 SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
6464 SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
6465 SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
6466 @@ -4985,7 +4986,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
6467 struct alc_spec *spec = codec->spec;
6468
6469 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
6470 - spec->shutup = alc_no_shutup; /* reduce click noise */
6471 spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
6472 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
6473 codec->power_save_node = 0; /* avoid click noises */
6474 @@ -5384,6 +5384,13 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
6475 /* for hda_fixup_thinkpad_acpi() */
6476 #include "thinkpad_helper.c"
6477
6478 +static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
6479 + const struct hda_fixup *fix, int action)
6480 +{
6481 + alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */
6482 + hda_fixup_thinkpad_acpi(codec, fix, action);
6483 +}
6484 +
6485 /* for dell wmi mic mute led */
6486 #include "dell_wmi_helper.c"
6487
6488 @@ -5927,7 +5934,7 @@ static const struct hda_fixup alc269_fixups[] = {
6489 },
6490 [ALC269_FIXUP_THINKPAD_ACPI] = {
6491 .type = HDA_FIXUP_FUNC,
6492 - .v.func = hda_fixup_thinkpad_acpi,
6493 + .v.func = alc_fixup_thinkpad_acpi,
6494 .chained = true,
6495 .chain_id = ALC269_FIXUP_SKU_IGNORE,
6496 },
6497 @@ -6577,8 +6584,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6498 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6499 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6500 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6501 + SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6502 SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6503 - SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6504 + SND_PCI_QUIRK(0x17aa, 0x3136, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6505 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6506 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
6507 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
6508 @@ -6756,6 +6764,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6509 {0x14, 0x90170110},
6510 {0x19, 0x02a11030},
6511 {0x21, 0x02211020}),
6512 + SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
6513 + {0x14, 0x90170110},
6514 + {0x19, 0x02a11030},
6515 + {0x1a, 0x02a11040},
6516 + {0x1b, 0x01014020},
6517 + {0x21, 0x0221101f}),
6518 SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
6519 {0x12, 0x90a60140},
6520 {0x14, 0x90170110},
6521 diff --git a/sound/soc/cirrus/edb93xx.c b/sound/soc/cirrus/edb93xx.c
6522 index c53bd6f2c2d7..3d011abaa266 100644
6523 --- a/sound/soc/cirrus/edb93xx.c
6524 +++ b/sound/soc/cirrus/edb93xx.c
6525 @@ -67,7 +67,7 @@ static struct snd_soc_dai_link edb93xx_dai = {
6526 .cpu_dai_name = "ep93xx-i2s",
6527 .codec_name = "spi0.0",
6528 .codec_dai_name = "cs4271-hifi",
6529 - .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
6530 + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
6531 SND_SOC_DAIFMT_CBS_CFS,
6532 .ops = &edb93xx_ops,
6533 };
6534 diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
6535 index 934f8aefdd90..0dc3852c4621 100644
6536 --- a/sound/soc/cirrus/ep93xx-i2s.c
6537 +++ b/sound/soc/cirrus/ep93xx-i2s.c
6538 @@ -51,7 +51,9 @@
6539 #define EP93XX_I2S_WRDLEN_24 (1 << 0)
6540 #define EP93XX_I2S_WRDLEN_32 (2 << 0)
6541
6542 -#define EP93XX_I2S_LINCTRLDATA_R_JUST (1 << 2) /* Right justify */
6543 +#define EP93XX_I2S_RXLINCTRLDATA_R_JUST BIT(1) /* Right justify */
6544 +
6545 +#define EP93XX_I2S_TXLINCTRLDATA_R_JUST BIT(2) /* Right justify */
6546
6547 #define EP93XX_I2S_CLKCFG_LRS (1 << 0) /* lrclk polarity */
6548 #define EP93XX_I2S_CLKCFG_CKP (1 << 1) /* Bit clock polarity */
6549 @@ -170,25 +172,25 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
6550 unsigned int fmt)
6551 {
6552 struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(cpu_dai);
6553 - unsigned int clk_cfg, lin_ctrl;
6554 + unsigned int clk_cfg;
6555 + unsigned int txlin_ctrl = 0;
6556 + unsigned int rxlin_ctrl = 0;
6557
6558 clk_cfg = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXCLKCFG);
6559 - lin_ctrl = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXLINCTRLDATA);
6560
6561 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
6562 case SND_SOC_DAIFMT_I2S:
6563 clk_cfg |= EP93XX_I2S_CLKCFG_REL;
6564 - lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
6565 break;
6566
6567 case SND_SOC_DAIFMT_LEFT_J:
6568 clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
6569 - lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
6570 break;
6571
6572 case SND_SOC_DAIFMT_RIGHT_J:
6573 clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
6574 - lin_ctrl |= EP93XX_I2S_LINCTRLDATA_R_JUST;
6575 + rxlin_ctrl |= EP93XX_I2S_RXLINCTRLDATA_R_JUST;
6576 + txlin_ctrl |= EP93XX_I2S_TXLINCTRLDATA_R_JUST;
6577 break;
6578
6579 default:
6580 @@ -213,32 +215,32 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
6581 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
6582 case SND_SOC_DAIFMT_NB_NF:
6583 /* Negative bit clock, lrclk low on left word */
6584 - clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL);
6585 + clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS);
6586 break;
6587
6588 case SND_SOC_DAIFMT_NB_IF:
6589 /* Negative bit clock, lrclk low on right word */
6590 clk_cfg &= ~EP93XX_I2S_CLKCFG_CKP;
6591 - clk_cfg |= EP93XX_I2S_CLKCFG_REL;
6592 + clk_cfg |= EP93XX_I2S_CLKCFG_LRS;
6593 break;
6594
6595 case SND_SOC_DAIFMT_IB_NF:
6596 /* Positive bit clock, lrclk low on left word */
6597 clk_cfg |= EP93XX_I2S_CLKCFG_CKP;
6598 - clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
6599 + clk_cfg &= ~EP93XX_I2S_CLKCFG_LRS;
6600 break;
6601
6602 case SND_SOC_DAIFMT_IB_IF:
6603 /* Positive bit clock, lrclk low on right word */
6604 - clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL;
6605 + clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS;
6606 break;
6607 }
6608
6609 /* Write new register values */
6610 ep93xx_i2s_write_reg(info, EP93XX_I2S_RXCLKCFG, clk_cfg);
6611 ep93xx_i2s_write_reg(info, EP93XX_I2S_TXCLKCFG, clk_cfg);
6612 - ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, lin_ctrl);
6613 - ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, lin_ctrl);
6614 + ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, rxlin_ctrl);
6615 + ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, txlin_ctrl);
6616 return 0;
6617 }
6618
6619 diff --git a/sound/soc/cirrus/snappercl15.c b/sound/soc/cirrus/snappercl15.c
6620 index 2334ec19e7eb..11ff7b2672b2 100644
6621 --- a/sound/soc/cirrus/snappercl15.c
6622 +++ b/sound/soc/cirrus/snappercl15.c
6623 @@ -72,7 +72,7 @@ static struct snd_soc_dai_link snappercl15_dai = {
6624 .codec_dai_name = "tlv320aic23-hifi",
6625 .codec_name = "tlv320aic23-codec.0-001a",
6626 .platform_name = "ep93xx-i2s",
6627 - .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
6628 + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
6629 SND_SOC_DAIFMT_CBS_CFS,
6630 .ops = &snappercl15_ops,
6631 };
6632 diff --git a/sound/soc/codecs/cs35l35.c b/sound/soc/codecs/cs35l35.c
6633 index a4a2cb171bdf..bd6226bde45f 100644
6634 --- a/sound/soc/codecs/cs35l35.c
6635 +++ b/sound/soc/codecs/cs35l35.c
6636 @@ -1105,6 +1105,7 @@ static struct regmap_config cs35l35_regmap = {
6637 .readable_reg = cs35l35_readable_register,
6638 .precious_reg = cs35l35_precious_register,
6639 .cache_type = REGCACHE_RBTREE,
6640 + .use_single_rw = true,
6641 };
6642
6643 static irqreturn_t cs35l35_irq(int irq, void *data)
6644 diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
6645 index 53215b52e4f2..f8a06709f76d 100644
6646 --- a/sound/soc/mediatek/common/mtk-afe-platform-driver.c
6647 +++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
6648 @@ -64,14 +64,14 @@ static const struct snd_pcm_ops mtk_afe_pcm_ops = {
6649 static int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
6650 {
6651 size_t size;
6652 - struct snd_card *card = rtd->card->snd_card;
6653 struct snd_pcm *pcm = rtd->pcm;
6654 struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
6655 struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
6656
6657 size = afe->mtk_afe_hardware->buffer_bytes_max;
6658 return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
6659 - card->dev, size, size);
6660 + rtd->platform->dev,
6661 + size, size);
6662 }
6663
6664 static void mtk_afe_pcm_free(struct snd_pcm *pcm)
6665 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
6666 index 2d9709104ec5..b2b501ef57d7 100644
6667 --- a/sound/soc/soc-dapm.c
6668 +++ b/sound/soc/soc-dapm.c
6669 @@ -433,6 +433,8 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
6670 static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
6671 {
6672 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
6673 +
6674 + list_del(&data->paths);
6675 kfree(data->wlist);
6676 kfree(data);
6677 }
6678 diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
6679 index 36ef45b2e89d..09c4a4a7b5dd 100644
6680 --- a/tools/perf/util/dso.c
6681 +++ b/tools/perf/util/dso.c
6682 @@ -354,6 +354,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
6683 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
6684 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
6685 (strncmp(name, "[vdso]", 6) == 0) ||
6686 + (strncmp(name, "[vdso32]", 8) == 0) ||
6687 + (strncmp(name, "[vdsox32]", 9) == 0) ||
6688 (strncmp(name, "[vsyscall]", 10) == 0)) {
6689 m->kmod = false;
6690
6691 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
6692 index f9157aed1289..d404bed7003a 100644
6693 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
6694 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
6695 @@ -113,6 +113,7 @@ struct intel_pt_decoder {
6696 bool have_cyc;
6697 bool fixup_last_mtc;
6698 bool have_last_ip;
6699 + enum intel_pt_param_flags flags;
6700 uint64_t pos;
6701 uint64_t last_ip;
6702 uint64_t ip;
6703 @@ -226,6 +227,8 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
6704 decoder->return_compression = params->return_compression;
6705 decoder->branch_enable = params->branch_enable;
6706
6707 + decoder->flags = params->flags;
6708 +
6709 decoder->period = params->period;
6710 decoder->period_type = params->period_type;
6711
6712 @@ -1097,6 +1100,15 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
6713 return ret;
6714 }
6715
6716 +static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder,
6717 + struct intel_pt_insn *intel_pt_insn,
6718 + uint64_t ip, int err)
6719 +{
6720 + return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err &&
6721 + intel_pt_insn->branch == INTEL_PT_BR_INDIRECT &&
6722 + ip == decoder->ip + intel_pt_insn->length;
6723 +}
6724 +
6725 static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
6726 {
6727 struct intel_pt_insn intel_pt_insn;
6728 @@ -1109,10 +1121,11 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
6729 err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
6730 if (err == INTEL_PT_RETURN)
6731 return 0;
6732 - if (err == -EAGAIN) {
6733 + if (err == -EAGAIN ||
6734 + intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
6735 if (intel_pt_fup_event(decoder))
6736 return 0;
6737 - return err;
6738 + return -EAGAIN;
6739 }
6740 decoder->set_fup_tx_flags = false;
6741 if (err)
6742 @@ -1376,7 +1389,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
6743 {
6744 intel_pt_log("ERROR: Buffer overflow\n");
6745 intel_pt_clear_tx_flags(decoder);
6746 - decoder->have_tma = false;
6747 decoder->cbr = 0;
6748 decoder->timestamp_insn_cnt = 0;
6749 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
6750 @@ -1604,7 +1616,6 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
6751 case INTEL_PT_PSB:
6752 case INTEL_PT_TSC:
6753 case INTEL_PT_TMA:
6754 - case INTEL_PT_CBR:
6755 case INTEL_PT_MODE_TSX:
6756 case INTEL_PT_BAD:
6757 case INTEL_PT_PSBEND:
6758 @@ -1620,6 +1631,10 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
6759 decoder->pkt_step = 0;
6760 return -ENOENT;
6761
6762 + case INTEL_PT_CBR:
6763 + intel_pt_calc_cbr(decoder);
6764 + break;
6765 +
6766 case INTEL_PT_OVF:
6767 return intel_pt_overflow(decoder);
6768
6769 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
6770 index fc1752d50019..51c18d67f4ca 100644
6771 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
6772 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
6773 @@ -60,6 +60,14 @@ enum {
6774 INTEL_PT_ERR_MAX,
6775 };
6776
6777 +enum intel_pt_param_flags {
6778 + /*
6779 + * FUP packet can contain next linear instruction pointer instead of
6780 + * current linear instruction pointer.
6781 + */
6782 + INTEL_PT_FUP_WITH_NLIP = 1 << 0,
6783 +};
6784 +
6785 struct intel_pt_state {
6786 enum intel_pt_sample_type type;
6787 int err;
6788 @@ -106,6 +114,7 @@ struct intel_pt_params {
6789 unsigned int mtc_period;
6790 uint32_t tsc_ctc_ratio_n;
6791 uint32_t tsc_ctc_ratio_d;
6792 + enum intel_pt_param_flags flags;
6793 };
6794
6795 struct intel_pt_decoder;
6796 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
6797 index ba4c9dd18643..d426761a549d 100644
6798 --- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
6799 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
6800 @@ -366,7 +366,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
6801 if (len < offs)
6802 return INTEL_PT_NEED_MORE_BYTES;
6803 byte = buf[offs++];
6804 - payload |= (byte >> 1) << shift;
6805 + payload |= ((uint64_t)byte >> 1) << shift;
6806 }
6807
6808 packet->type = INTEL_PT_CYC;
6809 diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
6810 index 0effaff57020..38b25e826a45 100644
6811 --- a/tools/perf/util/intel-pt.c
6812 +++ b/tools/perf/util/intel-pt.c
6813 @@ -751,6 +751,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
6814 unsigned int queue_nr)
6815 {
6816 struct intel_pt_params params = { .get_trace = 0, };
6817 + struct perf_env *env = pt->machine->env;
6818 struct intel_pt_queue *ptq;
6819
6820 ptq = zalloc(sizeof(struct intel_pt_queue));
6821 @@ -832,6 +833,9 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
6822 }
6823 }
6824
6825 + if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
6826 + params.flags |= INTEL_PT_FUP_WITH_NLIP;
6827 +
6828 ptq->decoder = intel_pt_decoder_new(&params);
6829 if (!ptq->decoder)
6830 goto out_free;
6831 @@ -1523,6 +1527,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
6832
6833 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
6834 switch (ptq->switch_state) {
6835 + case INTEL_PT_SS_NOT_TRACING:
6836 case INTEL_PT_SS_UNKNOWN:
6837 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
6838 err = intel_pt_next_tid(pt, ptq);
6839 diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
6840 index 2a4f16fc9819..8393b1c06027 100644
6841 --- a/tools/testing/selftests/ftrace/test.d/functions
6842 +++ b/tools/testing/selftests/ftrace/test.d/functions
6843 @@ -15,14 +15,29 @@ reset_tracer() { # reset the current tracer
6844 echo nop > current_tracer
6845 }
6846
6847 -reset_trigger() { # reset all current setting triggers
6848 - grep -v ^# events/*/*/trigger |
6849 +reset_trigger_file() {
6850 + # remove action triggers first
6851 + grep -H ':on[^:]*(' $@ |
6852 + while read line; do
6853 + cmd=`echo $line | cut -f2- -d: | cut -f1 -d" "`
6854 + file=`echo $line | cut -f1 -d:`
6855 + echo "!$cmd" >> $file
6856 + done
6857 + grep -Hv ^# $@ |
6858 while read line; do
6859 cmd=`echo $line | cut -f2- -d: | cut -f1 -d" "`
6860 - echo "!$cmd" > `echo $line | cut -f1 -d:`
6861 + file=`echo $line | cut -f1 -d:`
6862 + echo "!$cmd" > $file
6863 done
6864 }
6865
6866 +reset_trigger() { # reset all current setting triggers
6867 + if [ -d events/synthetic ]; then
6868 + reset_trigger_file events/synthetic/*/trigger
6869 + fi
6870 + reset_trigger_file events/*/*/trigger
6871 +}
6872 +
6873 reset_events_filter() { # reset all current setting filters
6874 grep -v ^none events/*/*/filter |
6875 while read line; do